From e694134054db9c206aec4b6e6fa26b8ab807d6eb Mon Sep 17 00:00:00 2001 From: awstools Date: Mon, 18 Jul 2022 18:30:51 +0000 Subject: [PATCH 01/31] feat(client-elasticache): Adding AutoMinorVersionUpgrade in the DescribeReplicationGroups API --- .../client-elasticache/src/models/models_0.ts | 60 ++++++++++--------- .../src/protocols/Aws_query.ts | 4 ++ .../sdk-codegen/aws-models/elasticache.json | 24 +++++--- 3 files changed, 52 insertions(+), 36 deletions(-) diff --git a/clients/client-elasticache/src/models/models_0.ts b/clients/client-elasticache/src/models/models_0.ts index cc0009c505e0b..9b0e70093349f 100644 --- a/clients/client-elasticache/src/models/models_0.ts +++ b/clients/client-elasticache/src/models/models_0.ts @@ -1380,6 +1380,12 @@ export interface ReplicationGroup { * For more information, see Data tiering.

*/ DataTiering?: DataTieringStatus | string; + + /** + *

 If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. + *

+ */ + AutoMinorVersionUpgrade?: boolean; } export namespace ReplicationGroup { @@ -1708,7 +1714,7 @@ export interface Snapshot { * * *
  • - *

    Previous generation: (not recommended)

    + *

    Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

    *

    * T1 node types: * cache.t1.micro @@ -1738,7 +1744,7 @@ export interface Snapshot { * *

    * - *

    A key policy document must conform to the following rules.

    + *

    A key policy document can include only the following characters:

    *
      *
    • - *

      Up to 32 kilobytes (32768 bytes)

      - *
    • - *
    • - *

      Must be UTF-8 encoded

      + *

      Printable ASCII characters from the space character (\u0020) through the end of the ASCII character range.

      *
    • *
    • - *

      The only Unicode characters that are permitted in a key policy document are the horizontal tab (U+0009), linefeed (U+000A), carriage return (U+000D), and characters in the range U+0020 to U+00FF.

      + *

      Printable characters in the Basic Latin and Latin-1 Supplement character set (through \u00FF).

      *
    • *
    • - *

      The Sid element in a key policy statement can include spaces. (Spaces are - * prohibited in the Sid element of an IAM policy document.)

      + *

      The tab (\u0009), line feed (\u000A), and carriage return (\u000D) special characters

      *
    • *
    + *

    For information about key policies, see Key policies in KMS in the + * Key Management Service Developer Guide. For help writing and formatting a JSON policy document, see the IAM JSON Policy Reference in the + * Identity and Access Management User Guide + * .

    */ Policy: string | undefined; @@ -4314,22 +4332,22 @@ export interface ReplicateKeyRequest { * * * - *

    A key policy document must conform to the following rules.

    + *

    A key policy document can include only the following characters:

    *
      *
    • - *

      Up to 32 kilobytes (32768 bytes)

      - *
    • - *
    • - *

      Must be UTF-8 encoded

      + *

      Printable ASCII characters from the space character (\u0020) through the end of the ASCII character range.

      *
    • *
    • - *

      The only Unicode characters that are permitted in a key policy document are the horizontal tab (U+0009), linefeed (U+000A), carriage return (U+000D), and characters in the range U+0020 to U+00FF.

      + *

      Printable characters in the Basic Latin and Latin-1 Supplement character set (through \u00FF).

      *
    • *
    • - *

      The Sid element in a key policy statement can include spaces. (Spaces are - * prohibited in the Sid element of an IAM policy document.)

      + *

      The tab (\u0009), line feed (\u000A), and carriage return (\u000D) special characters

      *
    • *
    + *

    For information about key policies, see Key policies in KMS in the + * Key Management Service Developer Guide. For help writing and formatting a JSON policy document, see the IAM JSON Policy Reference in the + * Identity and Access Management User Guide + * .

    */ Policy?: string; @@ -4760,7 +4778,7 @@ export interface UpdateAliasRequest { /** *

    Identifies the alias that is changing its KMS key. This value must begin with * alias/ followed by the alias name, such as alias/ExampleAlias. You - * cannot use UpdateAlias to change the alias name.

    + * cannot use UpdateAlias to change the alias name.

    */ AliasName: string | undefined; diff --git a/clients/client-kms/src/pagination/DescribeCustomKeyStoresPaginator.ts b/clients/client-kms/src/pagination/DescribeCustomKeyStoresPaginator.ts new file mode 100644 index 0000000000000..95d9ac5d6054f --- /dev/null +++ b/clients/client-kms/src/pagination/DescribeCustomKeyStoresPaginator.ts @@ -0,0 +1,61 @@ +// smithy-typescript generated code +import { Paginator } from "@aws-sdk/types"; + +import { + DescribeCustomKeyStoresCommand, + DescribeCustomKeyStoresCommandInput, + DescribeCustomKeyStoresCommandOutput, +} from "../commands/DescribeCustomKeyStoresCommand"; +import { KMS } from "../KMS"; +import { KMSClient } from "../KMSClient"; +import { KMSPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: KMSClient, + input: DescribeCustomKeyStoresCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new DescribeCustomKeyStoresCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: KMS, + input: DescribeCustomKeyStoresCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.describeCustomKeyStores(input, ...args); +}; +export async function* paginateDescribeCustomKeyStores( + config: KMSPaginationConfiguration, + input: DescribeCustomKeyStoresCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.Marker + let token: typeof input.Marker | undefined = config.startingToken || undefined; + let hasNext = true; + let page: DescribeCustomKeyStoresCommandOutput; + while (hasNext) { + input.Marker = token; + input["Limit"] = config.pageSize; + if (config.client instanceof KMS) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof KMSClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { 
+ throw new Error("Invalid client, expected KMS | KMSClient"); + } + yield page; + const prevToken = token; + token = page.NextMarker; + hasNext = !!(token && (!config.stopOnSameToken || token !== prevToken)); + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-kms/src/pagination/ListResourceTagsPaginator.ts b/clients/client-kms/src/pagination/ListResourceTagsPaginator.ts new file mode 100644 index 0000000000000..303da893b6a2a --- /dev/null +++ b/clients/client-kms/src/pagination/ListResourceTagsPaginator.ts @@ -0,0 +1,61 @@ +// smithy-typescript generated code +import { Paginator } from "@aws-sdk/types"; + +import { + ListResourceTagsCommand, + ListResourceTagsCommandInput, + ListResourceTagsCommandOutput, +} from "../commands/ListResourceTagsCommand"; +import { KMS } from "../KMS"; +import { KMSClient } from "../KMSClient"; +import { KMSPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: KMSClient, + input: ListResourceTagsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListResourceTagsCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: KMS, + input: ListResourceTagsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listResourceTags(input, ...args); +}; +export async function* paginateListResourceTags( + config: KMSPaginationConfiguration, + input: ListResourceTagsCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.Marker + let token: typeof input.Marker | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListResourceTagsCommandOutput; + while (hasNext) { + input.Marker = token; + input["Limit"] = config.pageSize; + if (config.client instanceof KMS) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client 
instanceof KMSClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected KMS | KMSClient"); + } + yield page; + const prevToken = token; + token = page.NextMarker; + hasNext = !!(token && (!config.stopOnSameToken || token !== prevToken)); + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-kms/src/pagination/ListRetirableGrantsPaginator.ts b/clients/client-kms/src/pagination/ListRetirableGrantsPaginator.ts new file mode 100644 index 0000000000000..19dd4cf981922 --- /dev/null +++ b/clients/client-kms/src/pagination/ListRetirableGrantsPaginator.ts @@ -0,0 +1,61 @@ +// smithy-typescript generated code +import { Paginator } from "@aws-sdk/types"; + +import { + ListRetirableGrantsCommand, + ListRetirableGrantsCommandInput, + ListRetirableGrantsCommandOutput, +} from "../commands/ListRetirableGrantsCommand"; +import { KMS } from "../KMS"; +import { KMSClient } from "../KMSClient"; +import { KMSPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: KMSClient, + input: ListRetirableGrantsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListRetirableGrantsCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: KMS, + input: ListRetirableGrantsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listRetirableGrants(input, ...args); +}; +export async function* paginateListRetirableGrants( + config: KMSPaginationConfiguration, + input: ListRetirableGrantsCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.Marker + let token: typeof input.Marker | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListRetirableGrantsCommandOutput; + while (hasNext) { + input.Marker = token; + input["Limit"] = 
config.pageSize; + if (config.client instanceof KMS) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof KMSClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected KMS | KMSClient"); + } + yield page; + const prevToken = token; + token = page.NextMarker; + hasNext = !!(token && (!config.stopOnSameToken || token !== prevToken)); + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-kms/src/pagination/index.ts b/clients/client-kms/src/pagination/index.ts index a3fc1b56b51a2..f6dcbd87bd529 100644 --- a/clients/client-kms/src/pagination/index.ts +++ b/clients/client-kms/src/pagination/index.ts @@ -1,6 +1,9 @@ +export * from "./DescribeCustomKeyStoresPaginator"; // smithy-typescript generated code export * from "./Interfaces"; export * from "./ListAliasesPaginator"; export * from "./ListGrantsPaginator"; export * from "./ListKeyPoliciesPaginator"; export * from "./ListKeysPaginator"; +export * from "./ListResourceTagsPaginator"; +export * from "./ListRetirableGrantsPaginator"; diff --git a/codegen/sdk-codegen/aws-models/kms.json b/codegen/sdk-codegen/aws-models/kms.json index a882ebc55e7d5..3e28092ad2559 100644 --- a/codegen/sdk-codegen/aws-models/kms.json +++ b/codegen/sdk-codegen/aws-models/kms.json @@ -373,6 +373,10 @@ { "value": "SUBNET_NOT_FOUND", "name": "SUBNET_NOT_FOUND" + }, + { + "value": "INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET", + "name": "INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET" } ] } @@ -490,7 +494,7 @@ } ], "traits": { - "smithy.api#documentation": "

    Creates a custom key store that is associated with an CloudHSM cluster that you own and\n manage.

    \n

    This operation is part of the Custom Key Store feature feature in KMS, which\ncombines the convenience and extensive integration of KMS with the isolation and control of a\nsingle-tenant key store.

    \n

    Before you create the custom key store, you must assemble\n the required elements, including an CloudHSM cluster that fulfills the requirements for a custom\n key store. For details about the required elements, see Assemble the Prerequisites\n in the Key Management Service Developer Guide.

    \n

    When the operation completes successfully, it returns the ID of the new custom key store.\n Before you can use your new custom key store, you need to use the ConnectCustomKeyStore operation to connect the new key store to its CloudHSM\n cluster. Even if you are not going to use your custom key store immediately, you might want to\n connect it to verify that all settings are correct and then disconnect it until you are ready\n to use it.

    \n

    For help with failures, see Troubleshooting a Custom Key Store in the\n Key Management Service Developer Guide.

    \n

    \n Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

    \n

    \n Required permissions: kms:CreateCustomKeyStore (IAM policy).

    \n

    \n Related operations:\n

    \n " + "smithy.api#documentation": "

    Creates a custom key store that is associated with an CloudHSM cluster that you own and\n manage.

    \n

    This operation is part of the custom key store feature feature in KMS, which\ncombines the convenience and extensive integration of KMS with the isolation and control of a\nsingle-tenant key store.

    \n

    Before you create the custom key store, you must assemble\n the required elements, including an CloudHSM cluster that fulfills the requirements for a custom\n key store. For details about the required elements, see Assemble the Prerequisites\n in the Key Management Service Developer Guide.

    \n

    When the operation completes successfully, it returns the ID of the new custom key store.\n Before you can use your new custom key store, you need to use the ConnectCustomKeyStore operation to connect the new key store to its CloudHSM\n cluster. Even if you are not going to use your custom key store immediately, you might want to\n connect it to verify that all settings are correct and then disconnect it until you are ready\n to use it.

    \n

    For help with failures, see Troubleshooting a Custom Key Store in the\n Key Management Service Developer Guide.

    \n

    \n Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

    \n

    \n Required permissions: kms:CreateCustomKeyStore (IAM policy).

    \n

    \n Related operations:\n

    \n " } }, "com.amazonaws.kms#CreateCustomKeyStoreRequest": { @@ -506,22 +510,19 @@ "CloudHsmClusterId": { "target": "com.amazonaws.kms#CloudHsmClusterIdType", "traits": { - "smithy.api#documentation": "

    Identifies the CloudHSM cluster for the custom key store. Enter the cluster ID of any active\n CloudHSM cluster that is not already associated with a custom key store. To find the cluster ID,\n use the DescribeClusters operation.

    ", - "smithy.api#required": {} + "smithy.api#documentation": "

    Identifies the CloudHSM cluster for the custom key store. Enter the cluster ID of any active\n CloudHSM cluster that is not already associated with a custom key store. To find the cluster ID,\n use the DescribeClusters operation.

    " } }, "TrustAnchorCertificate": { "target": "com.amazonaws.kms#TrustAnchorCertificateType", "traits": { - "smithy.api#documentation": "

    Enter the content of the trust anchor certificate for the cluster. This is the content of\n the customerCA.crt file that you created when you initialized the cluster.

    ", - "smithy.api#required": {} + "smithy.api#documentation": "

    Enter the content of the trust anchor certificate for the cluster. This is the content of\n the customerCA.crt file that you created when you initialized the cluster.

    " } }, "KeyStorePassword": { "target": "com.amazonaws.kms#KeyStorePasswordType", "traits": { - "smithy.api#documentation": "

    Enter the password of the \n kmsuser crypto user\n (CU) account in the specified CloudHSM cluster. KMS logs into the cluster as this\n user to manage key material on your behalf.

    \n

    The password must be a string of 7 to 32 characters. Its value is case sensitive.

    \n

    This parameter tells KMS the kmsuser account password; it does not change\n the password in the CloudHSM cluster.

    ", - "smithy.api#required": {} + "smithy.api#documentation": "

    Enter the password of the \n kmsuser crypto user\n (CU) account in the specified CloudHSM cluster. KMS logs into the cluster as this\n user to manage key material on your behalf.

    \n

    The password must be a string of 7 to 32 characters. Its value is case sensitive.

    \n

    This parameter tells KMS the kmsuser account password; it does not change\n the password in the CloudHSM cluster.

    " } } } @@ -683,7 +684,7 @@ } ], "traits": { - "smithy.api#documentation": "

    Creates a unique customer managed KMS key in your Amazon Web Services account and\n Region.

    \n

    In addition to the required parameters, you can use the optional parameters to specify a key policy, description, tags, and other useful elements for any key type.

    \n \n

    KMS is replacing the term customer master key (CMK) with KMS key and KMS key. The concept has not changed. To prevent breaking changes, KMS is keeping some variations of this term.

    \n
    \n\n

    To create different types of KMS keys, use the following guidance:

    \n\n
    \n
    Symmetric encryption KMS key
    \n
    \n

    To create a symmetric encryption KMS key, you aren't required to specify any parameters. The default value for\n KeySpec, SYMMETRIC_DEFAULT, and the default value for\n KeyUsage, ENCRYPT_DECRYPT, create a symmetric encryption KMS key.

    \n

    If you need a key for basic encryption and decryption or you \n are creating a KMS key to protect your resources in an Amazon Web Services service, create a symmetric encryption KMS key. The key material in a symmetric encryption key never leaves KMS unencrypted. You can use a symmetric encryption KMS key to encrypt and decrypt data up to 4,096 bytes, but they are typically used to generate data keys and data keys pairs. For details, see GenerateDataKey and GenerateDataKeyPair.

    \n

    \n
    \n
    Asymmetric KMS keys
    \n
    \n

    To create an asymmetric KMS key, use the KeySpec parameter to specify\n the type of key material in the KMS key. Then, use the KeyUsage parameter\n to determine whether the KMS key will be used to encrypt and decrypt or sign and verify.\n You can't change these properties after the KMS key is created.

    \n

    Asymmetric KMS keys contain an RSA key pair or an Elliptic Curve (ECC) key pair. The private key in an asymmetric \n KMS key never leaves KMS unencrypted. However, you can use the GetPublicKey operation to download the public key\n so it can be used outside of KMS. KMS keys with RSA key pairs can be used to encrypt or decrypt data or sign and verify messages (but not both). \n KMS keys with ECC key pairs can be used only to sign and verify messages. \n For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

    \n

    \n
    \n
    HMAC KMS key
    \n
    \n

    To create an HMAC KMS key, set the KeySpec parameter to a\n key spec value for HMAC KMS keys. Then set the KeyUsage parameter to\n GENERATE_VERIFY_MAC. You must set the key usage even though\n GENERATE_VERIFY_MAC is the only valid key usage value for HMAC KMS keys.\n You can't change these properties after the KMS key is created.

    \n

    HMAC KMS keys are symmetric keys that never leave KMS unencrypted. You can use\n HMAC keys to generate (GenerateMac) and verify (VerifyMac) HMAC codes for messages up to 4096 bytes.

    \n

    HMAC KMS keys are not supported in all Amazon Web Services Regions. If you try to create an HMAC\n KMS key in an Amazon Web Services Region in which HMAC keys are not supported, the\n CreateKey operation returns an\n UnsupportedOperationException. For a list of Regions in which HMAC KMS keys\n are supported, see HMAC keys in\n KMS in the Key Management Service Developer Guide.

    \n

    \n
    \n
    Multi-Region primary keys
    \n
    Imported key material
    \n
    \n

    To create a multi-Region primary key in the local Amazon Web Services Region,\n use the MultiRegion parameter with a value of True. To create\n a multi-Region replica key, that is, a KMS key with the same key ID\n and key material as a primary key, but in a different Amazon Web Services Region, use the ReplicateKey operation. To change a replica key to a primary key, and its\n primary key to a replica key, use the UpdatePrimaryRegion\n operation.

    \n

    You can create multi-Region KMS keys for all supported KMS key types: symmetric\n encryption KMS keys, HMAC KMS keys, asymmetric encryption KMS keys, and asymmetric\n signing KMS keys. You can also create multi-Region keys with imported key material.\n However, you can't create multi-Region keys in a custom key store.

    \n

    This operation supports multi-Region keys, an KMS feature that lets you create multiple\n interoperable KMS keys in different Amazon Web Services Regions. Because these KMS keys have the same key ID, key\n material, and other metadata, you can use them interchangeably to encrypt data in one Amazon Web Services Region and decrypt\n it in a different Amazon Web Services Region without re-encrypting the data or making a cross-Region call. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

    \n

    \n
    \n
    \n

    To import your own key material, begin by creating a symmetric encryption KMS key with no key\n material. To do this, use the Origin parameter of CreateKey\n with a value of EXTERNAL. Next, use GetParametersForImport operation to get a public key and import token, and use the public key to encrypt\n your key material. Then, use ImportKeyMaterial with your import token\n to import the key material. For step-by-step instructions, see Importing Key Material in the \n Key Management Service Developer Guide\n .

    \n

    This feature supports only symmetric encryption KMS keys, including multi-Region symmetric encryption KMS keys. You cannot import key\n material into any other type of KMS key.

    \n

    To create a multi-Region primary key with imported key material, use the\n Origin parameter of CreateKey with a value of\n EXTERNAL and the MultiRegion parameter with a value of\n True. To create replicas of the multi-Region primary key, use the ReplicateKey operation. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

    \n

    \n
    \n
    Custom key store
    \n
    \n

    To create a symmetric encryption KMS key in a custom key store, use the\n CustomKeyStoreId parameter to specify the custom key store. You must also\n use the Origin parameter with a value of AWS_CLOUDHSM. The\n CloudHSM cluster that is associated with the custom key store must have at least two active\n HSMs in different Availability Zones in the Amazon Web Services Region.

    \n

    Custom key stores support only symmetric encryption KMS keys. You cannot create an\n HMAC KMS key or an asymmetric KMS key in a custom key store. For information about\n custom key stores in KMS see Custom key stores in KMS in\n the \n Key Management Service Developer Guide\n .

    \n
    \n
    \n

    \n Cross-account use: No. You cannot use this operation to\n create a KMS key in a different Amazon Web Services account.

    \n\n

    \n Required permissions: kms:CreateKey (IAM policy). To use the\n Tags parameter, kms:TagResource (IAM policy). For examples and information about related\n permissions, see Allow a user to create\n KMS keys in the Key Management Service Developer Guide.

    \n

    \n Related operations:\n

    \n " + "smithy.api#documentation": "

    Creates a unique customer managed KMS key in your Amazon Web Services account and\n Region.

    \n

    In addition to the required parameters, you can use the optional parameters to specify a key policy, description, tags, and other useful elements for any key type.

    \n \n

    KMS is replacing the term customer master key (CMK) with KMS key and KMS key. The concept has not changed. To prevent breaking changes, KMS is keeping some variations of this term.

    \n
    \n\n

    To create different types of KMS keys, use the following guidance:

    \n\n
    \n
    Symmetric encryption KMS key
    \n
    \n

    To create a symmetric encryption KMS key, you aren't required to specify any parameters. The default value for\n KeySpec, SYMMETRIC_DEFAULT, and the default value for\n KeyUsage, ENCRYPT_DECRYPT, create a symmetric encryption KMS key. For technical details, see\n \n SYMMETRIC_DEFAULT key spec in the Key Management Service Developer Guide.

    \n

    If you need a key for basic encryption and decryption or you \n are creating a KMS key to protect your resources in an Amazon Web Services service, create a symmetric encryption KMS key. The key material in a symmetric encryption key never leaves KMS unencrypted. You can use a symmetric encryption KMS key to encrypt and decrypt data up to 4,096 bytes, but they are typically used to generate data keys and data keys pairs. For details, see GenerateDataKey and GenerateDataKeyPair.

    \n

    \n
    \n
    Asymmetric KMS keys
    \n
    \n

    To create an asymmetric KMS key, use the KeySpec parameter to specify\n the type of key material in the KMS key. Then, use the KeyUsage parameter\n to determine whether the KMS key will be used to encrypt and decrypt or sign and verify.\n You can't change these properties after the KMS key is created.

    \n

    Asymmetric KMS keys contain an RSA key pair, Elliptic Curve (ECC) key pair, or an SM2 key pair (China Regions only). The private key in an asymmetric \n KMS key never leaves KMS unencrypted. However, you can use the GetPublicKey operation to download the public key\n so it can be used outside of KMS. KMS keys with RSA or SM2 key pairs can be used to encrypt or decrypt data or sign and verify messages (but not both). \n KMS keys with ECC key pairs can be used only to sign and verify messages. \n For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

    \n

    \n
    \n
    HMAC KMS key
    \n
    \n

    To create an HMAC KMS key, set the KeySpec parameter to a\n key spec value for HMAC KMS keys. Then set the KeyUsage parameter to\n GENERATE_VERIFY_MAC. You must set the key usage even though\n GENERATE_VERIFY_MAC is the only valid key usage value for HMAC KMS keys.\n You can't change these properties after the KMS key is created.

    \n

    HMAC KMS keys are symmetric keys that never leave KMS unencrypted. You can use\n HMAC keys to generate (GenerateMac) and verify (VerifyMac) HMAC codes for messages up to 4096 bytes.

    \n

    HMAC KMS keys are not supported in all Amazon Web Services Regions. If you try to create an HMAC\n KMS key in an Amazon Web Services Region in which HMAC keys are not supported, the\n CreateKey operation returns an\n UnsupportedOperationException. For a list of Regions in which HMAC KMS keys\n are supported, see HMAC keys in\n KMS in the Key Management Service Developer Guide.

    \n

    \n
    \n
    Multi-Region primary keys
    \n
    Imported key material
    \n
    \n

    To create a multi-Region primary key in the local Amazon Web Services Region,\n use the MultiRegion parameter with a value of True. To create\n a multi-Region replica key, that is, a KMS key with the same key ID\n and key material as a primary key, but in a different Amazon Web Services Region, use the ReplicateKey operation. To change a replica key to a primary key, and its\n primary key to a replica key, use the UpdatePrimaryRegion\n operation.

    \n

    You can create multi-Region KMS keys for all supported KMS key types: symmetric\n encryption KMS keys, HMAC KMS keys, asymmetric encryption KMS keys, and asymmetric\n signing KMS keys. You can also create multi-Region keys with imported key material.\n However, you can't create multi-Region keys in a custom key store.

    \n

    This operation supports multi-Region keys, an KMS feature that lets you create multiple\n interoperable KMS keys in different Amazon Web Services Regions. Because these KMS keys have the same key ID, key\n material, and other metadata, you can use them interchangeably to encrypt data in one Amazon Web Services Region and decrypt\n it in a different Amazon Web Services Region without re-encrypting the data or making a cross-Region call. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

    \n

    \n
    \n
    \n

    To import your own key material, begin by creating a symmetric encryption KMS key with no key\n material. To do this, use the Origin parameter of CreateKey\n with a value of EXTERNAL. Next, use GetParametersForImport operation to get a public key and import token, and use the public key to encrypt\n your key material. Then, use ImportKeyMaterial with your import token\n to import the key material. For step-by-step instructions, see Importing Key Material in the \n Key Management Service Developer Guide\n .

    \n

    This feature supports only symmetric encryption KMS keys, including multi-Region symmetric encryption KMS keys. You cannot import key\n material into any other type of KMS key.

    \n

    To create a multi-Region primary key with imported key material, use the\n Origin parameter of CreateKey with a value of\n EXTERNAL and the MultiRegion parameter with a value of\n True. To create replicas of the multi-Region primary key, use the ReplicateKey operation. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

    \n

    \n
    \n
    Custom key store
    \n
    \n

    To create a symmetric encryption KMS key in a custom key store, use the\n CustomKeyStoreId parameter to specify the custom key store. You must also\n use the Origin parameter with a value of AWS_CLOUDHSM. The\n CloudHSM cluster that is associated with the custom key store must have at least two active\n HSMs in different Availability Zones in the Amazon Web Services Region.

    \n

    Custom key stores support only symmetric encryption KMS keys. You cannot create an\n HMAC KMS key or an asymmetric KMS key in a custom key store. For information about\n custom key stores in KMS see Custom key stores in KMS in\n the \n Key Management Service Developer Guide\n .

    \n
    \n
    \n

    \n Cross-account use: No. You cannot use this operation to\n create a KMS key in a different Amazon Web Services account.

    \n\n

    \n Required permissions: kms:CreateKey (IAM policy). To use the\n Tags parameter, kms:TagResource (IAM policy). For examples and information about related\n permissions, see Allow a user to create\n KMS keys in the Key Management Service Developer Guide.

    \n

    \n Related operations:\n

    \n " } }, "com.amazonaws.kms#CreateKeyRequest": { @@ -692,7 +693,7 @@ "Policy": { "target": "com.amazonaws.kms#PolicyType", "traits": { - "smithy.api#documentation": "

    The key policy to attach to the KMS key. If you do not specify a key policy, KMS attaches a default key policy to the KMS key.\n For more information, see Default key policy in the\n Key Management Service Developer Guide.

    \n

    If you provide a key policy, it must meet the following criteria:

    \n
      \n
    • \n

      If you don't set BypassPolicyLockoutSafetyCheck to True, the key policy\n must allow the principal that is making the CreateKey request to make a\n subsequent PutKeyPolicy request on the KMS key. This reduces the risk\n that the KMS key becomes unmanageable. For more information, refer to the scenario in the\n Default Key Policy section of the \n Key Management Service Developer Guide\n .

      \n
    • \n
    • \n

      Each statement in the key policy must contain one or more principals. The principals\n in the key policy must exist and be visible to KMS. When you create a new Amazon Web Services\n principal (for example, an IAM user or role), you might need to enforce a delay before\n including the new principal in a key policy because the new principal might not be\n immediately visible to KMS. For more information, see Changes that I make are not always immediately visible in the Amazon Web Services\n Identity and Access Management User Guide.

      \n
    • \n
    \n \n

    A key policy document must conform to the following rules.

    \n
      \n
    • \n

      Up to 32 kilobytes (32768 bytes)

      \n
    • \n
    • \n

      Must be UTF-8 encoded

      \n
    • \n
    • \n

      The only Unicode characters that are permitted in a key policy document are the horizontal tab (U+0009), linefeed (U+000A), carriage return (U+000D), and characters in the range U+0020 to U+00FF.

      \n
    • \n
    • \n

      The Sid element in a key policy statement can include spaces. (Spaces are\n prohibited in the Sid element of an IAM policy document.)

      \n
    • \n
    \n

    For help writing and formatting a JSON policy document, see the IAM JSON Policy Reference in the \n Identity and Access Management User Guide\n .

    " + "smithy.api#documentation": "

    The key policy to attach to the KMS key. If you do not specify a key policy, KMS attaches a default key policy to the KMS key.\n For more information, see Default key policy in the\n Key Management Service Developer Guide.

    \n

    If you provide a key policy, it must meet the following criteria:

    \n
      \n
    • \n

      If you don't set BypassPolicyLockoutSafetyCheck to True, the key policy\n must allow the principal that is making the CreateKey request to make a\n subsequent PutKeyPolicy request on the KMS key. This reduces the risk\n that the KMS key becomes unmanageable. For more information, refer to the scenario in the\n Default Key Policy section of the \n Key Management Service Developer Guide\n .

      \n
    • \n
    • \n

      Each statement in the key policy must contain one or more principals. The principals\n in the key policy must exist and be visible to KMS. When you create a new Amazon Web Services\n principal (for example, an IAM user or role), you might need to enforce a delay before\n including the new principal in a key policy because the new principal might not be\n immediately visible to KMS. For more information, see Changes that I make are not always immediately visible in the Amazon Web Services\n Identity and Access Management User Guide.

      \n
    • \n
    \n \n

    A key policy document can include only the following characters:

    \n
      \n
    • \n

      Printable ASCII characters from the space character (\\u0020) through the end of the ASCII character range.

      \n
    • \n
    • \n

      Printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF).

      \n
    • \n
    • \n

      The tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D) special characters

      \n
    • \n
    \n

    For information about key policies, see Key policies in KMS in the\n Key Management Service Developer Guide. For help writing and formatting a JSON policy document, see the IAM JSON Policy Reference in the \n Identity and Access Management User Guide\n .

    " } }, "Description": { @@ -704,7 +705,7 @@ "KeyUsage": { "target": "com.amazonaws.kms#KeyUsageType", "traits": { - "smithy.api#documentation": "

    Determines the cryptographic operations for which you can use the KMS key. The default value is\n ENCRYPT_DECRYPT. This parameter is optional when you are creating a symmetric\n encryption KMS key; otherwise, it is required. You\n can't change the KeyUsage value after the KMS key is created.

    \n

    Select only one valid value.

    \n
      \n
    • \n

      For symmetric encryption KMS keys, omit the parameter or specify\n ENCRYPT_DECRYPT.

      \n
    • \n
    • \n

      For HMAC KMS keys (symmetric), specify GENERATE_VERIFY_MAC.

      \n
    • \n
    • \n

      For asymmetric KMS keys with RSA key material, specify ENCRYPT_DECRYPT or\n SIGN_VERIFY.

      \n
    • \n
    • \n

      For asymmetric KMS keys with ECC key material, specify\n SIGN_VERIFY.

      \n
    • \n
    " + "smithy.api#documentation": "

    Determines the cryptographic operations for which you can use the KMS key. The default value is\n ENCRYPT_DECRYPT. This parameter is optional when you are creating a symmetric\n encryption KMS key; otherwise, it is required. You\n can't change the KeyUsage value after the KMS key is created.

    \n

    Select only one valid value.

    \n
      \n
    • \n

      For symmetric encryption KMS keys, omit the parameter or specify\n ENCRYPT_DECRYPT.

      \n
    • \n
    • \n

      For HMAC KMS keys (symmetric), specify GENERATE_VERIFY_MAC.

      \n
    • \n
    • \n

      For asymmetric KMS keys with RSA key material, specify ENCRYPT_DECRYPT or\n SIGN_VERIFY.

      \n
    • \n
    • \n

      For asymmetric KMS keys with ECC key material, specify\n SIGN_VERIFY.

      \n
    • \n
    • \n

      For asymmetric KMS keys with SM2 key material (China Regions only), specify ENCRYPT_DECRYPT or\n SIGN_VERIFY.

      \n
    • \n
    " } }, "CustomerMasterKeySpec": { @@ -719,7 +720,7 @@ "KeySpec": { "target": "com.amazonaws.kms#KeySpec", "traits": { - "smithy.api#documentation": "

    Specifies the type of KMS key to create. The default value,\n SYMMETRIC_DEFAULT, creates a KMS key with a 256-bit symmetric key for encryption\n and decryption. For help choosing a key spec for your KMS key, see Choosing a KMS key type in the \n Key Management Service Developer Guide\n .

    \n

    The KeySpec determines whether the KMS key contains a symmetric key or an\n asymmetric key pair. It also determines the cryptographic algorithms that the KMS key supports. You can't\n change the KeySpec after the KMS key is created.\n To further restrict the algorithms that can be used with the KMS key, use a condition key in\n its key policy or IAM policy. For more information, see kms:EncryptionAlgorithm, kms:MacAlgorithm or kms:Signing Algorithm in the \n Key Management Service Developer Guide\n .

    \n \n

    \n Amazon Web Services services that\n are integrated with KMS use symmetric encryption KMS keys to protect your data.\n These services do not support asymmetric KMS keys or HMAC KMS keys.

    \n
    \n

    KMS supports the following key specs for KMS keys:

    \n
      \n
    • \n

      Symmetric encryption key (default)

      \n
        \n
      • \n

        \n SYMMETRIC_DEFAULT (AES-256-GCM)

        \n
      • \n
      \n
    • \n
    • \n

      HMAC keys (symmetric)

      \n
        \n
      • \n

        \n HMAC_224\n

        \n
      • \n
      • \n

        \n HMAC_256\n

        \n
      • \n
      • \n

        \n HMAC_384\n

        \n
      • \n
      • \n

        \n HMAC_512\n

        \n
      • \n
      \n
    • \n
    • \n

      Asymmetric RSA key pairs

      \n
        \n
      • \n

        \n RSA_2048\n

        \n
      • \n
      • \n

        \n RSA_3072\n

        \n
      • \n
      • \n

        \n RSA_4096\n

        \n
      • \n
      \n
    • \n
    • \n

      Asymmetric NIST-recommended elliptic curve key pairs

      \n
        \n
      • \n

        \n ECC_NIST_P256 (secp256r1)

        \n
      • \n
      • \n

        \n ECC_NIST_P384 (secp384r1)

        \n
      • \n
      • \n

        \n ECC_NIST_P521 (secp521r1)

        \n
      • \n
      \n
    • \n
    • \n

      Other asymmetric elliptic curve key pairs

      \n
        \n
      • \n

        \n ECC_SECG_P256K1 (secp256k1), commonly used for\n cryptocurrencies.

        \n
      • \n
      \n
    • \n
    " + "smithy.api#documentation": "

    Specifies the type of KMS key to create. The default value,\n SYMMETRIC_DEFAULT, creates a KMS key with a 256-bit AES-GCM key that is used for encryption and decryption, except in China Regions, \n where it creates a 128-bit symmetric key that uses SM4 encryption. For help choosing a key spec for your KMS key, see Choosing a KMS key type in the \n Key Management Service Developer Guide\n .

    \n

    The KeySpec determines whether the KMS key contains a symmetric key or an\n asymmetric key pair. It also determines the cryptographic algorithms that the KMS key supports. You can't\n change the KeySpec after the KMS key is created.\n To further restrict the algorithms that can be used with the KMS key, use a condition key in\n its key policy or IAM policy. For more information, see kms:EncryptionAlgorithm, kms:MacAlgorithm or kms:Signing Algorithm in the \n Key Management Service Developer Guide\n .

    \n \n

    \n Amazon Web Services services that\n are integrated with KMS use symmetric encryption KMS keys to protect your data.\n These services do not support asymmetric KMS keys or HMAC KMS keys.

    \n
    \n

    KMS supports the following key specs for KMS keys:

    \n
      \n
    • \n

      Symmetric encryption key (default)

      \n
        \n
      • \n

        \n SYMMETRIC_DEFAULT\n

        \n
      • \n
      \n
    • \n
    • \n

      HMAC keys (symmetric)

      \n
        \n
      • \n

        \n HMAC_224\n

        \n
      • \n
      • \n

        \n HMAC_256\n

        \n
      • \n
      • \n

        \n HMAC_384\n

        \n
      • \n
      • \n

        \n HMAC_512\n

        \n
      • \n
      \n
    • \n
    • \n

      Asymmetric RSA key pairs

      \n
        \n
      • \n

        \n RSA_2048\n

        \n
      • \n
      • \n

        \n RSA_3072\n

        \n
      • \n
      • \n

        \n RSA_4096\n

        \n
      • \n
      \n
    • \n
    • \n

      Asymmetric NIST-recommended elliptic curve key pairs

      \n
        \n
      • \n

        \n ECC_NIST_P256 (secp256r1)

        \n
      • \n
      • \n

        \n ECC_NIST_P384 (secp384r1)

        \n
      • \n
      • \n

        \n ECC_NIST_P521 (secp521r1)

        \n
      • \n
      \n
    • \n
    • \n

      Other asymmetric elliptic curve key pairs

      \n
        \n
      • \n

        \n ECC_SECG_P256K1 (secp256k1), commonly used for\n cryptocurrencies.

        \n
      • \n
      \n
    • \n
    • \n

      SM2 key pairs (China Regions only)

      \n
        \n
      • \n

        \n SM2\n

        \n
      • \n
      \n
    • \n
    " } }, "Origin": { @@ -731,7 +732,7 @@ "CustomKeyStoreId": { "target": "com.amazonaws.kms#CustomKeyStoreIdType", "traits": { - "smithy.api#documentation": "

    Creates the KMS key in the specified custom key store and the key material in its\n associated CloudHSM cluster. To create a KMS key in a custom key store, you must also specify the\n Origin parameter with a value of AWS_CLOUDHSM. The CloudHSM cluster\n that is associated with the custom key store must have at least two active HSMs, each in a\n different Availability Zone in the Region.

    \n

    This parameter is valid only for symmetric encryption KMS keys in a single Region. You \n cannot create any other type of KMS key in a custom key store.

    \n

    To find the ID of a custom key store, use the DescribeCustomKeyStores operation.

    \n

    The response includes the custom key store ID and the ID of the CloudHSM cluster.

    \n

    This operation is part of the Custom Key Store feature feature in KMS, which\ncombines the convenience and extensive integration of KMS with the isolation and control of a\nsingle-tenant key store.

    " + "smithy.api#documentation": "

    Creates the KMS key in the specified custom key store and the key material in its\n associated CloudHSM cluster. To create a KMS key in a custom key store, you must also specify the\n Origin parameter with a value of AWS_CLOUDHSM. The CloudHSM cluster\n that is associated with the custom key store must have at least two active HSMs, each in a\n different Availability Zone in the Region.

    \n

    This parameter is valid only for symmetric encryption KMS keys in a single Region. You \n cannot create any other type of KMS key in a custom key store.

    \n

    To find the ID of a custom key store, use the DescribeCustomKeyStores operation.

    \n

    The response includes the custom key store ID and the ID of the CloudHSM cluster.

    \n

    This operation is part of the custom key store feature in KMS, which\ncombines the convenience and extensive integration of KMS with the isolation and control of a\nsingle-tenant key store.

    " } }, "BypassPolicyLockoutSafetyCheck": { @@ -961,6 +962,10 @@ { "value": "HMAC_512", "name": "HMAC_512" + }, + { + "value": "SM2", + "name": "SM2" } ] } @@ -996,6 +1001,10 @@ { "value": "ECC_SECG_P256K1", "name": "ECC_SECG_P256K1" + }, + { + "value": "SM2", + "name": "SM2" } ] } @@ -1182,7 +1191,7 @@ } ], "traits": { - "smithy.api#documentation": "

    Deletes a custom key store. This operation does not delete the CloudHSM cluster that is\n associated with the custom key store, or affect any users or keys in the cluster.

    \n

    The custom key store that you delete cannot contain any KMS KMS keys. Before deleting the key store,\n verify that you will never need to use any of the KMS keys in the key store for any\n cryptographic operations. Then, use ScheduleKeyDeletion to delete the KMS keys from the\n key store. When the scheduled waiting period expires, the ScheduleKeyDeletion\n operation deletes the KMS keys. Then it makes a best effort to delete the key material from\n the associated cluster. However, you might need to manually delete the orphaned key\n material from the cluster and its backups.

    \n

    After all KMS keys are deleted from KMS, use DisconnectCustomKeyStore\n to disconnect the key store from KMS. Then, you can delete the custom key store.

    \n

    Instead of deleting the custom key store, consider using DisconnectCustomKeyStore to disconnect it from KMS. While the key store is\n disconnected, you cannot create or use the KMS keys in the key store. But, you do not need to\n delete KMS keys and you can reconnect a disconnected custom key store at any time.

    \n

    If the operation succeeds, it returns a JSON object with no\nproperties.

    \n

    This operation is part of the Custom Key Store feature feature in KMS, which\ncombines the convenience and extensive integration of KMS with the isolation and control of a\nsingle-tenant key store.

    \n

    \n Cross-account use: No.\n You cannot perform this operation on a custom key store in a different Amazon Web Services account.

    \n\n

    \n Required permissions: kms:DeleteCustomKeyStore (IAM policy)

    \n

    \n Related operations:\n

    \n " + "smithy.api#documentation": "

    Deletes a custom key store. This operation does not delete the CloudHSM cluster that is\n associated with the custom key store, or affect any users or keys in the cluster.

    \n

    The custom key store that you delete cannot contain any KMS keys. Before deleting the key store,\n verify that you will never need to use any of the KMS keys in the key store for any\n cryptographic operations. Then, use ScheduleKeyDeletion to delete the KMS keys from the\n key store. When the scheduled waiting period expires, the ScheduleKeyDeletion\n operation deletes the KMS keys. Then it makes a best effort to delete the key material from\n the associated cluster. However, you might need to manually delete the orphaned key\n material from the cluster and its backups.

    \n

    After all KMS keys are deleted from KMS, use DisconnectCustomKeyStore\n to disconnect the key store from KMS. Then, you can delete the custom key store.

    \n

    Instead of deleting the custom key store, consider using DisconnectCustomKeyStore to disconnect it from KMS. While the key store is\n disconnected, you cannot create or use the KMS keys in the key store. But, you do not need to\n delete KMS keys and you can reconnect a disconnected custom key store at any time.

    \n

    If the operation succeeds, it returns a JSON object with no\nproperties.

    \n

    This operation is part of the custom key store feature in KMS, which\ncombines the convenience and extensive integration of KMS with the isolation and control of a\nsingle-tenant key store.

    \n

    \n Cross-account use: No.\n You cannot perform this operation on a custom key store in a different Amazon Web Services account.

    \n\n

    \n Required permissions: kms:DeleteCustomKeyStore (IAM policy)

    \n

    \n Related operations:\n

    \n " } }, "com.amazonaws.kms#DeleteCustomKeyStoreRequest": { @@ -1282,7 +1291,13 @@ } ], "traits": { - "smithy.api#documentation": "

    Gets information about custom key stores in the account and Region.

    \n

    This operation is part of the Custom Key Store feature feature in KMS, which\ncombines the convenience and extensive integration of KMS with the isolation and control of a\nsingle-tenant key store.

    \n

    By default, this operation returns information about all custom key\n stores in the account and Region. To get only information about a particular custom key store,\n use either the CustomKeyStoreName or CustomKeyStoreId parameter (but\n not both).

    \n

    To determine whether the custom key store is connected to its CloudHSM cluster, use the\n ConnectionState element in the response. If an attempt to connect the custom\n key store failed, the ConnectionState value is FAILED and the\n ConnectionErrorCode element in the response indicates the cause of the failure.\n For help interpreting the ConnectionErrorCode, see CustomKeyStoresListEntry.

    \n

    Custom key stores have a DISCONNECTED connection state if the key store has\n never been connected or you use the DisconnectCustomKeyStore operation to\n disconnect it. If your custom key store state is CONNECTED but you are having\n trouble using it, make sure that its associated CloudHSM cluster is active and contains the\n minimum number of HSMs required for the operation, if any.

    \n

    For help repairing your custom key store, see the Troubleshooting Custom Key Stores topic in the\n Key Management Service Developer Guide.

    \n

    \n Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

    \n

    \n Required permissions: kms:DescribeCustomKeyStores (IAM policy)

    \n

    \n Related operations:\n

    \n " + "smithy.api#documentation": "

    Gets information about custom key stores in the account and Region.

    \n

    This operation is part of the custom key store feature in KMS, which\ncombines the convenience and extensive integration of KMS with the isolation and control of a\nsingle-tenant key store.

    \n

    By default, this operation returns information about all custom key\n stores in the account and Region. To get only information about a particular custom key store,\n use either the CustomKeyStoreName or CustomKeyStoreId parameter (but\n not both).

    \n

    To determine whether the custom key store is connected to its CloudHSM cluster, use the\n ConnectionState element in the response. If an attempt to connect the custom\n key store failed, the ConnectionState value is FAILED and the\n ConnectionErrorCode element in the response indicates the cause of the failure.\n For help interpreting the ConnectionErrorCode, see CustomKeyStoresListEntry.

    \n

    Custom key stores have a DISCONNECTED connection state if the key store has\n never been connected or you use the DisconnectCustomKeyStore operation to\n disconnect it. If your custom key store state is CONNECTED but you are having\n trouble using it, make sure that its associated CloudHSM cluster is active and contains the\n minimum number of HSMs required for the operation, if any.

    \n

    For help repairing your custom key store, see the Troubleshooting Custom Key Stores topic in the\n Key Management Service Developer Guide.

    \n

    \n Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

    \n

    \n Required permissions: kms:DescribeCustomKeyStores (IAM policy)

    \n

    \n Related operations:\n

    \n ", + "smithy.api#paginated": { + "inputToken": "Marker", + "outputToken": "NextMarker", + "items": "CustomKeyStores", + "pageSize": "Limit" + } } }, "com.amazonaws.kms#DescribeCustomKeyStoresRequest": { @@ -1360,7 +1375,7 @@ } ], "traits": { - "smithy.api#documentation": "

    Provides detailed information about a KMS key. You can run DescribeKey on a\n customer managed\n key or an Amazon Web Services managed key.

    \n

    This detailed information includes the key ARN, creation date (and deletion date, if\n applicable), the key state, and the origin and expiration date (if any) of the key material.\n It includes fields, like KeySpec, that help you distinguish different types of KMS keys. It also displays the key usage (encryption, signing, or generating and verifying MACs) and the algorithms that the KMS key supports. For KMS keys in custom key stores, it includes\n information about the custom key store, such as the key store ID and the CloudHSM cluster ID. For\n multi-Region keys, it displays the primary key and all related replica keys.

    \n

    \n DescribeKey does not return the following information:

    \n
      \n
    • \n

      Aliases associated with the KMS key. To get this information, use ListAliases.

      \n
    • \n
    • \n

      Whether automatic key rotation is enabled on the KMS key. To get this information, use\n GetKeyRotationStatus. Also, some key states prevent a KMS key from\n being automatically rotated. For details, see How Automatic Key Rotation\n Works in Key Management Service Developer Guide.

      \n
    • \n
    • \n

      Tags on the KMS key. To get this information, use ListResourceTags.

      \n
    • \n
    • \n

      Key policies and grants on the KMS key. To get this information, use GetKeyPolicy and ListGrants.

      \n
    • \n
    \n

    In general, DescribeKey is a non-mutating operation. It returns data about\n KMS keys, but doesn't change them. However, Amazon Web Services services use DescribeKey to\n create Amazon Web Services\n managed keys from a predefined Amazon Web Services alias with no key\n ID.

    \n

    \n Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify\n the key ARN or alias ARN in the value of the KeyId parameter.

    \n\n

    \n Required permissions: kms:DescribeKey (key policy)

    \n

    \n Related operations:\n

    \n " + "smithy.api#documentation": "

    Provides detailed information about a KMS key. You can run DescribeKey on a\n customer managed\n key or an Amazon Web Services managed key.

    \n

    This detailed information includes the key ARN, creation date (and deletion date, if\n applicable), the key state, and the origin and expiration date (if any) of the key material.\n It includes fields, like KeySpec, that help you distinguish different types of KMS keys. It also displays the key usage (encryption, signing, or generating and verifying MACs) and the algorithms that the KMS key supports. For KMS keys in custom key stores, it includes\n information about the custom key store, such as the key store ID and the CloudHSM cluster ID. For\n multi-Region keys, it displays the primary key and all related replica keys.

    \n

    \n DescribeKey does not return the following information:

    \n
      \n
    • \n

      Aliases associated with the KMS key. To get this information, use ListAliases.

      \n
    • \n
    • \n

      Whether automatic key rotation is enabled on the KMS key. To get this information, use\n GetKeyRotationStatus. Also, some key states prevent a KMS key from\n being automatically rotated. For details, see How Automatic Key Rotation\n Works in the Key Management Service Developer Guide.

      \n
    • \n
    • \n

      Tags on the KMS key. To get this information, use ListResourceTags.

      \n
    • \n
    • \n

      Key policies and grants on the KMS key. To get this information, use GetKeyPolicy and ListGrants.

      \n
    • \n
    \n

    In general, DescribeKey is a non-mutating operation. It returns data about\n KMS keys, but doesn't change them. However, Amazon Web Services services use DescribeKey to\n create Amazon Web Services\n managed keys from a predefined Amazon Web Services alias with no key\n ID.

    \n

    \n Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify\n the key ARN or alias ARN in the value of the KeyId parameter.

    \n\n

    \n Required permissions: kms:DescribeKey (key policy)

    \n

    \n Related operations:\n

    \n " } }, "com.amazonaws.kms#DescribeKeyRequest": { @@ -1526,7 +1541,7 @@ } ], "traits": { - "smithy.api#documentation": "

    Disconnects the custom key store from its associated CloudHSM cluster. While a custom key\n store is disconnected, you can manage the custom key store and its KMS keys, but you cannot\n create or use KMS keys in the custom key store. You can reconnect the custom key store at any\n time.

    \n \n

    While a custom key store is disconnected, all attempts to create KMS keys in the custom key store or to use existing KMS keys in cryptographic operations will\n fail. This action can prevent users from storing and accessing sensitive data.

    \n
    \n

    \n

    To find the connection state of a custom key store, use the DescribeCustomKeyStores operation. To reconnect a custom key store, use the\n ConnectCustomKeyStore operation.

    \n

    If the operation succeeds, it returns a JSON object with no\nproperties.

    \n

    This operation is part of the Custom Key Store feature feature in KMS, which\ncombines the convenience and extensive integration of KMS with the isolation and control of a\nsingle-tenant key store.

    \n\n

    \n Cross-account use: No.\n You cannot perform this operation on a custom key store in a different Amazon Web Services account.

    \n\n

    \n Required permissions: kms:DisconnectCustomKeyStore (IAM policy)

    \n

    \n Related operations:\n

    \n " + "smithy.api#documentation": "

    Disconnects the custom key store from its associated CloudHSM cluster. While a custom key\n store is disconnected, you can manage the custom key store and its KMS keys, but you cannot\n create or use KMS keys in the custom key store. You can reconnect the custom key store at any\n time.

    \n \n

    While a custom key store is disconnected, all attempts to create KMS keys in the custom key store or to use existing KMS keys in cryptographic operations will\n fail. This action can prevent users from storing and accessing sensitive data.

    \n
    \n

    \n

    To find the connection state of a custom key store, use the DescribeCustomKeyStores operation. To reconnect a custom key store, use the\n ConnectCustomKeyStore operation.

    \n

    If the operation succeeds, it returns a JSON object with no\nproperties.

    \n

    This operation is part of the custom key store feature in KMS, which\ncombines the convenience and extensive integration of KMS with the isolation and control of a\nsingle-tenant key store.

    \n\n

    \n Cross-account use: No.\n You cannot perform this operation on a custom key store in a different Amazon Web Services account.

    \n\n

    \n Required permissions: kms:DisconnectCustomKeyStore (IAM policy)

    \n

    \n Related operations:\n

    \n " } }, "com.amazonaws.kms#DisconnectCustomKeyStoreRequest": { @@ -1671,7 +1686,7 @@ } ], "traits": { - "smithy.api#documentation": "

    Encrypts plaintext of up to 4,096 bytes using a KMS key. You can use a symmetric or\n asymmetric KMS key with a KeyUsage of ENCRYPT_DECRYPT.

    \n

    You can use this operation to encrypt small amounts of arbitrary data, such as a personal identifier or\n database password, or other sensitive information. You don't need to use the Encrypt operation to encrypt a data key. The GenerateDataKey and GenerateDataKeyPair operations return a\n plaintext data key and an encrypted copy of that data key.

    \n\n

    If you use a symmetric encryption KMS key, you can use an encryption context to add additional\n security to your encryption operation. If you specify an EncryptionContext when\n encrypting data, you must specify the same encryption context (a case-sensitive exact match)\n when decrypting the data. Otherwise, the request to decrypt fails with an\n InvalidCiphertextException. For more information, see Encryption\n Context in the Key Management Service Developer Guide.

    \n

    If you specify an asymmetric KMS key, you must also specify the encryption algorithm. The\n algorithm must be compatible with the KMS key type.

    \n \n

    When you use an asymmetric KMS key to encrypt or reencrypt data, be sure to record the KMS key and encryption algorithm that you choose. You will be required to provide the same KMS key and encryption algorithm when you decrypt the data. If the KMS key and algorithm do not match the values used to encrypt the data, the decrypt operation fails.

    \n

    You are not required to supply the key ID and encryption algorithm when you decrypt with symmetric encryption KMS keys because KMS stores this information in the ciphertext blob. KMS cannot store metadata in ciphertext generated with asymmetric keys. The standard format for asymmetric key ciphertext does not include configurable fields.

    \n
    \n\n\n

    The maximum size of the data that you can encrypt varies with the type of KMS key and the\n encryption algorithm that you choose.

    \n
      \n
    • \n

      Symmetric encryption KMS keys

      \n
        \n
      • \n

        \n SYMMETRIC_DEFAULT: 4096 bytes

        \n
      • \n
      \n
    • \n
    • \n

      \n RSA_2048\n

      \n
        \n
      • \n

        \n RSAES_OAEP_SHA_1: 214 bytes

        \n
      • \n
      • \n

        \n RSAES_OAEP_SHA_256: 190 bytes

        \n
      • \n
      \n
    • \n
    • \n

      \n RSA_3072\n

      \n
        \n
      • \n

        \n RSAES_OAEP_SHA_1: 342 bytes

        \n
      • \n
      • \n

        \n RSAES_OAEP_SHA_256: 318 bytes

        \n
      • \n
      \n
    • \n
    • \n

      \n RSA_4096\n

      \n
        \n
      • \n

        \n RSAES_OAEP_SHA_1: 470 bytes

        \n
      • \n
      • \n

        \n RSAES_OAEP_SHA_256: 446 bytes

        \n
      • \n
      \n
    • \n
    \n

    The KMS key that you use for this operation must be in a compatible key state. For\ndetails, see Key states of KMS keys in the Key Management Service Developer Guide.

    \n

    \n Cross-account use: Yes.\n To perform this operation with a KMS key in a different Amazon Web Services account, specify\n the key ARN or alias ARN in the value of the KeyId parameter.

    \n\n

    \n Required permissions: kms:Encrypt (key policy)

    \n

    \n Related operations:\n

    \n " + "smithy.api#documentation": "

    Encrypts plaintext of up to 4,096 bytes using a KMS key. You can use a symmetric or\n asymmetric KMS key with a KeyUsage of ENCRYPT_DECRYPT.

    \n

    You can use this operation to encrypt small amounts of arbitrary data, such as a personal identifier or\n database password, or other sensitive information. You don't need to use the Encrypt operation to encrypt a data key. The GenerateDataKey and GenerateDataKeyPair operations return a\n plaintext data key and an encrypted copy of that data key.

    \n\n

    If you use a symmetric encryption KMS key, you can use an encryption context to add additional\n security to your encryption operation. If you specify an EncryptionContext when\n encrypting data, you must specify the same encryption context (a case-sensitive exact match)\n when decrypting the data. Otherwise, the request to decrypt fails with an\n InvalidCiphertextException. For more information, see Encryption\n Context in the Key Management Service Developer Guide.

    \n

    If you specify an asymmetric KMS key, you must also specify the encryption algorithm. The\n algorithm must be compatible with the KMS key spec.

    \n \n

    When you use an asymmetric KMS key to encrypt or reencrypt data, be sure to record the KMS key and encryption algorithm that you choose. You will be required to provide the same KMS key and encryption algorithm when you decrypt the data. If the KMS key and algorithm do not match the values used to encrypt the data, the decrypt operation fails.

    \n

    You are not required to supply the key ID and encryption algorithm when you decrypt with symmetric encryption KMS keys because KMS stores this information in the ciphertext blob. KMS cannot store metadata in ciphertext generated with asymmetric keys. The standard format for asymmetric key ciphertext does not include configurable fields.

    \n
    \n\n\n

    The maximum size of the data that you can encrypt varies with the type of KMS key and the\n encryption algorithm that you choose.

    \n
      \n
    • \n

      Symmetric encryption KMS keys

      \n
        \n
      • \n

        \n SYMMETRIC_DEFAULT: 4096 bytes

        \n
      • \n
      \n
    • \n
    • \n

      \n RSA_2048\n

      \n
        \n
      • \n

        \n RSAES_OAEP_SHA_1: 214 bytes

        \n
      • \n
      • \n

        \n RSAES_OAEP_SHA_256: 190 bytes

        \n
      • \n
      \n
    • \n
    • \n

      \n RSA_3072\n

      \n
        \n
      • \n

        \n RSAES_OAEP_SHA_1: 342 bytes

        \n
      • \n
      • \n

        \n RSAES_OAEP_SHA_256: 318 bytes

        \n
      • \n
      \n
    • \n
    • \n

      \n RSA_4096\n

      \n
        \n
      • \n

        \n RSAES_OAEP_SHA_1: 470 bytes

        \n
      • \n
      • \n

        \n RSAES_OAEP_SHA_256: 446 bytes

        \n
      • \n
      \n
    • \n
    • \n

      \n SM2PKE: 1024 bytes (China Regions only)

      \n
    • \n
    \n

    The KMS key that you use for this operation must be in a compatible key state. For\ndetails, see Key states of KMS keys in the Key Management Service Developer Guide.

    \n

    \n Cross-account use: Yes.\n To perform this operation with a KMS key in a different Amazon Web Services account, specify\n the key ARN or alias ARN in the value of the KeyId parameter.

    \n\n

    \n Required permissions: kms:Encrypt (key policy)

    \n

    \n Related operations:\n

    \n " } }, "com.amazonaws.kms#EncryptRequest": { @@ -1749,6 +1764,10 @@ { "value": "RSAES_OAEP_SHA_256", "name": "RSAES_OAEP_SHA_256" + }, + { + "value": "SM2PKE", + "name": "SM2PKE" } ] } @@ -1844,7 +1863,7 @@ } ], "traits": { - "smithy.api#documentation": "

    Returns a unique symmetric data key for use outside of KMS. This operation returns a\n plaintext copy of the data key and a copy that is encrypted under a symmetric encryption KMS\n key that you specify. The bytes in the plaintext key are random; they are not related to the caller or the KMS\n key. You can use the plaintext key to encrypt your data outside of KMS and store the encrypted\n data key with the encrypted data.

    \n\n

    To generate a data key, specify the symmetric encryption KMS key that will be used to\n encrypt the data key. You cannot use an asymmetric KMS key to encrypt data keys. To get the\n type of your KMS key, use the DescribeKey operation. You must also specify the length of\n the data key. Use either the KeySpec or NumberOfBytes parameters\n (but not both). For 128-bit and 256-bit data keys, use the KeySpec parameter.

    \n\n

    To get only an encrypted copy of the data key, use GenerateDataKeyWithoutPlaintext. To generate an asymmetric data key pair, use\n the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext operation. To get a cryptographically secure\n random byte string, use GenerateRandom.

    \n\n

    You can use an optional encryption context to add additional security to the encryption\n operation. If you specify an EncryptionContext, you must specify the same\n encryption context (a case-sensitive exact match) when decrypting the encrypted data key.\n Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the\n Key Management Service Developer Guide.

    \n

    Applications in Amazon Web Services Nitro Enclaves can call this operation by using the Amazon Web Services Nitro Enclaves Development Kit. For information about the supporting parameters, see How Amazon Web Services Nitro Enclaves use KMS in the Key Management Service Developer Guide.

    \n

    The KMS key that you use for this operation must be in a compatible key state. For\ndetails, see Key states of KMS keys in the Key Management Service Developer Guide.

    \n

    \n How to use your data\n key\n

    \n

    We recommend that you use the following pattern to encrypt data locally in your\n application. You can write your own code or use a client-side encryption library, such as the\n Amazon Web Services Encryption SDK, the\n Amazon DynamoDB Encryption Client,\n or Amazon S3\n client-side encryption to do these tasks for you.

    \n

    To encrypt data outside of KMS:

    \n
      \n
    1. \n

      Use the GenerateDataKey operation to get a data key.

      \n
    2. \n
    3. \n

      Use the plaintext data key (in the Plaintext field of the response) to\n encrypt your data outside of KMS. Then erase the plaintext data key from memory.

      \n
    4. \n
    5. \n

      Store the encrypted data key (in the CiphertextBlob field of the\n response) with the encrypted data.

      \n
    6. \n
    \n

    To decrypt data outside of KMS:

    \n
      \n
    1. \n

      Use the Decrypt operation to decrypt the encrypted data key. The\n operation returns a plaintext copy of the data key.

      \n
    2. \n
    3. \n

      Use the plaintext data key to decrypt data outside of KMS, then erase the plaintext\n data key from memory.

      \n
    4. \n
    \n

    \n Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify\n the key ARN or alias ARN in the value of the KeyId parameter.

    \n\n

    \n Required permissions: kms:GenerateDataKey (key policy)

    \n

    \n Related operations:\n

    \n " + "smithy.api#documentation": "

    Returns a unique symmetric data key for use outside of KMS. This operation returns a\n plaintext copy of the data key and a copy that is encrypted under a symmetric encryption KMS\n key that you specify. The bytes in the plaintext key are random; they are not related \n to the caller or the KMS key. You can use the plaintext key to encrypt your data outside of KMS \n and store the encrypted data key with the encrypted data.

    \n\n

    To generate a data key, specify the symmetric encryption KMS key that will be used to\n encrypt the data key. You cannot use an asymmetric KMS key to encrypt data keys. To get the\n type of your KMS key, use the DescribeKey operation.

    \n \n

    You must also specify the length of the data key. Use either the KeySpec or \n NumberOfBytes parameters (but not both). For 128-bit and 256-bit data keys, use \n the KeySpec parameter.

    \n \n

    To generate an SM4 data key (China Regions only), specify a KeySpec value of\n AES_128 or NumberOfBytes value of 128. The symmetric \n encryption key used in China Regions to encrypt your data key is an SM4 encryption key.

    \n\n

    To get only an encrypted copy of the data key, use GenerateDataKeyWithoutPlaintext. To generate an asymmetric data key pair, use\n the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext operation. To get a cryptographically secure\n random byte string, use GenerateRandom.

    \n\n

    You can use an optional encryption context to add additional security to the encryption\n operation. If you specify an EncryptionContext, you must specify the same\n encryption context (a case-sensitive exact match) when decrypting the encrypted data key.\n Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the\n Key Management Service Developer Guide.

    \n

    Applications in Amazon Web Services Nitro Enclaves can call this operation by using the Amazon Web Services Nitro Enclaves Development Kit. For information about the supporting parameters, see How Amazon Web Services Nitro Enclaves use KMS in the Key Management Service Developer Guide.

    \n

    The KMS key that you use for this operation must be in a compatible key state. For\ndetails, see Key states of KMS keys in the Key Management Service Developer Guide.

    \n

    \n How to use your data\n key\n

    \n

    We recommend that you use the following pattern to encrypt data locally in your\n application. You can write your own code or use a client-side encryption library, such as the\n Amazon Web Services Encryption SDK, the\n Amazon DynamoDB Encryption Client,\n or Amazon S3\n client-side encryption to do these tasks for you.

    \n

    To encrypt data outside of KMS:

    \n
      \n
    1. \n

      Use the GenerateDataKey operation to get a data key.

      \n
    2. \n
    3. \n

      Use the plaintext data key (in the Plaintext field of the response) to\n encrypt your data outside of KMS. Then erase the plaintext data key from memory.

      \n
    4. \n
    5. \n

      Store the encrypted data key (in the CiphertextBlob field of the\n response) with the encrypted data.

      \n
    6. \n
    \n

    To decrypt data outside of KMS:

    \n
      \n
    1. \n

      Use the Decrypt operation to decrypt the encrypted data key. The\n operation returns a plaintext copy of the data key.

      \n
    2. \n
    3. \n

      Use the plaintext data key to decrypt data outside of KMS, then erase the plaintext\n data key from memory.

      \n
    4. \n
    \n

    \n Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify\n the key ARN or alias ARN in the value of the KeyId parameter.

    \n\n

    \n Required permissions: kms:GenerateDataKey (key policy)

    \n

    \n Related operations:\n

    \n " } }, "com.amazonaws.kms#GenerateDataKeyPair": { @@ -1885,7 +1904,7 @@ } ], "traits": { - "smithy.api#documentation": "

    Returns a unique asymmetric data key pair for use outside of KMS. This operation returns\n a plaintext public key, a plaintext private key, and a copy of the private key that is\n encrypted under the symmetric encryption KMS key you specify. You can use the data key pair to\n perform asymmetric cryptography and implement digital signatures outside of KMS. The bytes\n in the keys are random; they are not related to the caller or to the KMS key that is used to encrypt the\n private key.

    \n\n

    You can use the public key that GenerateDataKeyPair returns to encrypt data\n or verify a signature outside of KMS. Then, store the encrypted private key with the data.\n When you are ready to decrypt data or sign a message, you can use the Decrypt operation to decrypt the encrypted private key.

    \n\n

    To generate a data key pair, you must specify a symmetric encryption KMS key to encrypt\n the private key in a data key pair. You cannot use an asymmetric KMS key or a KMS key in a\n custom key store. To get the type and origin of your KMS key, use the DescribeKey\n operation.

    \n

    Use the KeyPairSpec parameter to choose an RSA or Elliptic Curve (ECC) data\n key pair. KMS recommends that you use ECC key pairs for signing, and use RSA key pairs for\n either encryption or signing, but not both. However, KMS cannot enforce any restrictions on\n the use of data key pairs outside of KMS.

    \n\n

    If you are using the data key pair to encrypt data, or for any operation where you don't\n immediately need a private key, consider using the GenerateDataKeyPairWithoutPlaintext operation.\n GenerateDataKeyPairWithoutPlaintext returns a plaintext public key and an\n encrypted private key, but omits the plaintext private key that you need only to decrypt\n ciphertext or sign a message. Later, when you need to decrypt the data or sign a message, use\n the Decrypt operation to decrypt the encrypted private key in the data key\n pair.

    \n\n

    \n GenerateDataKeyPair returns a unique data key pair for each request. The\n bytes in the keys are random; they are not related to the caller or the KMS key that is used to encrypt the\n private key. The public key is a DER-encoded X.509 SubjectPublicKeyInfo, as specified in\n RFC 5280. The private key is a\n DER-encoded PKCS8 PrivateKeyInfo, as specified in RFC 5958.

    \n\n

    You can use an optional encryption context to add additional security to the encryption\n operation. If you specify an EncryptionContext, you must specify the same\n encryption context (a case-sensitive exact match) when decrypting the encrypted data key.\n Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the\n Key Management Service Developer Guide.

    \n

    The KMS key that you use for this operation must be in a compatible key state. For\ndetails, see Key states of KMS keys in the Key Management Service Developer Guide.

    \n

    \n Cross-account\n use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify\n the key ARN or alias ARN in the value of the KeyId parameter.

    \n\n

    \n Required permissions: kms:GenerateDataKeyPair (key policy)

    \n

    \n Related operations:\n

    \n " + "smithy.api#documentation": "

    Returns a unique asymmetric data key pair for use outside of KMS. This operation returns\n a plaintext public key, a plaintext private key, and a copy of the private key that is\n encrypted under the symmetric encryption KMS key you specify. You can use the data key pair to\n perform asymmetric cryptography and implement digital signatures outside of KMS. The bytes\n in the keys are random; they are not related to the caller or to the KMS key that is used to encrypt the\n private key.

    \n\n

    You can use the public key that GenerateDataKeyPair returns to encrypt data\n or verify a signature outside of KMS. Then, store the encrypted private key with the data.\n When you are ready to decrypt data or sign a message, you can use the Decrypt operation to decrypt the encrypted private key.

    \n\n

    To generate a data key pair, you must specify a symmetric encryption KMS key to encrypt\n the private key in a data key pair. You cannot use an asymmetric KMS key or a KMS key in a\n custom key store. To get the type and origin of your KMS key, use the DescribeKey\n operation.

    \n

    Use the KeyPairSpec parameter to choose an RSA or Elliptic Curve (ECC) data\n key pair. In China Regions, you can also choose an SM2 data key pair. KMS recommends that you use\n ECC key pairs for signing, and use RSA and SM2 key pairs for either encryption or signing, but not both.\n However, KMS cannot enforce any restrictions on the use of data key pairs outside of KMS.

    \n\n

    If you are using the data key pair to encrypt data, or for any operation where you don't\n immediately need a private key, consider using the GenerateDataKeyPairWithoutPlaintext operation.\n GenerateDataKeyPairWithoutPlaintext returns a plaintext public key and an\n encrypted private key, but omits the plaintext private key that you need only to decrypt\n ciphertext or sign a message. Later, when you need to decrypt the data or sign a message, use\n the Decrypt operation to decrypt the encrypted private key in the data key\n pair.

    \n\n

    \n GenerateDataKeyPair returns a unique data key pair for each request. The\n bytes in the keys are random; they are not related to the caller or the KMS key that is used to encrypt the\n private key. The public key is a DER-encoded X.509 SubjectPublicKeyInfo, as specified in\n RFC 5280. The private key is a\n DER-encoded PKCS8 PrivateKeyInfo, as specified in RFC 5958.

    \n\n

    You can use an optional encryption context to add additional security to the encryption\n operation. If you specify an EncryptionContext, you must specify the same\n encryption context (a case-sensitive exact match) when decrypting the encrypted data key.\n Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the\n Key Management Service Developer Guide.

    \n

    The KMS key that you use for this operation must be in a compatible key state. For\ndetails, see Key states of KMS keys in the Key Management Service Developer Guide.

    \n

    \n Cross-account\n use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify\n the key ARN or alias ARN in the value of the KeyId parameter.

    \n\n

    \n Required permissions: kms:GenerateDataKeyPair (key policy)

    \n

    \n Related operations:\n

    \n " } }, "com.amazonaws.kms#GenerateDataKeyPairRequest": { @@ -1907,7 +1926,7 @@ "KeyPairSpec": { "target": "com.amazonaws.kms#DataKeyPairSpec", "traits": { - "smithy.api#documentation": "

    Determines the type of data key pair that is generated.

    \n

    The KMS rule that restricts the use of asymmetric RSA KMS keys to encrypt and decrypt or to sign and verify (but not both), and the rule that permits you to use ECC KMS keys only to sign and verify, are not effective on data key pairs, which are used outside of KMS.

    ", + "smithy.api#documentation": "

    Determines the type of data key pair that is generated.

    \n

    The KMS rule that restricts the use of asymmetric RSA and SM2 KMS keys to encrypt and decrypt or to sign and verify (but not both), and the rule that permits you to use ECC KMS keys only to sign and verify, are not effective on data key pairs, which are used outside of KMS. The SM2 key spec is only available in China Regions. RSA and ECC asymmetric key pairs are also available in China Regions.

    ", "smithy.api#required": {} } }, @@ -1992,7 +2011,7 @@ } ], "traits": { - "smithy.api#documentation": "

    Returns a unique asymmetric data key pair for use outside of KMS. This operation returns\n a plaintext public key and a copy of the private key that is encrypted under the symmetric\n encryption KMS key you specify. Unlike GenerateDataKeyPair, this operation\n does not return a plaintext private key. The bytes in the keys are random; they are not related to the caller\n or to the KMS key that is used to encrypt the private key.

    \n

    You can use the public key that GenerateDataKeyPairWithoutPlaintext returns\n to encrypt data or verify a signature outside of KMS. Then, store the encrypted private key\n with the data. When you are ready to decrypt data or sign a message, you can use the Decrypt operation to decrypt the encrypted private key.

    \n

    To generate a data key pair, you must specify a symmetric encryption KMS key to encrypt\n the private key in a data key pair. You cannot use an asymmetric KMS key or a KMS key in a\n custom key store. To get the type and origin of your KMS key, use the DescribeKey\n operation.

    \n

    Use the KeyPairSpec parameter to choose an RSA or Elliptic Curve (ECC) data\n key pair. KMS recommends that you use ECC key pairs for signing, and use RSA key pairs for\n either encryption or signing, but not both. However, KMS cannot enforce any restrictions on\n the use of data key pairs outside of KMS.

    \n

    \n GenerateDataKeyPairWithoutPlaintext returns a unique data key pair for each\n request. The bytes in the key are not related to the caller or KMS key that is used to encrypt\n the private key. The public key is a DER-encoded X.509 SubjectPublicKeyInfo, as specified in\n RFC 5280.

    \n\n

    You can use an optional encryption context to add additional security to the encryption\n operation. If you specify an EncryptionContext, you must specify the same\n encryption context (a case-sensitive exact match) when decrypting the encrypted data key.\n Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the\n Key Management Service Developer Guide.

    \n

    The KMS key that you use for this operation must be in a compatible key state. For\ndetails, see Key states of KMS keys in the Key Management Service Developer Guide.

    \n

    \n Cross-account\n use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify\n the key ARN or alias ARN in the value of the KeyId parameter.

    \n\n

    \n Required permissions: kms:GenerateDataKeyPairWithoutPlaintext (key\n policy)

    \n

    \n Related operations:\n

    \n " + "smithy.api#documentation": "

    Returns a unique asymmetric data key pair for use outside of KMS. This operation returns\n a plaintext public key and a copy of the private key that is encrypted under the symmetric\n encryption KMS key you specify. Unlike GenerateDataKeyPair, this operation\n does not return a plaintext private key. The bytes in the keys are random; they are not related to the caller\n or to the KMS key that is used to encrypt the private key.

    \n

    You can use the public key that GenerateDataKeyPairWithoutPlaintext returns\n to encrypt data or verify a signature outside of KMS. Then, store the encrypted private key\n with the data. When you are ready to decrypt data or sign a message, you can use the Decrypt operation to decrypt the encrypted private key.

    \n

    To generate a data key pair, you must specify a symmetric encryption KMS key to encrypt\n the private key in a data key pair. You cannot use an asymmetric KMS key or a KMS key in a\n custom key store. To get the type and origin of your KMS key, use the DescribeKey\n operation.

    \n

    Use the KeyPairSpec parameter to choose an RSA or Elliptic Curve (ECC) data\n key pair. In China Regions, you can also choose an SM2 data key pair. KMS recommends that you \n use ECC key pairs for signing, and use RSA and SM2 key pairs for either encryption or signing, but not\n both. However, KMS cannot enforce any restrictions on the use of data key pairs outside of KMS.

    \n

    \n GenerateDataKeyPairWithoutPlaintext returns a unique data key pair for each\n request. The bytes in the key are not related to the caller or KMS key that is used to encrypt\n the private key. The public key is a DER-encoded X.509 SubjectPublicKeyInfo, as specified in\n RFC 5280.

    \n\n

    You can use an optional encryption context to add additional security to the encryption\n operation. If you specify an EncryptionContext, you must specify the same\n encryption context (a case-sensitive exact match) when decrypting the encrypted data key.\n Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the\n Key Management Service Developer Guide.

    \n

    The KMS key that you use for this operation must be in a compatible key state. For\ndetails, see Key states of KMS keys in the Key Management Service Developer Guide.

    \n

    \n Cross-account\n use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify\n the key ARN or alias ARN in the value of the KeyId parameter.

    \n\n

    \n Required permissions: kms:GenerateDataKeyPairWithoutPlaintext (key\n policy)

    \n

    \n Related operations:\n

    \n " } }, "com.amazonaws.kms#GenerateDataKeyPairWithoutPlaintextRequest": { @@ -2014,7 +2033,7 @@ "KeyPairSpec": { "target": "com.amazonaws.kms#DataKeyPairSpec", "traits": { - "smithy.api#documentation": "

    Determines the type of data key pair that is generated.

    \n

    The KMS rule that restricts the use of asymmetric RSA KMS keys to encrypt and decrypt or to sign and verify (but not both), and the rule that permits you to use ECC KMS keys only to sign and verify, are not effective on data key pairs, which are used outside of KMS.

    ", + "smithy.api#documentation": "

    Determines the type of data key pair that is generated.

    \n

    The KMS rule that restricts the use of asymmetric RSA and SM2 KMS keys to encrypt and decrypt or to sign and verify (but not both), and the rule that permits you to use ECC KMS keys only to sign and verify, are not effective on data key pairs, which are used outside of KMS. The SM2 key spec is only available in China Regions. RSA and ECC asymmetric key pairs are also available in China Regions.

    ", "smithy.api#required": {} } }, @@ -2318,7 +2337,7 @@ } ], "traits": { - "smithy.api#documentation": "

    Returns a random byte string that is cryptographically secure.

    \n

    By default, the random byte string is generated in KMS. To generate the byte string in\n the CloudHSM cluster that is associated with a custom key store, specify the custom key store\n ID.

    \n

    Applications in Amazon Web Services Nitro Enclaves can call this operation by using the Amazon Web Services Nitro Enclaves Development Kit. For information about the supporting parameters, see How Amazon Web Services Nitro Enclaves use KMS in the Key Management Service Developer Guide.

    \n

    For more information about entropy and random number generation, see\n Key Management Service Cryptographic Details.

    \n\n

    \n Required permissions: kms:GenerateRandom (IAM policy)

    " + "smithy.api#documentation": "

    Returns a random byte string that is cryptographically secure.

    \n

    You must use the NumberOfBytes parameter to specify the length of the random\n byte string. There is no default value for string length.

    \n

    By default, the random byte string is generated in KMS. To generate the byte string in\n the CloudHSM cluster that is associated with a custom key store, specify the custom key store\n ID.

    \n

    Applications in Amazon Web Services Nitro Enclaves can call this operation by using the Amazon Web Services Nitro Enclaves Development Kit. For information about the supporting parameters, see How Amazon Web Services Nitro Enclaves use KMS in the Key Management Service Developer Guide.

    \n

    For more information about entropy and random number generation, see\n Key Management Service Cryptographic Details.

    \n

    \n Cross-account use: Not applicable. GenerateRandom does not use any account-specific resources, such as KMS keys.

    \n

    \n Required permissions: kms:GenerateRandom (IAM policy)

    " } }, "com.amazonaws.kms#GenerateRandomRequest": { @@ -2327,7 +2346,7 @@ "NumberOfBytes": { "target": "com.amazonaws.kms#NumberOfBytesType", "traits": { - "smithy.api#documentation": "

    The length of the byte string.

    " + "smithy.api#documentation": "

    The length of the random byte string. This parameter is required.

    " } }, "CustomKeyStoreId": { @@ -2591,7 +2610,7 @@ } ], "traits": { - "smithy.api#documentation": "

    Returns the public key of an asymmetric KMS key. Unlike the private key of an asymmetric\n KMS key, which never leaves KMS unencrypted, callers with kms:GetPublicKey\n permission can download the public key of an asymmetric KMS key. You can share the public key\n to allow others to encrypt messages and verify signatures outside of KMS.\n For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

    \n

    You do not need to download the public key. Instead, you can use the public key within\n KMS by calling the Encrypt, ReEncrypt, or Verify operations with the identifier of an asymmetric KMS key. When you use the\n public key within KMS, you benefit from the authentication, authorization, and logging that\n are part of every KMS operation. You also reduce the risk of encrypting data that cannot be\n decrypted. These features are not effective outside of KMS. For details, see Special\n Considerations for Downloading Public Keys.

    \n

    To help you use the public key safely outside of KMS, GetPublicKey returns\n important information about the public key in the response, including:

    \n
      \n
    • \n

      \n KeySpec: The type of key material in the public key, such as\n RSA_4096 or ECC_NIST_P521.

      \n
    • \n
    • \n

      \n KeyUsage: Whether the key is used for encryption or signing.

      \n
    • \n
    • \n

      \n EncryptionAlgorithms or SigningAlgorithms: A list of the encryption algorithms or the signing\n algorithms for the key.

      \n
    • \n
    \n

    Although KMS cannot enforce these restrictions on external operations, it is crucial\n that you use this information to prevent the public key from being used improperly. For\n example, you can prevent a public signing key from being used to encrypt data, or prevent a\n public key from being used with an encryption algorithm that is not supported by KMS. You\n can also avoid errors, such as using the wrong signing algorithm in a verification\n operation.

    \n

    The KMS key that you use for this operation must be in a compatible key state. For\ndetails, see Key states of KMS keys in the Key Management Service Developer Guide.

    \n

    \n Cross-account use:\n Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify\n the key ARN or alias ARN in the value of the KeyId parameter.

    \n\n

    \n Required permissions: kms:GetPublicKey (key policy)

    \n

    \n Related operations: CreateKey\n

    " + "smithy.api#documentation": "

    Returns the public key of an asymmetric KMS key. Unlike the private key of an asymmetric\n KMS key, which never leaves KMS unencrypted, callers with kms:GetPublicKey\n permission can download the public key of an asymmetric KMS key. You can share the public key\n to allow others to encrypt messages and verify signatures outside of KMS.\n For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

    \n

    You do not need to download the public key. Instead, you can use the public key within\n KMS by calling the Encrypt, ReEncrypt, or Verify operations with the identifier of an asymmetric KMS key. When you use the\n public key within KMS, you benefit from the authentication, authorization, and logging that\n are part of every KMS operation. You also reduce the risk of encrypting data that cannot be\n decrypted. These features are not effective outside of KMS.

    \n

    To verify a signature outside of KMS with an SM2 public key (China Regions only), you must \n specify the distinguishing ID. By default, KMS uses 1234567812345678 as the \n distinguishing ID. For more information, see Offline verification\n with SM2 key pairs.

    \n

    To help you use the public key safely outside of KMS, GetPublicKey returns\n important information about the public key in the response, including:

    \n
      \n
    • \n

      \n KeySpec: The type of key material in the public key, such as\n RSA_4096 or ECC_NIST_P521.

      \n
    • \n
    • \n

      \n KeyUsage: Whether the key is used for encryption or signing.

      \n
    • \n
    • \n

      \n EncryptionAlgorithms or SigningAlgorithms: A list of the encryption algorithms or the signing\n algorithms for the key.

      \n
    • \n
    \n

    Although KMS cannot enforce these restrictions on external operations, it is crucial\n that you use this information to prevent the public key from being used improperly. For\n example, you can prevent a public signing key from being used to encrypt data, or prevent a\n public key from being used with an encryption algorithm that is not supported by KMS. You\n can also avoid errors, such as using the wrong signing algorithm in a verification\n operation.

    \n

    The KMS key that you use for this operation must be in a compatible key state. For\ndetails, see Key states of KMS keys in the Key Management Service Developer Guide.

    \n

    \n Cross-account use:\n Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify\n the key ARN or alias ARN in the value of the KeyId parameter.

    \n\n

    \n Required permissions: kms:GetPublicKey (key policy)

    \n

    \n Related operations: CreateKey\n

    " } }, "com.amazonaws.kms#GetPublicKeyRequest": { @@ -3459,6 +3478,10 @@ { "value": "HMAC_512", "name": "HMAC_512" + }, + { + "value": "SM2", + "name": "SM2" } ] } @@ -3930,7 +3953,13 @@ } ], "traits": { - "smithy.api#documentation": "

    Returns all tags on the specified KMS key.

    \n

    For general information about tags, including the format and syntax, see Tagging Amazon Web Services resources in\n the Amazon Web Services General Reference. For information about using\n tags in KMS, see Tagging\n keys.

    \n

    \n Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

    \n\n

    \n Required permissions: kms:ListResourceTags (key policy)

    \n

    \n Related operations:\n

    \n " + "smithy.api#documentation": "

    Returns all tags on the specified KMS key.

    \n

    For general information about tags, including the format and syntax, see Tagging Amazon Web Services resources in\n the Amazon Web Services General Reference. For information about using\n tags in KMS, see Tagging\n keys.

    \n

    \n Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

    \n\n

    \n Required permissions: kms:ListResourceTags (key policy)

    \n

    \n Related operations:\n

    \n ", + "smithy.api#paginated": { + "inputToken": "Marker", + "outputToken": "NextMarker", + "items": "Tags", + "pageSize": "Limit" + } } }, "com.amazonaws.kms#ListResourceTagsRequest": { @@ -4006,7 +4035,13 @@ } ], "traits": { - "smithy.api#documentation": "

    Returns information about all grants in the Amazon Web Services account and Region that have the\n specified retiring principal.

    \n

    You can specify any principal in your Amazon Web Services account. The grants that are returned include\n grants for KMS keys in your Amazon Web Services account and other Amazon Web Services accounts. You might use this\n operation to determine which grants you may retire. To retire a grant, use the RetireGrant operation.

    \n

    For detailed information about grants, including grant terminology, see Grants in KMS in the\n \n Key Management Service Developer Guide\n . For examples of working with grants in several\n programming languages, see Programming grants.

    \n

    \n Cross-account use: You must specify a principal in your\n Amazon Web Services account. However, this operation can return grants in any Amazon Web Services account. You do not need\n kms:ListRetirableGrants permission (or any other additional permission) in any\n Amazon Web Services account other than your own.

    \n\n

    \n Required permissions: kms:ListRetirableGrants (IAM policy) in your\n Amazon Web Services account.

    \n

    \n Related operations:\n

    \n " + "smithy.api#documentation": "

    Returns information about all grants in the Amazon Web Services account and Region that have the\n specified retiring principal.

    \n

    You can specify any principal in your Amazon Web Services account. The grants that are returned include\n grants for KMS keys in your Amazon Web Services account and other Amazon Web Services accounts. You might use this\n operation to determine which grants you may retire. To retire a grant, use the RetireGrant operation.

    \n

    For detailed information about grants, including grant terminology, see Grants in KMS in the\n \n Key Management Service Developer Guide\n . For examples of working with grants in several\n programming languages, see Programming grants.

    \n

    \n Cross-account use: You must specify a principal in your\n Amazon Web Services account. However, this operation can return grants in any Amazon Web Services account. You do not need\n kms:ListRetirableGrants permission (or any other additional permission) in any\n Amazon Web Services account other than your own.

    \n\n

    \n Required permissions: kms:ListRetirableGrants (IAM policy) in your\n Amazon Web Services account.

    \n

    \n Related operations:\n

    \n ", + "smithy.api#paginated": { + "inputToken": "Marker", + "outputToken": "NextMarker", + "items": "Grants", + "pageSize": "Limit" + } } }, "com.amazonaws.kms#ListRetirableGrantsRequest": { @@ -4346,7 +4381,7 @@ "Policy": { "target": "com.amazonaws.kms#PolicyType", "traits": { - "smithy.api#documentation": "

    The key policy to attach to the KMS key.

    \n

    The key policy must meet the following criteria:

    \n
      \n
    • \n

      If you don't set BypassPolicyLockoutSafetyCheck to true, the key policy\n must allow the principal that is making the PutKeyPolicy request to make a\n subsequent PutKeyPolicy request on the KMS key. This reduces the risk that\n the KMS key becomes unmanageable. For more information, refer to the scenario in the\n Default Key Policy section of the Key Management Service Developer Guide.

      \n
    • \n
    • \n

      Each statement in the key policy must contain one or more principals. The principals\n in the key policy must exist and be visible to KMS. When you create a new Amazon Web Services\n principal (for example, an IAM user or role), you might need to enforce a delay before\n including the new principal in a key policy because the new principal might not be\n immediately visible to KMS. For more information, see Changes that I make are not always immediately visible in the Amazon Web Services\n Identity and Access Management User Guide.

      \n
    • \n
    \n \n

    A key policy document must conform to the following rules.

    \n
      \n
    • \n

      Up to 32 kilobytes (32768 bytes)

      \n
    • \n
    • \n

      Must be UTF-8 encoded

      \n
    • \n
    • \n

      The only Unicode characters that are permitted in a key policy document are the horizontal tab (U+0009), linefeed (U+000A), carriage return (U+000D), and characters in the range U+0020 to U+00FF.

      \n
    • \n
    • \n

      The Sid element in a key policy statement can include spaces. (Spaces are\n prohibited in the Sid element of an IAM policy document.)

      \n
    • \n
    ", + "smithy.api#documentation": "

    The key policy to attach to the KMS key.

    \n

    The key policy must meet the following criteria:

    \n
      \n
    • \n

      If you don't set BypassPolicyLockoutSafetyCheck to true, the key policy\n must allow the principal that is making the PutKeyPolicy request to make a\n subsequent PutKeyPolicy request on the KMS key. This reduces the risk that\n the KMS key becomes unmanageable. For more information, refer to the scenario in the\n Default Key Policy section of the Key Management Service Developer Guide.

      \n
    • \n
    • \n

      Each statement in the key policy must contain one or more principals. The principals\n in the key policy must exist and be visible to KMS. When you create a new Amazon Web Services\n principal (for example, an IAM user or role), you might need to enforce a delay before\n including the new principal in a key policy because the new principal might not be\n immediately visible to KMS. For more information, see Changes that I make are not always immediately visible in the Amazon Web Services\n Identity and Access Management User Guide.

      \n
    • \n
    \n \n

    A key policy document can include only the following characters:

    \n
      \n
    • \n

      Printable ASCII characters from the space character (\\u0020) through the end of the ASCII character range.

      \n
    • \n
    • \n

      Printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF).

      \n
    • \n
    • \n

      The tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D) special characters

      \n
    • \n
    \n

    For information about key policies, see Key policies in KMS in the\n Key Management Service Developer Guide. For help writing and formatting a JSON policy document, see the IAM JSON Policy Reference in the \n Identity and Access Management User Guide\n .

    ", "smithy.api#required": {} } }, @@ -4566,7 +4601,7 @@ "Policy": { "target": "com.amazonaws.kms#PolicyType", "traits": { - "smithy.api#documentation": "

    The key policy to attach to the KMS key. This parameter is optional. If you do not provide\n a key policy, KMS attaches the default key policy to the\n KMS key.

    \n

    The key policy is not a shared property of multi-Region keys. You can specify the same key\n policy or a different key policy for each key in a set of related multi-Region keys. KMS\n does not synchronize this property.

    \n

    If you provide a key policy, it must meet the following criteria:

    \n
      \n
    • \n

      If you don't set BypassPolicyLockoutSafetyCheck to true, the key policy\n must give the caller kms:PutKeyPolicy permission on the replica key. This\n reduces the risk that the KMS key becomes unmanageable. For more information, refer to the\n scenario in the Default Key Policy section of the \n Key Management Service Developer Guide\n .

      \n
    • \n
    • \n

      Each statement in the key policy must contain one or more principals. The principals\n in the key policy must exist and be visible to KMS. When you create a new Amazon Web Services\n principal (for example, an IAM user or role), you might need to enforce a delay before\n including the new principal in a key policy because the new principal might not be\n immediately visible to KMS. For more information, see Changes that I make are not always immediately visible in the\n \n Identity and Access Management User Guide\n .

      \n
    • \n
    \n \n \n

    A key policy document must conform to the following rules.

    \n
      \n
    • \n

      Up to 32 kilobytes (32768 bytes)

      \n
    • \n
    • \n

      Must be UTF-8 encoded

      \n
    • \n
    • \n

      The only Unicode characters that are permitted in a key policy document are the horizontal tab (U+0009), linefeed (U+000A), carriage return (U+000D), and characters in the range U+0020 to U+00FF.

      \n
    • \n
    • \n

      The Sid element in a key policy statement can include spaces. (Spaces are\n prohibited in the Sid element of an IAM policy document.)

      \n
    • \n
    " + "smithy.api#documentation": "

    The key policy to attach to the KMS key. This parameter is optional. If you do not provide\n a key policy, KMS attaches the default key policy to the\n KMS key.

    \n

    The key policy is not a shared property of multi-Region keys. You can specify the same key\n policy or a different key policy for each key in a set of related multi-Region keys. KMS\n does not synchronize this property.

    \n

    If you provide a key policy, it must meet the following criteria:

    \n
      \n
    • \n

      If you don't set BypassPolicyLockoutSafetyCheck to true, the key policy\n must give the caller kms:PutKeyPolicy permission on the replica key. This\n reduces the risk that the KMS key becomes unmanageable. For more information, refer to the\n scenario in the Default Key Policy section of the \n Key Management Service Developer Guide\n .

      \n
    • \n
    • \n

      Each statement in the key policy must contain one or more principals. The principals\n in the key policy must exist and be visible to KMS. When you create a new Amazon Web Services\n principal (for example, an IAM user or role), you might need to enforce a delay before\n including the new principal in a key policy because the new principal might not be\n immediately visible to KMS. For more information, see Changes that I make are not always immediately visible in the\n \n Identity and Access Management User Guide\n .

      \n
    • \n
    \n \n \n

    A key policy document can include only the following characters:

    \n
      \n
    • \n

      Printable ASCII characters from the space character (\\u0020) through the end of the ASCII character range.

      \n
    • \n
    • \n

      Printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF).

      \n
    • \n
    • \n

      The tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D) special characters

      \n
    • \n
    \n

    For information about key policies, see Key policies in KMS in the\n Key Management Service Developer Guide. For help writing and formatting a JSON policy document, see the IAM JSON Policy Reference in the \n Identity and Access Management User Guide\n .

    " } }, "BypassPolicyLockoutSafetyCheck": { @@ -4935,6 +4970,10 @@ { "value": "ECDSA_SHA_512", "name": "ECDSA_SHA_512" + }, + { + "value": "SM2DSA", + "name": "SM2DSA" } ] } @@ -5348,7 +5387,7 @@ "AliasName": { "target": "com.amazonaws.kms#AliasNameType", "traits": { - "smithy.api#documentation": "

    Identifies the alias that is changing its KMS key. This value must begin with\n alias/ followed by the alias name, such as alias/ExampleAlias. You\n cannot use UpdateAlias to change the alias name.

    ", + "smithy.api#documentation": "

    Identifies the alias that is changing its KMS key. This value must begin with\n alias/ followed by the alias name, such as alias/ExampleAlias. You\n cannot use UpdateAlias to change the alias name.

    ", "smithy.api#required": {} } }, @@ -5396,7 +5435,7 @@ } ], "traits": { - "smithy.api#documentation": "

    Changes the properties of a custom key store. Use the CustomKeyStoreId\n parameter to identify the custom key store you want to edit. Use the remaining parameters to\n change the properties of the custom key store.

    \n

    You can only update a custom key store that is disconnected. To disconnect the custom key\n store, use DisconnectCustomKeyStore. To reconnect the custom key store after\n the update completes, use ConnectCustomKeyStore. To find the connection\n state of a custom key store, use the DescribeCustomKeyStores\n operation.

    \n

    The CustomKeyStoreId parameter is required in all commands. Use the other\n parameters of UpdateCustomKeyStore to edit your key store settings.

    \n
      \n
    • \n

      Use the NewCustomKeyStoreName parameter to change the friendly name of\n the custom key store to the value that you specify.

      \n

      \n
    • \n
    • \n

      Use the KeyStorePassword parameter to tell KMS the current password of the\n \n kmsuser crypto user (CU) in the associated CloudHSM cluster. You\n can use this parameter to fix connection\n failures that occur when KMS cannot log into the associated cluster because\n the kmsuser password has changed. This value does not change the password in\n the CloudHSM cluster.

      \n

      \n
    • \n
    • \n

      Use the CloudHsmClusterId parameter to associate the custom key store\n with a different, but related, CloudHSM cluster. You can use this parameter to repair a\n custom key store if its CloudHSM cluster becomes corrupted or is deleted, or when you need to\n create or restore a cluster from a backup.

      \n
    • \n
    \n

    If the operation succeeds, it returns a JSON object with no\nproperties.

    \n

    This operation is part of the Custom Key Store feature in KMS, which\ncombines the convenience and extensive integration of KMS with the isolation and control of a\nsingle-tenant key store.

    \n

    \n Cross-account\n use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

    \n

    \n Required permissions: kms:UpdateCustomKeyStore (IAM policy)

    \n

    \n Related operations:\n

    \n " + "smithy.api#documentation": "

    Changes the properties of a custom key store. Use the CustomKeyStoreId\n parameter to identify the custom key store you want to edit. Use the remaining parameters to\n change the properties of the custom key store.

    \n

    You can only update a custom key store that is disconnected. To disconnect the custom key\n store, use DisconnectCustomKeyStore. To reconnect the custom key store after\n the update completes, use ConnectCustomKeyStore. To find the connection\n state of a custom key store, use the DescribeCustomKeyStores\n operation.

    \n

    The CustomKeyStoreId parameter is required in all commands. Use the other\n parameters of UpdateCustomKeyStore to edit your key store settings.

    \n
      \n
    • \n

      Use the NewCustomKeyStoreName parameter to change the friendly name of\n the custom key store to the value that you specify.

      \n

      \n
    • \n
    • \n

      Use the KeyStorePassword parameter to tell KMS the current password of the\n \n kmsuser crypto user (CU) in the associated CloudHSM cluster. You\n can use this parameter to fix connection\n failures that occur when KMS cannot log into the associated cluster because\n the kmsuser password has changed. This value does not change the password in\n the CloudHSM cluster.

      \n

      \n
    • \n
    • \n

      Use the CloudHsmClusterId parameter to associate the custom key store\n with a different, but related, CloudHSM cluster. You can use this parameter to repair a\n custom key store if its CloudHSM cluster becomes corrupted or is deleted, or when you need to\n create or restore a cluster from a backup.

      \n
    • \n
    \n

    If the operation succeeds, it returns a JSON object with no\nproperties.

    \n

    This operation is part of the custom key store feature in KMS, which\ncombines the convenience and extensive integration of KMS with the isolation and control of a\nsingle-tenant key store.

    \n

    \n Cross-account\n use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

    \n

    \n Required permissions: kms:UpdateCustomKeyStore (IAM policy)

    \n

    \n Related operations:\n

    \n " } }, "com.amazonaws.kms#UpdateCustomKeyStoreRequest": { @@ -5570,7 +5609,7 @@ } ], "traits": { - "smithy.api#documentation": "

    Verifies a digital signature that was generated by the Sign operation.

    \n

    \n

    Verification confirms that an authorized user signed the message with the specified KMS\n key and signing algorithm, and the message hasn't changed since it was signed. If the\n signature is verified, the value of the SignatureValid field in the response is\n True. If the signature verification fails, the Verify operation\n fails with an KMSInvalidSignatureException exception.

    \n

    A digital signature is generated by using the private key in an asymmetric KMS key. The\n signature is verified by using the public key in the same asymmetric KMS key.\n For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

    \n

    To verify a digital signature, you can use the Verify operation. Specify the\n same asymmetric KMS key, message, and signing algorithm that were used to produce the\n signature.

    \n

    You can also verify the digital signature by using the public key of the KMS key outside\n of KMS. Use the GetPublicKey operation to download the public key in the\n asymmetric KMS key and then use the public key to verify the signature outside of KMS. The\n advantage of using the Verify operation is that it is performed within KMS. As\n a result, it's easy to call, the operation is performed within the FIPS boundary, it is logged\n in CloudTrail, and you can use key policy and IAM policy to determine who is authorized to use\n the KMS key to verify signatures.

    \n

    The KMS key that you use for this operation must be in a compatible key state. For\ndetails, see Key states of KMS keys in the Key Management Service Developer Guide.

    \n

    \n Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify\n the key ARN or alias ARN in the value of the KeyId parameter.

    \n\n

    \n Required permissions: kms:Verify (key policy)

    \n

    \n Related operations: Sign\n

    " + "smithy.api#documentation": "

    Verifies a digital signature that was generated by the Sign operation.

    \n

    \n

    Verification confirms that an authorized user signed the message with the specified KMS\n key and signing algorithm, and the message hasn't changed since it was signed. If the\n signature is verified, the value of the SignatureValid field in the response is\n True. If the signature verification fails, the Verify operation\n fails with an KMSInvalidSignatureException exception.

    \n

    A digital signature is generated by using the private key in an asymmetric KMS key. The\n signature is verified by using the public key in the same asymmetric KMS key.\n For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

    \n

    To verify a digital signature, you can use the Verify operation. Specify the\n same asymmetric KMS key, message, and signing algorithm that were used to produce the\n signature.

    \n

    You can also verify the digital signature by using the public key of the KMS key outside\n of KMS. Use the GetPublicKey operation to download the public key in the\n asymmetric KMS key and then use the public key to verify the signature outside of KMS. To \n verify a signature outside of KMS with an SM2 public key, you must specify the distinguishing \n ID. By default, KMS uses 1234567812345678 as the distinguishing ID. For more \n information, see Offline\n verification with SM2 key pairs in Key Management Service Developer Guide. The\n advantage of using the Verify operation is that it is performed within KMS. As\n a result, it's easy to call, the operation is performed within the FIPS boundary, it is logged\n in CloudTrail, and you can use key policy and IAM policy to determine who is authorized to use\n the KMS key to verify signatures.

    \n

    The KMS key that you use for this operation must be in a compatible key state. For\ndetails, see Key states of KMS keys in the Key Management Service Developer Guide.

    \n

    \n Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify\n the key ARN or alias ARN in the value of the KeyId parameter.

    \n\n

    \n Required permissions: kms:Verify (key policy)

    \n

    \n Related operations: Sign\n

    " } }, "com.amazonaws.kms#VerifyMac": { From e65c9b2e1203291d68d0fdb11d1b467d83e37cf5 Mon Sep 17 00:00:00 2001 From: awstools Date: Mon, 18 Jul 2022 18:30:52 +0000 Subject: [PATCH 06/31] feat(client-sso-admin): AWS SSO now supports attaching customer managed policies and a permissions boundary to your permission sets. This release adds new API operations to manage and view the customer managed policies and the permissions boundary for a given permission set. --- clients/client-sso-admin/README.md | 26 +- clients/client-sso-admin/src/SSOAdmin.ts | 249 ++++++- .../client-sso-admin/src/SSOAdminClient.ts | 45 +- ...edPolicyReferenceToPermissionSetCommand.ts | 111 +++ ...tachManagedPolicyToPermissionSetCommand.ts | 2 +- ...issionsBoundaryFromPermissionSetCommand.ts | 111 +++ ...PolicyReferenceFromPermissionSetCommand.ts | 111 +++ ...chManagedPolicyFromPermissionSetCommand.ts | 2 +- ...missionsBoundaryForPermissionSetCommand.ts | 108 +++ ...dPolicyReferencesInPermissionSetCommand.ts | 111 +++ ...stManagedPoliciesInPermissionSetCommand.ts | 2 +- ...rmissionsBoundaryToPermissionSetCommand.ts | 108 +++ .../client-sso-admin/src/commands/index.ts | 6 + .../client-sso-admin/src/models/models_0.ts | 339 ++++++++- ...olicyReferencesInPermissionSetPaginator.ts | 61 ++ .../client-sso-admin/src/pagination/index.ts | 1 + .../src/protocols/Aws_json1_1.ts | 653 ++++++++++++++++++ codegen/sdk-codegen/aws-models/sso-admin.json | 574 +++++++++++++-- 18 files changed, 2509 insertions(+), 111 deletions(-) create mode 100644 clients/client-sso-admin/src/commands/AttachCustomerManagedPolicyReferenceToPermissionSetCommand.ts create mode 100644 clients/client-sso-admin/src/commands/DeletePermissionsBoundaryFromPermissionSetCommand.ts create mode 100644 clients/client-sso-admin/src/commands/DetachCustomerManagedPolicyReferenceFromPermissionSetCommand.ts create mode 100644 clients/client-sso-admin/src/commands/GetPermissionsBoundaryForPermissionSetCommand.ts create mode 100644 
clients/client-sso-admin/src/commands/ListCustomerManagedPolicyReferencesInPermissionSetCommand.ts create mode 100644 clients/client-sso-admin/src/commands/PutPermissionsBoundaryToPermissionSetCommand.ts create mode 100644 clients/client-sso-admin/src/pagination/ListCustomerManagedPolicyReferencesInPermissionSetPaginator.ts diff --git a/clients/client-sso-admin/README.md b/clients/client-sso-admin/README.md index f20dd4a443a50..991d9cb734cb1 100644 --- a/clients/client-sso-admin/README.md +++ b/clients/client-sso-admin/README.md @@ -9,14 +9,7 @@ AWS SDK for JavaScript SSOAdmin Client for Node.js, Browser and React Native. -

    Amazon Web Services Single Sign On (SSO) is a cloud SSO service that makes it easy to centrally manage SSO -access to multiple Amazon Web Services accounts and business applications. This guide provides information on -SSO operations which could be used for access management of Amazon Web Services accounts. For information about -Amazon Web Services SSO features, see the Amazon Web Services Single Sign-On User Guide.

    -

    Many operations in the SSO APIs rely on identifiers for users and groups, known as -principals. For more information about how to work with principals and principal IDs in Amazon Web Services SSO, -see the Amazon Web Services SSO Identity Store API -Reference.

    +

    ## Installing @@ -33,16 +26,19 @@ using your favorite package manager: The AWS SDK is modulized by clients and commands. To send a request, you only need to import the `SSOAdminClient` and -the commands you need, for example `AttachManagedPolicyToPermissionSetCommand`: +the commands you need, for example `AttachCustomerManagedPolicyReferenceToPermissionSetCommand`: ```js // ES5 example -const { SSOAdminClient, AttachManagedPolicyToPermissionSetCommand } = require("@aws-sdk/client-sso-admin"); +const { + SSOAdminClient, + AttachCustomerManagedPolicyReferenceToPermissionSetCommand, +} = require("@aws-sdk/client-sso-admin"); ``` ```ts // ES6+ example -import { SSOAdminClient, AttachManagedPolicyToPermissionSetCommand } from "@aws-sdk/client-sso-admin"; +import { SSOAdminClient, AttachCustomerManagedPolicyReferenceToPermissionSetCommand } from "@aws-sdk/client-sso-admin"; ``` ### Usage @@ -61,7 +57,7 @@ const client = new SSOAdminClient({ region: "REGION" }); const params = { /** input parameters */ }; -const command = new AttachManagedPolicyToPermissionSetCommand(params); +const command = new AttachCustomerManagedPolicyReferenceToPermissionSetCommand(params); ``` #### Async/await @@ -140,7 +136,7 @@ const client = new AWS.SSOAdmin({ region: "REGION" }); // async/await. try { - const data = await client.attachManagedPolicyToPermissionSet(params); + const data = await client.attachCustomerManagedPolicyReferenceToPermissionSet(params); // process data. } catch (error) { // error handling. @@ -148,7 +144,7 @@ try { // Promises. client - .attachManagedPolicyToPermissionSet(params) + .attachCustomerManagedPolicyReferenceToPermissionSet(params) .then((data) => { // process data. }) @@ -157,7 +153,7 @@ client }); // callbacks. -client.attachManagedPolicyToPermissionSet(params, (err, data) => { +client.attachCustomerManagedPolicyReferenceToPermissionSet(params, (err, data) => { // process err and data. 
}); ``` diff --git a/clients/client-sso-admin/src/SSOAdmin.ts b/clients/client-sso-admin/src/SSOAdmin.ts index 00a9c6a0171af..f32c8c2a7eb71 100644 --- a/clients/client-sso-admin/src/SSOAdmin.ts +++ b/clients/client-sso-admin/src/SSOAdmin.ts @@ -1,6 +1,11 @@ // smithy-typescript generated code import { HttpHandlerOptions as __HttpHandlerOptions } from "@aws-sdk/types"; +import { + AttachCustomerManagedPolicyReferenceToPermissionSetCommand, + AttachCustomerManagedPolicyReferenceToPermissionSetCommandInput, + AttachCustomerManagedPolicyReferenceToPermissionSetCommandOutput, +} from "./commands/AttachCustomerManagedPolicyReferenceToPermissionSetCommand"; import { AttachManagedPolicyToPermissionSetCommand, AttachManagedPolicyToPermissionSetCommandInput, @@ -36,6 +41,11 @@ import { DeleteInstanceAccessControlAttributeConfigurationCommandInput, DeleteInstanceAccessControlAttributeConfigurationCommandOutput, } from "./commands/DeleteInstanceAccessControlAttributeConfigurationCommand"; +import { + DeletePermissionsBoundaryFromPermissionSetCommand, + DeletePermissionsBoundaryFromPermissionSetCommandInput, + DeletePermissionsBoundaryFromPermissionSetCommandOutput, +} from "./commands/DeletePermissionsBoundaryFromPermissionSetCommand"; import { DeletePermissionSetCommand, DeletePermissionSetCommandInput, @@ -66,6 +76,11 @@ import { DescribePermissionSetProvisioningStatusCommandInput, DescribePermissionSetProvisioningStatusCommandOutput, } from "./commands/DescribePermissionSetProvisioningStatusCommand"; +import { + DetachCustomerManagedPolicyReferenceFromPermissionSetCommand, + DetachCustomerManagedPolicyReferenceFromPermissionSetCommandInput, + DetachCustomerManagedPolicyReferenceFromPermissionSetCommandOutput, +} from "./commands/DetachCustomerManagedPolicyReferenceFromPermissionSetCommand"; import { DetachManagedPolicyFromPermissionSetCommand, DetachManagedPolicyFromPermissionSetCommandInput, @@ -76,6 +91,11 @@ import { GetInlinePolicyForPermissionSetCommandInput, 
GetInlinePolicyForPermissionSetCommandOutput, } from "./commands/GetInlinePolicyForPermissionSetCommand"; +import { + GetPermissionsBoundaryForPermissionSetCommand, + GetPermissionsBoundaryForPermissionSetCommandInput, + GetPermissionsBoundaryForPermissionSetCommandOutput, +} from "./commands/GetPermissionsBoundaryForPermissionSetCommand"; import { ListAccountAssignmentCreationStatusCommand, ListAccountAssignmentCreationStatusCommandInput, @@ -96,6 +116,11 @@ import { ListAccountsForProvisionedPermissionSetCommandInput, ListAccountsForProvisionedPermissionSetCommandOutput, } from "./commands/ListAccountsForProvisionedPermissionSetCommand"; +import { + ListCustomerManagedPolicyReferencesInPermissionSetCommand, + ListCustomerManagedPolicyReferencesInPermissionSetCommandInput, + ListCustomerManagedPolicyReferencesInPermissionSetCommandOutput, +} from "./commands/ListCustomerManagedPolicyReferencesInPermissionSetCommand"; import { ListInstancesCommand, ListInstancesCommandInput, @@ -136,6 +161,11 @@ import { PutInlinePolicyToPermissionSetCommandInput, PutInlinePolicyToPermissionSetCommandOutput, } from "./commands/PutInlinePolicyToPermissionSetCommand"; +import { + PutPermissionsBoundaryToPermissionSetCommand, + PutPermissionsBoundaryToPermissionSetCommandInput, + PutPermissionsBoundaryToPermissionSetCommandOutput, +} from "./commands/PutPermissionsBoundaryToPermissionSetCommand"; import { TagResourceCommand, TagResourceCommandInput, TagResourceCommandOutput } from "./commands/TagResourceCommand"; import { UntagResourceCommand, @@ -155,18 +185,45 @@ import { import { SSOAdminClient } from "./SSOAdminClient"; /** - *

    Amazon Web Services Single Sign On (SSO) is a cloud SSO service that makes it easy to centrally manage SSO - * access to multiple Amazon Web Services accounts and business applications. This guide provides information on - * SSO operations which could be used for access management of Amazon Web Services accounts. For information about - * Amazon Web Services SSO features, see the Amazon Web Services Single Sign-On User Guide.

    - *

    Many operations in the SSO APIs rely on identifiers for users and groups, known as - * principals. For more information about how to work with principals and principal IDs in Amazon Web Services SSO, - * see the Amazon Web Services SSO Identity Store API - * Reference.

    + *

    */ export class SSOAdmin extends SSOAdminClient { /** - *

    Attaches an IAM managed policy ARN to a permission set.

    + *

    Attaches the specified IAM customer managed policy to the specified PermissionSet.

    + */ + public attachCustomerManagedPolicyReferenceToPermissionSet( + args: AttachCustomerManagedPolicyReferenceToPermissionSetCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public attachCustomerManagedPolicyReferenceToPermissionSet( + args: AttachCustomerManagedPolicyReferenceToPermissionSetCommandInput, + cb: (err: any, data?: AttachCustomerManagedPolicyReferenceToPermissionSetCommandOutput) => void + ): void; + public attachCustomerManagedPolicyReferenceToPermissionSet( + args: AttachCustomerManagedPolicyReferenceToPermissionSetCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: AttachCustomerManagedPolicyReferenceToPermissionSetCommandOutput) => void + ): void; + public attachCustomerManagedPolicyReferenceToPermissionSet( + args: AttachCustomerManagedPolicyReferenceToPermissionSetCommandInput, + optionsOrCb?: + | __HttpHandlerOptions + | ((err: any, data?: AttachCustomerManagedPolicyReferenceToPermissionSetCommandOutput) => void), + cb?: (err: any, data?: AttachCustomerManagedPolicyReferenceToPermissionSetCommandOutput) => void + ): Promise | void { + const command = new AttachCustomerManagedPolicyReferenceToPermissionSetCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

    Attaches an Amazon Web Services managed IAM policy ARN to a permission set.

    * *

    If the permission set is already referenced by one or more account assignments, you will * need to call @@ -426,6 +483,40 @@ export class SSOAdmin extends SSOAdminClient { } } + /** + *

    Deletes the permissions boundary from a specified PermissionSet.

    + */ + public deletePermissionsBoundaryFromPermissionSet( + args: DeletePermissionsBoundaryFromPermissionSetCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public deletePermissionsBoundaryFromPermissionSet( + args: DeletePermissionsBoundaryFromPermissionSetCommandInput, + cb: (err: any, data?: DeletePermissionsBoundaryFromPermissionSetCommandOutput) => void + ): void; + public deletePermissionsBoundaryFromPermissionSet( + args: DeletePermissionsBoundaryFromPermissionSetCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DeletePermissionsBoundaryFromPermissionSetCommandOutput) => void + ): void; + public deletePermissionsBoundaryFromPermissionSet( + args: DeletePermissionsBoundaryFromPermissionSetCommandInput, + optionsOrCb?: + | __HttpHandlerOptions + | ((err: any, data?: DeletePermissionsBoundaryFromPermissionSetCommandOutput) => void), + cb?: (err: any, data?: DeletePermissionsBoundaryFromPermissionSetCommandOutput) => void + ): Promise | void { + const command = new DeletePermissionsBoundaryFromPermissionSetCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

    Deletes the specified permission set.

    */ @@ -630,7 +721,41 @@ export class SSOAdmin extends SSOAdminClient { } /** - *

    Detaches the attached IAM managed policy ARN from the specified permission set.

    + *

    Detaches the specified IAM customer managed policy from the specified PermissionSet.

    + */ + public detachCustomerManagedPolicyReferenceFromPermissionSet( + args: DetachCustomerManagedPolicyReferenceFromPermissionSetCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public detachCustomerManagedPolicyReferenceFromPermissionSet( + args: DetachCustomerManagedPolicyReferenceFromPermissionSetCommandInput, + cb: (err: any, data?: DetachCustomerManagedPolicyReferenceFromPermissionSetCommandOutput) => void + ): void; + public detachCustomerManagedPolicyReferenceFromPermissionSet( + args: DetachCustomerManagedPolicyReferenceFromPermissionSetCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DetachCustomerManagedPolicyReferenceFromPermissionSetCommandOutput) => void + ): void; + public detachCustomerManagedPolicyReferenceFromPermissionSet( + args: DetachCustomerManagedPolicyReferenceFromPermissionSetCommandInput, + optionsOrCb?: + | __HttpHandlerOptions + | ((err: any, data?: DetachCustomerManagedPolicyReferenceFromPermissionSetCommandOutput) => void), + cb?: (err: any, data?: DetachCustomerManagedPolicyReferenceFromPermissionSetCommandOutput) => void + ): Promise | void { + const command = new DetachCustomerManagedPolicyReferenceFromPermissionSetCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

    Detaches the attached Amazon Web Services managed IAM policy ARN from the specified permission set.

    */ public detachManagedPolicyFromPermissionSet( args: DetachManagedPolicyFromPermissionSetCommandInput, @@ -693,6 +818,40 @@ export class SSOAdmin extends SSOAdminClient { } } + /** + *

    Obtains the permissions boundary for a specified PermissionSet.

    + */ + public getPermissionsBoundaryForPermissionSet( + args: GetPermissionsBoundaryForPermissionSetCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public getPermissionsBoundaryForPermissionSet( + args: GetPermissionsBoundaryForPermissionSetCommandInput, + cb: (err: any, data?: GetPermissionsBoundaryForPermissionSetCommandOutput) => void + ): void; + public getPermissionsBoundaryForPermissionSet( + args: GetPermissionsBoundaryForPermissionSetCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetPermissionsBoundaryForPermissionSetCommandOutput) => void + ): void; + public getPermissionsBoundaryForPermissionSet( + args: GetPermissionsBoundaryForPermissionSetCommandInput, + optionsOrCb?: + | __HttpHandlerOptions + | ((err: any, data?: GetPermissionsBoundaryForPermissionSetCommandOutput) => void), + cb?: (err: any, data?: GetPermissionsBoundaryForPermissionSetCommandOutput) => void + ): Promise | void { + const command = new GetPermissionsBoundaryForPermissionSetCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

    Lists the status of the Amazon Web Services account assignment creation requests for a specified SSO * instance.

    @@ -826,6 +985,40 @@ export class SSOAdmin extends SSOAdminClient { } } + /** + *

    Lists all IAM customer managed policies attached to a specified PermissionSet.

    + */ + public listCustomerManagedPolicyReferencesInPermissionSet( + args: ListCustomerManagedPolicyReferencesInPermissionSetCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listCustomerManagedPolicyReferencesInPermissionSet( + args: ListCustomerManagedPolicyReferencesInPermissionSetCommandInput, + cb: (err: any, data?: ListCustomerManagedPolicyReferencesInPermissionSetCommandOutput) => void + ): void; + public listCustomerManagedPolicyReferencesInPermissionSet( + args: ListCustomerManagedPolicyReferencesInPermissionSetCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListCustomerManagedPolicyReferencesInPermissionSetCommandOutput) => void + ): void; + public listCustomerManagedPolicyReferencesInPermissionSet( + args: ListCustomerManagedPolicyReferencesInPermissionSetCommandInput, + optionsOrCb?: + | __HttpHandlerOptions + | ((err: any, data?: ListCustomerManagedPolicyReferencesInPermissionSetCommandOutput) => void), + cb?: (err: any, data?: ListCustomerManagedPolicyReferencesInPermissionSetCommandOutput) => void + ): Promise | void { + const command = new ListCustomerManagedPolicyReferencesInPermissionSetCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

    Lists the SSO instances that the caller has access to.

    */ @@ -859,7 +1052,7 @@ export class SSOAdmin extends SSOAdminClient { } /** - *

    Lists the IAM managed policy that is attached to a specified permission set.

    + *

    Lists the Amazon Web Services managed IAM policy that is attached to a specified permission set.

    */ public listManagedPoliciesInPermissionSet( args: ListManagedPoliciesInPermissionSetCommandInput, @@ -1093,6 +1286,40 @@ export class SSOAdmin extends SSOAdminClient { } } + /** + *

    Attaches an Amazon Web Services managed or customer managed IAM policy to the specified PermissionSet as a permissions boundary.

    + */ + public putPermissionsBoundaryToPermissionSet( + args: PutPermissionsBoundaryToPermissionSetCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public putPermissionsBoundaryToPermissionSet( + args: PutPermissionsBoundaryToPermissionSetCommandInput, + cb: (err: any, data?: PutPermissionsBoundaryToPermissionSetCommandOutput) => void + ): void; + public putPermissionsBoundaryToPermissionSet( + args: PutPermissionsBoundaryToPermissionSetCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: PutPermissionsBoundaryToPermissionSetCommandOutput) => void + ): void; + public putPermissionsBoundaryToPermissionSet( + args: PutPermissionsBoundaryToPermissionSetCommandInput, + optionsOrCb?: + | __HttpHandlerOptions + | ((err: any, data?: PutPermissionsBoundaryToPermissionSetCommandOutput) => void), + cb?: (err: any, data?: PutPermissionsBoundaryToPermissionSetCommandOutput) => void + ): Promise | void { + const command = new PutPermissionsBoundaryToPermissionSetCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

    Associates a set of tags with a specified resource.

    */ diff --git a/clients/client-sso-admin/src/SSOAdminClient.ts b/clients/client-sso-admin/src/SSOAdminClient.ts index baa86cd199d38..24faff675c1dc 100644 --- a/clients/client-sso-admin/src/SSOAdminClient.ts +++ b/clients/client-sso-admin/src/SSOAdminClient.ts @@ -53,6 +53,10 @@ import { UserAgent as __UserAgent, } from "@aws-sdk/types"; +import { + AttachCustomerManagedPolicyReferenceToPermissionSetCommandInput, + AttachCustomerManagedPolicyReferenceToPermissionSetCommandOutput, +} from "./commands/AttachCustomerManagedPolicyReferenceToPermissionSetCommand"; import { AttachManagedPolicyToPermissionSetCommandInput, AttachManagedPolicyToPermissionSetCommandOutput, @@ -81,6 +85,10 @@ import { DeleteInstanceAccessControlAttributeConfigurationCommandInput, DeleteInstanceAccessControlAttributeConfigurationCommandOutput, } from "./commands/DeleteInstanceAccessControlAttributeConfigurationCommand"; +import { + DeletePermissionsBoundaryFromPermissionSetCommandInput, + DeletePermissionsBoundaryFromPermissionSetCommandOutput, +} from "./commands/DeletePermissionsBoundaryFromPermissionSetCommand"; import { DeletePermissionSetCommandInput, DeletePermissionSetCommandOutput, @@ -105,6 +113,10 @@ import { DescribePermissionSetProvisioningStatusCommandInput, DescribePermissionSetProvisioningStatusCommandOutput, } from "./commands/DescribePermissionSetProvisioningStatusCommand"; +import { + DetachCustomerManagedPolicyReferenceFromPermissionSetCommandInput, + DetachCustomerManagedPolicyReferenceFromPermissionSetCommandOutput, +} from "./commands/DetachCustomerManagedPolicyReferenceFromPermissionSetCommand"; import { DetachManagedPolicyFromPermissionSetCommandInput, DetachManagedPolicyFromPermissionSetCommandOutput, @@ -113,6 +125,10 @@ import { GetInlinePolicyForPermissionSetCommandInput, GetInlinePolicyForPermissionSetCommandOutput, } from "./commands/GetInlinePolicyForPermissionSetCommand"; +import { + GetPermissionsBoundaryForPermissionSetCommandInput, + 
GetPermissionsBoundaryForPermissionSetCommandOutput, +} from "./commands/GetPermissionsBoundaryForPermissionSetCommand"; import { ListAccountAssignmentCreationStatusCommandInput, ListAccountAssignmentCreationStatusCommandOutput, @@ -129,6 +145,10 @@ import { ListAccountsForProvisionedPermissionSetCommandInput, ListAccountsForProvisionedPermissionSetCommandOutput, } from "./commands/ListAccountsForProvisionedPermissionSetCommand"; +import { + ListCustomerManagedPolicyReferencesInPermissionSetCommandInput, + ListCustomerManagedPolicyReferencesInPermissionSetCommandOutput, +} from "./commands/ListCustomerManagedPolicyReferencesInPermissionSetCommand"; import { ListInstancesCommandInput, ListInstancesCommandOutput } from "./commands/ListInstancesCommand"; import { ListManagedPoliciesInPermissionSetCommandInput, @@ -155,6 +175,10 @@ import { PutInlinePolicyToPermissionSetCommandInput, PutInlinePolicyToPermissionSetCommandOutput, } from "./commands/PutInlinePolicyToPermissionSetCommand"; +import { + PutPermissionsBoundaryToPermissionSetCommandInput, + PutPermissionsBoundaryToPermissionSetCommandOutput, +} from "./commands/PutPermissionsBoundaryToPermissionSetCommand"; import { TagResourceCommandInput, TagResourceCommandOutput } from "./commands/TagResourceCommand"; import { UntagResourceCommandInput, UntagResourceCommandOutput } from "./commands/UntagResourceCommand"; import { @@ -168,6 +192,7 @@ import { import { getRuntimeConfig as __getRuntimeConfig } from "./runtimeConfig"; export type ServiceInputTypes = + | AttachCustomerManagedPolicyReferenceToPermissionSetCommandInput | AttachManagedPolicyToPermissionSetCommandInput | CreateAccountAssignmentCommandInput | CreateInstanceAccessControlAttributeConfigurationCommandInput @@ -176,17 +201,21 @@ export type ServiceInputTypes = | DeleteInlinePolicyFromPermissionSetCommandInput | DeleteInstanceAccessControlAttributeConfigurationCommandInput | DeletePermissionSetCommandInput + | 
DeletePermissionsBoundaryFromPermissionSetCommandInput | DescribeAccountAssignmentCreationStatusCommandInput | DescribeAccountAssignmentDeletionStatusCommandInput | DescribeInstanceAccessControlAttributeConfigurationCommandInput | DescribePermissionSetCommandInput | DescribePermissionSetProvisioningStatusCommandInput + | DetachCustomerManagedPolicyReferenceFromPermissionSetCommandInput | DetachManagedPolicyFromPermissionSetCommandInput | GetInlinePolicyForPermissionSetCommandInput + | GetPermissionsBoundaryForPermissionSetCommandInput | ListAccountAssignmentCreationStatusCommandInput | ListAccountAssignmentDeletionStatusCommandInput | ListAccountAssignmentsCommandInput | ListAccountsForProvisionedPermissionSetCommandInput + | ListCustomerManagedPolicyReferencesInPermissionSetCommandInput | ListInstancesCommandInput | ListManagedPoliciesInPermissionSetCommandInput | ListPermissionSetProvisioningStatusCommandInput @@ -195,12 +224,14 @@ export type ServiceInputTypes = | ListTagsForResourceCommandInput | ProvisionPermissionSetCommandInput | PutInlinePolicyToPermissionSetCommandInput + | PutPermissionsBoundaryToPermissionSetCommandInput | TagResourceCommandInput | UntagResourceCommandInput | UpdateInstanceAccessControlAttributeConfigurationCommandInput | UpdatePermissionSetCommandInput; export type ServiceOutputTypes = + | AttachCustomerManagedPolicyReferenceToPermissionSetCommandOutput | AttachManagedPolicyToPermissionSetCommandOutput | CreateAccountAssignmentCommandOutput | CreateInstanceAccessControlAttributeConfigurationCommandOutput @@ -209,17 +240,21 @@ export type ServiceOutputTypes = | DeleteInlinePolicyFromPermissionSetCommandOutput | DeleteInstanceAccessControlAttributeConfigurationCommandOutput | DeletePermissionSetCommandOutput + | DeletePermissionsBoundaryFromPermissionSetCommandOutput | DescribeAccountAssignmentCreationStatusCommandOutput | DescribeAccountAssignmentDeletionStatusCommandOutput | 
DescribeInstanceAccessControlAttributeConfigurationCommandOutput | DescribePermissionSetCommandOutput | DescribePermissionSetProvisioningStatusCommandOutput + | DetachCustomerManagedPolicyReferenceFromPermissionSetCommandOutput | DetachManagedPolicyFromPermissionSetCommandOutput | GetInlinePolicyForPermissionSetCommandOutput + | GetPermissionsBoundaryForPermissionSetCommandOutput | ListAccountAssignmentCreationStatusCommandOutput | ListAccountAssignmentDeletionStatusCommandOutput | ListAccountAssignmentsCommandOutput | ListAccountsForProvisionedPermissionSetCommandOutput + | ListCustomerManagedPolicyReferencesInPermissionSetCommandOutput | ListInstancesCommandOutput | ListManagedPoliciesInPermissionSetCommandOutput | ListPermissionSetProvisioningStatusCommandOutput @@ -228,6 +263,7 @@ export type ServiceOutputTypes = | ListTagsForResourceCommandOutput | ProvisionPermissionSetCommandOutput | PutInlinePolicyToPermissionSetCommandOutput + | PutPermissionsBoundaryToPermissionSetCommandOutput | TagResourceCommandOutput | UntagResourceCommandOutput | UpdateInstanceAccessControlAttributeConfigurationCommandOutput @@ -387,14 +423,7 @@ type SSOAdminClientResolvedConfigType = __SmithyResolvedConfiguration<__HttpHand export interface SSOAdminClientResolvedConfig extends SSOAdminClientResolvedConfigType {} /** - *

    Amazon Web Services Single Sign On (SSO) is a cloud SSO service that makes it easy to centrally manage SSO - * access to multiple Amazon Web Services accounts and business applications. This guide provides information on - * SSO operations which could be used for access management of Amazon Web Services accounts. For information about - * Amazon Web Services SSO features, see the Amazon Web Services Single Sign-On User Guide.

    - *

    Many operations in the SSO APIs rely on identifiers for users and groups, known as - * principals. For more information about how to work with principals and principal IDs in Amazon Web Services SSO, - * see the Amazon Web Services SSO Identity Store API - * Reference.

    + *

    */ export class SSOAdminClient extends __Client< __HttpHandlerOptions, diff --git a/clients/client-sso-admin/src/commands/AttachCustomerManagedPolicyReferenceToPermissionSetCommand.ts b/clients/client-sso-admin/src/commands/AttachCustomerManagedPolicyReferenceToPermissionSetCommand.ts new file mode 100644 index 0000000000000..9e4e350c54b95 --- /dev/null +++ b/clients/client-sso-admin/src/commands/AttachCustomerManagedPolicyReferenceToPermissionSetCommand.ts @@ -0,0 +1,111 @@ +// smithy-typescript generated code +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { + AttachCustomerManagedPolicyReferenceToPermissionSetRequest, + AttachCustomerManagedPolicyReferenceToPermissionSetResponse, +} from "../models/models_0"; +import { + deserializeAws_json1_1AttachCustomerManagedPolicyReferenceToPermissionSetCommand, + serializeAws_json1_1AttachCustomerManagedPolicyReferenceToPermissionSetCommand, +} from "../protocols/Aws_json1_1"; +import { ServiceInputTypes, ServiceOutputTypes, SSOAdminClientResolvedConfig } from "../SSOAdminClient"; + +export interface AttachCustomerManagedPolicyReferenceToPermissionSetCommandInput + extends AttachCustomerManagedPolicyReferenceToPermissionSetRequest {} +export interface AttachCustomerManagedPolicyReferenceToPermissionSetCommandOutput + extends AttachCustomerManagedPolicyReferenceToPermissionSetResponse, + __MetadataBearer {} + +/** + *

    Attaches the specified IAM customer managed policy to the specified PermissionSet.

    + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { SSOAdminClient, AttachCustomerManagedPolicyReferenceToPermissionSetCommand } from "@aws-sdk/client-sso-admin"; // ES Modules import + * // const { SSOAdminClient, AttachCustomerManagedPolicyReferenceToPermissionSetCommand } = require("@aws-sdk/client-sso-admin"); // CommonJS import + * const client = new SSOAdminClient(config); + * const command = new AttachCustomerManagedPolicyReferenceToPermissionSetCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link AttachCustomerManagedPolicyReferenceToPermissionSetCommandInput} for command's `input` shape. + * @see {@link AttachCustomerManagedPolicyReferenceToPermissionSetCommandOutput} for command's `response` shape. + * @see {@link SSOAdminClientResolvedConfig | config} for SSOAdminClient's `config` shape. + * + */ +export class AttachCustomerManagedPolicyReferenceToPermissionSetCommand extends $Command< + AttachCustomerManagedPolicyReferenceToPermissionSetCommandInput, + AttachCustomerManagedPolicyReferenceToPermissionSetCommandOutput, + SSOAdminClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: AttachCustomerManagedPolicyReferenceToPermissionSetCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: SSOAdminClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler< + AttachCustomerManagedPolicyReferenceToPermissionSetCommandInput, + AttachCustomerManagedPolicyReferenceToPermissionSetCommandOutput + > { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = 
"SSOAdminClient"; + const commandName = "AttachCustomerManagedPolicyReferenceToPermissionSetCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: AttachCustomerManagedPolicyReferenceToPermissionSetRequest.filterSensitiveLog, + outputFilterSensitiveLog: AttachCustomerManagedPolicyReferenceToPermissionSetResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize( + input: AttachCustomerManagedPolicyReferenceToPermissionSetCommandInput, + context: __SerdeContext + ): Promise<__HttpRequest> { + return serializeAws_json1_1AttachCustomerManagedPolicyReferenceToPermissionSetCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_json1_1AttachCustomerManagedPolicyReferenceToPermissionSetCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-sso-admin/src/commands/AttachManagedPolicyToPermissionSetCommand.ts b/clients/client-sso-admin/src/commands/AttachManagedPolicyToPermissionSetCommand.ts index 65b6403a6107f..c9a1ba64a6e49 100644 --- a/clients/client-sso-admin/src/commands/AttachManagedPolicyToPermissionSetCommand.ts +++ b/clients/client-sso-admin/src/commands/AttachManagedPolicyToPermissionSetCommand.ts @@ -28,7 +28,7 @@ export interface AttachManagedPolicyToPermissionSetCommandOutput __MetadataBearer {} /** - *

    Attaches an IAM managed policy ARN to a permission set.

    + *

    Attaches an Amazon Web Services managed IAM policy ARN to a permission set.

    * *

    If the permission set is already referenced by one or more account assignments, you will * need to call diff --git a/clients/client-sso-admin/src/commands/DeletePermissionsBoundaryFromPermissionSetCommand.ts b/clients/client-sso-admin/src/commands/DeletePermissionsBoundaryFromPermissionSetCommand.ts new file mode 100644 index 0000000000000..abd21f373fd66 --- /dev/null +++ b/clients/client-sso-admin/src/commands/DeletePermissionsBoundaryFromPermissionSetCommand.ts @@ -0,0 +1,111 @@ +// smithy-typescript generated code +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { + DeletePermissionsBoundaryFromPermissionSetRequest, + DeletePermissionsBoundaryFromPermissionSetResponse, +} from "../models/models_0"; +import { + deserializeAws_json1_1DeletePermissionsBoundaryFromPermissionSetCommand, + serializeAws_json1_1DeletePermissionsBoundaryFromPermissionSetCommand, +} from "../protocols/Aws_json1_1"; +import { ServiceInputTypes, ServiceOutputTypes, SSOAdminClientResolvedConfig } from "../SSOAdminClient"; + +export interface DeletePermissionsBoundaryFromPermissionSetCommandInput + extends DeletePermissionsBoundaryFromPermissionSetRequest {} +export interface DeletePermissionsBoundaryFromPermissionSetCommandOutput + extends DeletePermissionsBoundaryFromPermissionSetResponse, + __MetadataBearer {} + +/** + *

    Deletes the permissions boundary from a specified PermissionSet.

    + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { SSOAdminClient, DeletePermissionsBoundaryFromPermissionSetCommand } from "@aws-sdk/client-sso-admin"; // ES Modules import + * // const { SSOAdminClient, DeletePermissionsBoundaryFromPermissionSetCommand } = require("@aws-sdk/client-sso-admin"); // CommonJS import + * const client = new SSOAdminClient(config); + * const command = new DeletePermissionsBoundaryFromPermissionSetCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DeletePermissionsBoundaryFromPermissionSetCommandInput} for command's `input` shape. + * @see {@link DeletePermissionsBoundaryFromPermissionSetCommandOutput} for command's `response` shape. + * @see {@link SSOAdminClientResolvedConfig | config} for SSOAdminClient's `config` shape. + * + */ +export class DeletePermissionsBoundaryFromPermissionSetCommand extends $Command< + DeletePermissionsBoundaryFromPermissionSetCommandInput, + DeletePermissionsBoundaryFromPermissionSetCommandOutput, + SSOAdminClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeletePermissionsBoundaryFromPermissionSetCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: SSOAdminClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler< + DeletePermissionsBoundaryFromPermissionSetCommandInput, + DeletePermissionsBoundaryFromPermissionSetCommandOutput + > { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "SSOAdminClient"; + const commandName = "DeletePermissionsBoundaryFromPermissionSetCommand"; + const 
handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DeletePermissionsBoundaryFromPermissionSetRequest.filterSensitiveLog, + outputFilterSensitiveLog: DeletePermissionsBoundaryFromPermissionSetResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize( + input: DeletePermissionsBoundaryFromPermissionSetCommandInput, + context: __SerdeContext + ): Promise<__HttpRequest> { + return serializeAws_json1_1DeletePermissionsBoundaryFromPermissionSetCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_json1_1DeletePermissionsBoundaryFromPermissionSetCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-sso-admin/src/commands/DetachCustomerManagedPolicyReferenceFromPermissionSetCommand.ts b/clients/client-sso-admin/src/commands/DetachCustomerManagedPolicyReferenceFromPermissionSetCommand.ts new file mode 100644 index 0000000000000..5ff69b1570619 --- /dev/null +++ b/clients/client-sso-admin/src/commands/DetachCustomerManagedPolicyReferenceFromPermissionSetCommand.ts @@ -0,0 +1,111 @@ +// smithy-typescript generated code +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { + 
DetachCustomerManagedPolicyReferenceFromPermissionSetRequest, + DetachCustomerManagedPolicyReferenceFromPermissionSetResponse, +} from "../models/models_0"; +import { + deserializeAws_json1_1DetachCustomerManagedPolicyReferenceFromPermissionSetCommand, + serializeAws_json1_1DetachCustomerManagedPolicyReferenceFromPermissionSetCommand, +} from "../protocols/Aws_json1_1"; +import { ServiceInputTypes, ServiceOutputTypes, SSOAdminClientResolvedConfig } from "../SSOAdminClient"; + +export interface DetachCustomerManagedPolicyReferenceFromPermissionSetCommandInput + extends DetachCustomerManagedPolicyReferenceFromPermissionSetRequest {} +export interface DetachCustomerManagedPolicyReferenceFromPermissionSetCommandOutput + extends DetachCustomerManagedPolicyReferenceFromPermissionSetResponse, + __MetadataBearer {} + +/** + *

    Detaches the specified IAM customer managed policy from the specified PermissionSet.

    + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { SSOAdminClient, DetachCustomerManagedPolicyReferenceFromPermissionSetCommand } from "@aws-sdk/client-sso-admin"; // ES Modules import + * // const { SSOAdminClient, DetachCustomerManagedPolicyReferenceFromPermissionSetCommand } = require("@aws-sdk/client-sso-admin"); // CommonJS import + * const client = new SSOAdminClient(config); + * const command = new DetachCustomerManagedPolicyReferenceFromPermissionSetCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DetachCustomerManagedPolicyReferenceFromPermissionSetCommandInput} for command's `input` shape. + * @see {@link DetachCustomerManagedPolicyReferenceFromPermissionSetCommandOutput} for command's `response` shape. + * @see {@link SSOAdminClientResolvedConfig | config} for SSOAdminClient's `config` shape. + * + */ +export class DetachCustomerManagedPolicyReferenceFromPermissionSetCommand extends $Command< + DetachCustomerManagedPolicyReferenceFromPermissionSetCommandInput, + DetachCustomerManagedPolicyReferenceFromPermissionSetCommandOutput, + SSOAdminClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DetachCustomerManagedPolicyReferenceFromPermissionSetCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: SSOAdminClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler< + DetachCustomerManagedPolicyReferenceFromPermissionSetCommandInput, + DetachCustomerManagedPolicyReferenceFromPermissionSetCommandOutput + > { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const 
clientName = "SSOAdminClient"; + const commandName = "DetachCustomerManagedPolicyReferenceFromPermissionSetCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DetachCustomerManagedPolicyReferenceFromPermissionSetRequest.filterSensitiveLog, + outputFilterSensitiveLog: DetachCustomerManagedPolicyReferenceFromPermissionSetResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize( + input: DetachCustomerManagedPolicyReferenceFromPermissionSetCommandInput, + context: __SerdeContext + ): Promise<__HttpRequest> { + return serializeAws_json1_1DetachCustomerManagedPolicyReferenceFromPermissionSetCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_json1_1DetachCustomerManagedPolicyReferenceFromPermissionSetCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-sso-admin/src/commands/DetachManagedPolicyFromPermissionSetCommand.ts b/clients/client-sso-admin/src/commands/DetachManagedPolicyFromPermissionSetCommand.ts index 81e3699bd728e..a8c8eda27e205 100644 --- a/clients/client-sso-admin/src/commands/DetachManagedPolicyFromPermissionSetCommand.ts +++ b/clients/client-sso-admin/src/commands/DetachManagedPolicyFromPermissionSetCommand.ts @@ -28,7 +28,7 @@ export interface DetachManagedPolicyFromPermissionSetCommandOutput __MetadataBearer {} /** - *

    Detaches the attached IAM managed policy ARN from the specified permission set.

    + *

    Detaches the attached Amazon Web Services managed IAM policy ARN from the specified permission set.

    * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-sso-admin/src/commands/GetPermissionsBoundaryForPermissionSetCommand.ts b/clients/client-sso-admin/src/commands/GetPermissionsBoundaryForPermissionSetCommand.ts new file mode 100644 index 0000000000000..1611717d41b50 --- /dev/null +++ b/clients/client-sso-admin/src/commands/GetPermissionsBoundaryForPermissionSetCommand.ts @@ -0,0 +1,108 @@ +// smithy-typescript generated code +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { + GetPermissionsBoundaryForPermissionSetRequest, + GetPermissionsBoundaryForPermissionSetResponse, +} from "../models/models_0"; +import { + deserializeAws_json1_1GetPermissionsBoundaryForPermissionSetCommand, + serializeAws_json1_1GetPermissionsBoundaryForPermissionSetCommand, +} from "../protocols/Aws_json1_1"; +import { ServiceInputTypes, ServiceOutputTypes, SSOAdminClientResolvedConfig } from "../SSOAdminClient"; + +export interface GetPermissionsBoundaryForPermissionSetCommandInput + extends GetPermissionsBoundaryForPermissionSetRequest {} +export interface GetPermissionsBoundaryForPermissionSetCommandOutput + extends GetPermissionsBoundaryForPermissionSetResponse, + __MetadataBearer {} + +/** + *

    Obtains the permissions boundary for a specified PermissionSet.

    + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { SSOAdminClient, GetPermissionsBoundaryForPermissionSetCommand } from "@aws-sdk/client-sso-admin"; // ES Modules import + * // const { SSOAdminClient, GetPermissionsBoundaryForPermissionSetCommand } = require("@aws-sdk/client-sso-admin"); // CommonJS import + * const client = new SSOAdminClient(config); + * const command = new GetPermissionsBoundaryForPermissionSetCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetPermissionsBoundaryForPermissionSetCommandInput} for command's `input` shape. + * @see {@link GetPermissionsBoundaryForPermissionSetCommandOutput} for command's `response` shape. + * @see {@link SSOAdminClientResolvedConfig | config} for SSOAdminClient's `config` shape. + * + */ +export class GetPermissionsBoundaryForPermissionSetCommand extends $Command< + GetPermissionsBoundaryForPermissionSetCommandInput, + GetPermissionsBoundaryForPermissionSetCommandOutput, + SSOAdminClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetPermissionsBoundaryForPermissionSetCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: SSOAdminClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "SSOAdminClient"; + const commandName = "GetPermissionsBoundaryForPermissionSetCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: 
GetPermissionsBoundaryForPermissionSetRequest.filterSensitiveLog, + outputFilterSensitiveLog: GetPermissionsBoundaryForPermissionSetResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize( + input: GetPermissionsBoundaryForPermissionSetCommandInput, + context: __SerdeContext + ): Promise<__HttpRequest> { + return serializeAws_json1_1GetPermissionsBoundaryForPermissionSetCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_json1_1GetPermissionsBoundaryForPermissionSetCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-sso-admin/src/commands/ListCustomerManagedPolicyReferencesInPermissionSetCommand.ts b/clients/client-sso-admin/src/commands/ListCustomerManagedPolicyReferencesInPermissionSetCommand.ts new file mode 100644 index 0000000000000..d91e42ce88602 --- /dev/null +++ b/clients/client-sso-admin/src/commands/ListCustomerManagedPolicyReferencesInPermissionSetCommand.ts @@ -0,0 +1,111 @@ +// smithy-typescript generated code +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { + ListCustomerManagedPolicyReferencesInPermissionSetRequest, + ListCustomerManagedPolicyReferencesInPermissionSetResponse, +} from "../models/models_0"; +import { + 
deserializeAws_json1_1ListCustomerManagedPolicyReferencesInPermissionSetCommand, + serializeAws_json1_1ListCustomerManagedPolicyReferencesInPermissionSetCommand, +} from "../protocols/Aws_json1_1"; +import { ServiceInputTypes, ServiceOutputTypes, SSOAdminClientResolvedConfig } from "../SSOAdminClient"; + +export interface ListCustomerManagedPolicyReferencesInPermissionSetCommandInput + extends ListCustomerManagedPolicyReferencesInPermissionSetRequest {} +export interface ListCustomerManagedPolicyReferencesInPermissionSetCommandOutput + extends ListCustomerManagedPolicyReferencesInPermissionSetResponse, + __MetadataBearer {} + +/** + *

    Lists all IAM customer managed policies attached to a specified PermissionSet.

    + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { SSOAdminClient, ListCustomerManagedPolicyReferencesInPermissionSetCommand } from "@aws-sdk/client-sso-admin"; // ES Modules import + * // const { SSOAdminClient, ListCustomerManagedPolicyReferencesInPermissionSetCommand } = require("@aws-sdk/client-sso-admin"); // CommonJS import + * const client = new SSOAdminClient(config); + * const command = new ListCustomerManagedPolicyReferencesInPermissionSetCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListCustomerManagedPolicyReferencesInPermissionSetCommandInput} for command's `input` shape. + * @see {@link ListCustomerManagedPolicyReferencesInPermissionSetCommandOutput} for command's `response` shape. + * @see {@link SSOAdminClientResolvedConfig | config} for SSOAdminClient's `config` shape. + * + */ +export class ListCustomerManagedPolicyReferencesInPermissionSetCommand extends $Command< + ListCustomerManagedPolicyReferencesInPermissionSetCommandInput, + ListCustomerManagedPolicyReferencesInPermissionSetCommandOutput, + SSOAdminClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListCustomerManagedPolicyReferencesInPermissionSetCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: SSOAdminClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler< + ListCustomerManagedPolicyReferencesInPermissionSetCommandInput, + ListCustomerManagedPolicyReferencesInPermissionSetCommandOutput + > { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "SSOAdminClient"; + const 
commandName = "ListCustomerManagedPolicyReferencesInPermissionSetCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListCustomerManagedPolicyReferencesInPermissionSetRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListCustomerManagedPolicyReferencesInPermissionSetResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize( + input: ListCustomerManagedPolicyReferencesInPermissionSetCommandInput, + context: __SerdeContext + ): Promise<__HttpRequest> { + return serializeAws_json1_1ListCustomerManagedPolicyReferencesInPermissionSetCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_json1_1ListCustomerManagedPolicyReferencesInPermissionSetCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-sso-admin/src/commands/ListManagedPoliciesInPermissionSetCommand.ts b/clients/client-sso-admin/src/commands/ListManagedPoliciesInPermissionSetCommand.ts index 6bb477163a33b..c2ff92f1bbc8b 100644 --- a/clients/client-sso-admin/src/commands/ListManagedPoliciesInPermissionSetCommand.ts +++ b/clients/client-sso-admin/src/commands/ListManagedPoliciesInPermissionSetCommand.ts @@ -28,7 +28,7 @@ export interface ListManagedPoliciesInPermissionSetCommandOutput __MetadataBearer {} /** - *

    Lists the IAM managed policy that is attached to a specified permission set.

    + *

    Lists the Amazon Web Services managed IAM policy that is attached to a specified permission set.

    * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-sso-admin/src/commands/PutPermissionsBoundaryToPermissionSetCommand.ts b/clients/client-sso-admin/src/commands/PutPermissionsBoundaryToPermissionSetCommand.ts new file mode 100644 index 0000000000000..432fd05737b64 --- /dev/null +++ b/clients/client-sso-admin/src/commands/PutPermissionsBoundaryToPermissionSetCommand.ts @@ -0,0 +1,108 @@ +// smithy-typescript generated code +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { + PutPermissionsBoundaryToPermissionSetRequest, + PutPermissionsBoundaryToPermissionSetResponse, +} from "../models/models_0"; +import { + deserializeAws_json1_1PutPermissionsBoundaryToPermissionSetCommand, + serializeAws_json1_1PutPermissionsBoundaryToPermissionSetCommand, +} from "../protocols/Aws_json1_1"; +import { ServiceInputTypes, ServiceOutputTypes, SSOAdminClientResolvedConfig } from "../SSOAdminClient"; + +export interface PutPermissionsBoundaryToPermissionSetCommandInput + extends PutPermissionsBoundaryToPermissionSetRequest {} +export interface PutPermissionsBoundaryToPermissionSetCommandOutput + extends PutPermissionsBoundaryToPermissionSetResponse, + __MetadataBearer {} + +/** + *

    Attaches an Amazon Web Services managed or customer managed IAM policy to the specified PermissionSet as a permissions boundary.

    + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { SSOAdminClient, PutPermissionsBoundaryToPermissionSetCommand } from "@aws-sdk/client-sso-admin"; // ES Modules import + * // const { SSOAdminClient, PutPermissionsBoundaryToPermissionSetCommand } = require("@aws-sdk/client-sso-admin"); // CommonJS import + * const client = new SSOAdminClient(config); + * const command = new PutPermissionsBoundaryToPermissionSetCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link PutPermissionsBoundaryToPermissionSetCommandInput} for command's `input` shape. + * @see {@link PutPermissionsBoundaryToPermissionSetCommandOutput} for command's `response` shape. + * @see {@link SSOAdminClientResolvedConfig | config} for SSOAdminClient's `config` shape. + * + */ +export class PutPermissionsBoundaryToPermissionSetCommand extends $Command< + PutPermissionsBoundaryToPermissionSetCommandInput, + PutPermissionsBoundaryToPermissionSetCommandOutput, + SSOAdminClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: PutPermissionsBoundaryToPermissionSetCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: SSOAdminClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "SSOAdminClient"; + const commandName = "PutPermissionsBoundaryToPermissionSetCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: PutPermissionsBoundaryToPermissionSetRequest.filterSensitiveLog, 
+ outputFilterSensitiveLog: PutPermissionsBoundaryToPermissionSetResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize( + input: PutPermissionsBoundaryToPermissionSetCommandInput, + context: __SerdeContext + ): Promise<__HttpRequest> { + return serializeAws_json1_1PutPermissionsBoundaryToPermissionSetCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_json1_1PutPermissionsBoundaryToPermissionSetCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-sso-admin/src/commands/index.ts b/clients/client-sso-admin/src/commands/index.ts index e3c5cd457335e..9183e43b43870 100644 --- a/clients/client-sso-admin/src/commands/index.ts +++ b/clients/client-sso-admin/src/commands/index.ts @@ -1,4 +1,5 @@ // smithy-typescript generated code +export * from "./AttachCustomerManagedPolicyReferenceToPermissionSetCommand"; export * from "./AttachManagedPolicyToPermissionSetCommand"; export * from "./CreateAccountAssignmentCommand"; export * from "./CreateInstanceAccessControlAttributeConfigurationCommand"; @@ -7,17 +8,21 @@ export * from "./DeleteAccountAssignmentCommand"; export * from "./DeleteInlinePolicyFromPermissionSetCommand"; export * from "./DeleteInstanceAccessControlAttributeConfigurationCommand"; export * from "./DeletePermissionSetCommand"; +export * from "./DeletePermissionsBoundaryFromPermissionSetCommand"; export * from "./DescribeAccountAssignmentCreationStatusCommand"; export * from "./DescribeAccountAssignmentDeletionStatusCommand"; export * from "./DescribeInstanceAccessControlAttributeConfigurationCommand"; export * from "./DescribePermissionSetCommand"; export * from 
"./DescribePermissionSetProvisioningStatusCommand"; +export * from "./DetachCustomerManagedPolicyReferenceFromPermissionSetCommand"; export * from "./DetachManagedPolicyFromPermissionSetCommand"; export * from "./GetInlinePolicyForPermissionSetCommand"; +export * from "./GetPermissionsBoundaryForPermissionSetCommand"; export * from "./ListAccountAssignmentCreationStatusCommand"; export * from "./ListAccountAssignmentDeletionStatusCommand"; export * from "./ListAccountAssignmentsCommand"; export * from "./ListAccountsForProvisionedPermissionSetCommand"; +export * from "./ListCustomerManagedPolicyReferencesInPermissionSetCommand"; export * from "./ListInstancesCommand"; export * from "./ListManagedPoliciesInPermissionSetCommand"; export * from "./ListPermissionSetProvisioningStatusCommand"; @@ -26,6 +31,7 @@ export * from "./ListPermissionSetsProvisionedToAccountCommand"; export * from "./ListTagsForResourceCommand"; export * from "./ProvisionPermissionSetCommand"; export * from "./PutInlinePolicyToPermissionSetCommand"; +export * from "./PutPermissionsBoundaryToPermissionSetCommand"; export * from "./TagResourceCommand"; export * from "./UntagResourceCommand"; export * from "./UpdateInstanceAccessControlAttributeConfigurationCommand"; diff --git a/clients/client-sso-admin/src/models/models_0.ts b/clients/client-sso-admin/src/models/models_0.ts index 51b3b88017b5f..5ed9c44fa07d9 100644 --- a/clients/client-sso-admin/src/models/models_0.ts +++ b/clients/client-sso-admin/src/models/models_0.ts @@ -1,10 +1,10 @@ // smithy-typescript generated code -import { ExceptionOptionType as __ExceptionOptionType, SENSITIVE_STRING } from "@aws-sdk/smithy-client"; +import { ExceptionOptionType as __ExceptionOptionType } from "@aws-sdk/smithy-client"; import { SSOAdminServiceException as __BaseException } from "./SSOAdminServiceException"; /** - *

    The value used for mapping a specified attribute to an identity source.

    + *

    The value used for mapping a specified attribute to an identity source. For more information, see Attribute mappings in the Amazon Web Services Single Sign-On User Guide.

    */ export interface AccessControlAttributeValue { /** @@ -222,65 +222,62 @@ export namespace AccountAssignmentOperationStatusMetadata { } /** - *

    A structure that stores the details of the IAM managed policy.

    + *

    Specifies the name and path of the IAM customer managed policy. You must have an IAM policy that matches the name and path in each Amazon Web Services account where you want to deploy your permission set.

    */ -export interface AttachedManagedPolicy { +export interface CustomerManagedPolicyReference { /** - *

    The name of the IAM managed policy.

    + *

    The name of the policy document.

    */ - Name?: string; + Name: string | undefined; /** - *

    The ARN of the IAM managed policy. For more information about ARNs, see Amazon Resource - * Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference.

    + *

    The path for the policy. The default is /. For more information, see Friendly names and paths in the Identity and Access Management user guide.

    */ - Arn?: string; + Path?: string; } -export namespace AttachedManagedPolicy { +export namespace CustomerManagedPolicyReference { /** * @internal */ - export const filterSensitiveLog = (obj: AttachedManagedPolicy): any => ({ + export const filterSensitiveLog = (obj: CustomerManagedPolicyReference): any => ({ ...obj, }); } -export interface AttachManagedPolicyToPermissionSetRequest { +export interface AttachCustomerManagedPolicyReferenceToPermissionSetRequest { /** - *

    The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource - * Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference.

    + *

    The ARN of the SSO instance under which the operation will be executed.

    */ InstanceArn: string | undefined; /** - *

    The ARN of the PermissionSet that the managed policy should be attached - * to.

    + *

    The ARN of the PermissionSet.

    */ PermissionSetArn: string | undefined; /** - *

    The IAM managed policy ARN to be attached to a permission set.

    + *

    Specifies the name and path of the IAM customer managed policy. You must have an IAM policy that matches the name and path in each Amazon Web Services account where you want to deploy your permission set.

    */ - ManagedPolicyArn: string | undefined; + CustomerManagedPolicyReference: CustomerManagedPolicyReference | undefined; } -export namespace AttachManagedPolicyToPermissionSetRequest { +export namespace AttachCustomerManagedPolicyReferenceToPermissionSetRequest { /** * @internal */ - export const filterSensitiveLog = (obj: AttachManagedPolicyToPermissionSetRequest): any => ({ + export const filterSensitiveLog = (obj: AttachCustomerManagedPolicyReferenceToPermissionSetRequest): any => ({ ...obj, }); } -export interface AttachManagedPolicyToPermissionSetResponse {} +export interface AttachCustomerManagedPolicyReferenceToPermissionSetResponse {} -export namespace AttachManagedPolicyToPermissionSetResponse { +export namespace AttachCustomerManagedPolicyReferenceToPermissionSetResponse { /** * @internal */ - export const filterSensitiveLog = (obj: AttachManagedPolicyToPermissionSetResponse): any => ({ + export const filterSensitiveLog = (obj: AttachCustomerManagedPolicyReferenceToPermissionSetResponse): any => ({ ...obj, }); } @@ -417,6 +414,70 @@ export class ValidationException extends __BaseException { } } +/** + *

    A structure that stores the details of the Amazon Web Services managed IAM policy.

    + */ +export interface AttachedManagedPolicy { + /** + *

    The name of the Amazon Web Services managed IAM policy.

    + */ + Name?: string; + + /** + *

    The ARN of the Amazon Web Services managed IAM policy. For more information about ARNs, see Amazon Resource + * Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference.

    + */ + Arn?: string; +} + +export namespace AttachedManagedPolicy { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AttachedManagedPolicy): any => ({ + ...obj, + }); +} + +export interface AttachManagedPolicyToPermissionSetRequest { + /** + *

    The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource + * Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference.

    + */ + InstanceArn: string | undefined; + + /** + *

    The ARN of the PermissionSet that the managed policy should be attached + * to.

    + */ + PermissionSetArn: string | undefined; + + /** + *

    The Amazon Web Services managed policy ARN to be attached to a permission set.

    + */ + ManagedPolicyArn: string | undefined; +} + +export namespace AttachManagedPolicyToPermissionSetRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AttachManagedPolicyToPermissionSetRequest): any => ({ + ...obj, + }); +} + +export interface AttachManagedPolicyToPermissionSetResponse {} + +export namespace AttachManagedPolicyToPermissionSetResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AttachManagedPolicyToPermissionSetResponse): any => ({ + ...obj, + }); +} + export interface CreateAccountAssignmentRequest { /** *

    The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource @@ -543,12 +604,12 @@ export interface Tag { /** *

    The key for the tag.

    */ - Key?: string; + Key: string | undefined; /** *

    The value of the tag.

    */ - Value?: string; + Value: string | undefined; } export namespace Tag { @@ -785,6 +846,38 @@ export namespace DeleteInstanceAccessControlAttributeConfigurationResponse { }); } +export interface DeletePermissionsBoundaryFromPermissionSetRequest { + /** + *

    The ARN of the SSO instance under which the operation will be executed.

    + */ + InstanceArn: string | undefined; + + /** + *

    The ARN of the PermissionSet.

    + */ + PermissionSetArn: string | undefined; +} + +export namespace DeletePermissionsBoundaryFromPermissionSetRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeletePermissionsBoundaryFromPermissionSetRequest): any => ({ + ...obj, + }); +} + +export interface DeletePermissionsBoundaryFromPermissionSetResponse {} + +export namespace DeletePermissionsBoundaryFromPermissionSetResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeletePermissionsBoundaryFromPermissionSetResponse): any => ({ + ...obj, + }); +} + export interface DeletePermissionSetRequest { /** *

    The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource @@ -1067,6 +1160,43 @@ export namespace DescribePermissionSetProvisioningStatusResponse { }); } +export interface DetachCustomerManagedPolicyReferenceFromPermissionSetRequest { + /** + *

    The ARN of the SSO instance under which the operation will be executed.

    + */ + InstanceArn: string | undefined; + + /** + *

    The ARN of the PermissionSet.

    + */ + PermissionSetArn: string | undefined; + + /** + *

    Specifies the name and path of the IAM customer managed policy. You must have an IAM policy that matches the name and path in each Amazon Web Services account where you want to deploy your permission set.

    + */ + CustomerManagedPolicyReference: CustomerManagedPolicyReference | undefined; +} + +export namespace DetachCustomerManagedPolicyReferenceFromPermissionSetRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DetachCustomerManagedPolicyReferenceFromPermissionSetRequest): any => ({ + ...obj, + }); +} + +export interface DetachCustomerManagedPolicyReferenceFromPermissionSetResponse {} + +export namespace DetachCustomerManagedPolicyReferenceFromPermissionSetResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DetachCustomerManagedPolicyReferenceFromPermissionSetResponse): any => ({ + ...obj, + }); +} + export interface DetachManagedPolicyFromPermissionSetRequest { /** *

    The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource @@ -1081,7 +1211,7 @@ export interface DetachManagedPolicyFromPermissionSetRequest { PermissionSetArn: string | undefined; /** - *

    The IAM managed policy ARN to be attached to a permission set.

    + *

    The Amazon Web Services managed policy ARN to be detached from a permission set.

    */ ManagedPolicyArn: string | undefined; } @@ -1141,7 +1271,72 @@ export namespace GetInlinePolicyForPermissionSetResponse { */ export const filterSensitiveLog = (obj: GetInlinePolicyForPermissionSetResponse): any => ({ ...obj, - ...(obj.InlinePolicy && { InlinePolicy: SENSITIVE_STRING }), + }); +} + +export interface GetPermissionsBoundaryForPermissionSetRequest { + /** + *

    The ARN of the SSO instance under which the operation will be executed.

    + */ + InstanceArn: string | undefined; + + /** + *

    The ARN of the PermissionSet.

    + */ + PermissionSetArn: string | undefined; +} + +export namespace GetPermissionsBoundaryForPermissionSetRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetPermissionsBoundaryForPermissionSetRequest): any => ({ + ...obj, + }); +} + +/** + *

    Specifies the configuration of the Amazon Web Services managed or customer managed policy that you want to set as a permissions boundary. Specify either CustomerManagedPolicyReference + * to use the name and path of a customer managed policy, or ManagedPolicyArn to use the ARN of an Amazon Web Services managed IAM policy. A permissions boundary represents the maximum permissions that any policy + * can grant your role. For more information, see Permissions boundaries for IAM entities in the Identity and Access Management User Guide.

    + * + *

    Policies used as permissions boundaries do not provide permissions. You must also attach an IAM policy to the role. To learn how the effective permissions for a role are evaluated, see IAM JSON policy evaluation logic in the Identity and Access Management User Guide.

    + *
    + */ +export interface PermissionsBoundary { + /** + *

    Specifies the name and path of the IAM customer managed policy. You must have an IAM policy that matches the name and path in each Amazon Web Services account where you want to deploy your permission set.

    + */ + CustomerManagedPolicyReference?: CustomerManagedPolicyReference; + + /** + *

    The Amazon Web Services managed policy ARN that you want to attach to a permission set as a permissions boundary.

    + */ + ManagedPolicyArn?: string; +} + +export namespace PermissionsBoundary { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PermissionsBoundary): any => ({ + ...obj, + }); +} + +export interface GetPermissionsBoundaryForPermissionSetResponse { + /** + *

    The permissions boundary attached to the specified permission set.

    + */ + PermissionsBoundary?: PermissionsBoundary; +} + +export namespace GetPermissionsBoundaryForPermissionSetResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetPermissionsBoundaryForPermissionSetResponse): any => ({ + ...obj, }); } @@ -1417,6 +1612,58 @@ export namespace ListAccountsForProvisionedPermissionSetResponse { }); } +export interface ListCustomerManagedPolicyReferencesInPermissionSetRequest { + /** + *

    The ARN of the SSO instance under which the operation will be executed.

    + */ + InstanceArn: string | undefined; + + /** + *

    The ARN of the PermissionSet.

    + */ + PermissionSetArn: string | undefined; + + /** + *

    The maximum number of results to display for the list call.

    + */ + MaxResults?: number; + + /** + *

    The pagination token for the list API. Initially the value is null. Use the output of previous API calls to make subsequent calls.

    + */ + NextToken?: string; +} + +export namespace ListCustomerManagedPolicyReferencesInPermissionSetRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListCustomerManagedPolicyReferencesInPermissionSetRequest): any => ({ + ...obj, + }); +} + +export interface ListCustomerManagedPolicyReferencesInPermissionSetResponse { + /** + *

    Specifies the names and paths of the IAM customer managed policies that you have attached to your permission set.

    + */ + CustomerManagedPolicyReferences?: CustomerManagedPolicyReference[]; + + /** + *

    The pagination token for the list API. Initially the value is null. Use the output of previous API calls to make subsequent calls.

    + */ + NextToken?: string; +} + +export namespace ListCustomerManagedPolicyReferencesInPermissionSetResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListCustomerManagedPolicyReferencesInPermissionSetResponse): any => ({ + ...obj, + }); +} + export interface ListInstancesRequest { /** *

    The maximum number of results to display for the instance.

    @@ -1827,7 +2074,6 @@ export namespace PutInlinePolicyToPermissionSetRequest { */ export const filterSensitiveLog = (obj: PutInlinePolicyToPermissionSetRequest): any => ({ ...obj, - ...(obj.InlinePolicy && { InlinePolicy: SENSITIVE_STRING }), }); } @@ -1842,6 +2088,43 @@ export namespace PutInlinePolicyToPermissionSetResponse { }); } +export interface PutPermissionsBoundaryToPermissionSetRequest { + /** + *

    The ARN of the SSO instance under which the operation will be executed.

    + */ + InstanceArn: string | undefined; + + /** + *

    The ARN of the PermissionSet.

    + */ + PermissionSetArn: string | undefined; + + /** + *

    The permissions boundary that you want to attach to a PermissionSet.

    + */ + PermissionsBoundary: PermissionsBoundary | undefined; +} + +export namespace PutPermissionsBoundaryToPermissionSetRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PutPermissionsBoundaryToPermissionSetRequest): any => ({ + ...obj, + }); +} + +export interface PutPermissionsBoundaryToPermissionSetResponse {} + +export namespace PutPermissionsBoundaryToPermissionSetResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PutPermissionsBoundaryToPermissionSetResponse): any => ({ + ...obj, + }); +} + export interface TagResourceRequest { /** *

    The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource diff --git a/clients/client-sso-admin/src/pagination/ListCustomerManagedPolicyReferencesInPermissionSetPaginator.ts b/clients/client-sso-admin/src/pagination/ListCustomerManagedPolicyReferencesInPermissionSetPaginator.ts new file mode 100644 index 0000000000000..3060e1542838e --- /dev/null +++ b/clients/client-sso-admin/src/pagination/ListCustomerManagedPolicyReferencesInPermissionSetPaginator.ts @@ -0,0 +1,61 @@ +// smithy-typescript generated code +import { Paginator } from "@aws-sdk/types"; + +import { + ListCustomerManagedPolicyReferencesInPermissionSetCommand, + ListCustomerManagedPolicyReferencesInPermissionSetCommandInput, + ListCustomerManagedPolicyReferencesInPermissionSetCommandOutput, +} from "../commands/ListCustomerManagedPolicyReferencesInPermissionSetCommand"; +import { SSOAdmin } from "../SSOAdmin"; +import { SSOAdminClient } from "../SSOAdminClient"; +import { SSOAdminPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: SSOAdminClient, + input: ListCustomerManagedPolicyReferencesInPermissionSetCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListCustomerManagedPolicyReferencesInPermissionSetCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: SSOAdmin, + input: ListCustomerManagedPolicyReferencesInPermissionSetCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listCustomerManagedPolicyReferencesInPermissionSet(input, ...args); +}; +export async function* paginateListCustomerManagedPolicyReferencesInPermissionSet( + config: SSOAdminPaginationConfiguration, + input: ListCustomerManagedPolicyReferencesInPermissionSetCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof 
input.NextToken + let token: typeof input.NextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListCustomerManagedPolicyReferencesInPermissionSetCommandOutput; + while (hasNext) { + input.NextToken = token; + input["MaxResults"] = config.pageSize; + if (config.client instanceof SSOAdmin) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof SSOAdminClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected SSOAdmin | SSOAdminClient"); + } + yield page; + const prevToken = token; + token = page.NextToken; + hasNext = !!(token && (!config.stopOnSameToken || token !== prevToken)); + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-sso-admin/src/pagination/index.ts b/clients/client-sso-admin/src/pagination/index.ts index 76ebc9f7594dd..72d5725a589dc 100644 --- a/clients/client-sso-admin/src/pagination/index.ts +++ b/clients/client-sso-admin/src/pagination/index.ts @@ -4,6 +4,7 @@ export * from "./ListAccountAssignmentCreationStatusPaginator"; export * from "./ListAccountAssignmentDeletionStatusPaginator"; export * from "./ListAccountAssignmentsPaginator"; export * from "./ListAccountsForProvisionedPermissionSetPaginator"; +export * from "./ListCustomerManagedPolicyReferencesInPermissionSetPaginator"; export * from "./ListInstancesPaginator"; export * from "./ListManagedPoliciesInPermissionSetPaginator"; export * from "./ListPermissionSetProvisioningStatusPaginator"; diff --git a/clients/client-sso-admin/src/protocols/Aws_json1_1.ts b/clients/client-sso-admin/src/protocols/Aws_json1_1.ts index 109a7c0468eea..bd1d5ce354c32 100644 --- a/clients/client-sso-admin/src/protocols/Aws_json1_1.ts +++ b/clients/client-sso-admin/src/protocols/Aws_json1_1.ts @@ -14,6 +14,10 @@ import { SerdeContext as __SerdeContext, } from "@aws-sdk/types"; +import { + 
AttachCustomerManagedPolicyReferenceToPermissionSetCommandInput, + AttachCustomerManagedPolicyReferenceToPermissionSetCommandOutput, +} from "../commands/AttachCustomerManagedPolicyReferenceToPermissionSetCommand"; import { AttachManagedPolicyToPermissionSetCommandInput, AttachManagedPolicyToPermissionSetCommandOutput, @@ -42,6 +46,10 @@ import { DeleteInstanceAccessControlAttributeConfigurationCommandInput, DeleteInstanceAccessControlAttributeConfigurationCommandOutput, } from "../commands/DeleteInstanceAccessControlAttributeConfigurationCommand"; +import { + DeletePermissionsBoundaryFromPermissionSetCommandInput, + DeletePermissionsBoundaryFromPermissionSetCommandOutput, +} from "../commands/DeletePermissionsBoundaryFromPermissionSetCommand"; import { DeletePermissionSetCommandInput, DeletePermissionSetCommandOutput, @@ -66,6 +74,10 @@ import { DescribePermissionSetProvisioningStatusCommandInput, DescribePermissionSetProvisioningStatusCommandOutput, } from "../commands/DescribePermissionSetProvisioningStatusCommand"; +import { + DetachCustomerManagedPolicyReferenceFromPermissionSetCommandInput, + DetachCustomerManagedPolicyReferenceFromPermissionSetCommandOutput, +} from "../commands/DetachCustomerManagedPolicyReferenceFromPermissionSetCommand"; import { DetachManagedPolicyFromPermissionSetCommandInput, DetachManagedPolicyFromPermissionSetCommandOutput, @@ -74,6 +86,10 @@ import { GetInlinePolicyForPermissionSetCommandInput, GetInlinePolicyForPermissionSetCommandOutput, } from "../commands/GetInlinePolicyForPermissionSetCommand"; +import { + GetPermissionsBoundaryForPermissionSetCommandInput, + GetPermissionsBoundaryForPermissionSetCommandOutput, +} from "../commands/GetPermissionsBoundaryForPermissionSetCommand"; import { ListAccountAssignmentCreationStatusCommandInput, ListAccountAssignmentCreationStatusCommandOutput, @@ -90,6 +106,10 @@ import { ListAccountsForProvisionedPermissionSetCommandInput, ListAccountsForProvisionedPermissionSetCommandOutput, } from 
"../commands/ListAccountsForProvisionedPermissionSetCommand"; +import { + ListCustomerManagedPolicyReferencesInPermissionSetCommandInput, + ListCustomerManagedPolicyReferencesInPermissionSetCommandOutput, +} from "../commands/ListCustomerManagedPolicyReferencesInPermissionSetCommand"; import { ListInstancesCommandInput, ListInstancesCommandOutput } from "../commands/ListInstancesCommand"; import { ListManagedPoliciesInPermissionSetCommandInput, @@ -116,6 +136,10 @@ import { PutInlinePolicyToPermissionSetCommandInput, PutInlinePolicyToPermissionSetCommandOutput, } from "../commands/PutInlinePolicyToPermissionSetCommand"; +import { + PutPermissionsBoundaryToPermissionSetCommandInput, + PutPermissionsBoundaryToPermissionSetCommandOutput, +} from "../commands/PutPermissionsBoundaryToPermissionSetCommand"; import { TagResourceCommandInput, TagResourceCommandOutput } from "../commands/TagResourceCommand"; import { UntagResourceCommandInput, UntagResourceCommandOutput } from "../commands/UntagResourceCommand"; import { @@ -133,6 +157,8 @@ import { AccountAssignment, AccountAssignmentOperationStatus, AccountAssignmentOperationStatusMetadata, + AttachCustomerManagedPolicyReferenceToPermissionSetRequest, + AttachCustomerManagedPolicyReferenceToPermissionSetResponse, AttachedManagedPolicy, AttachManagedPolicyToPermissionSetRequest, AttachManagedPolicyToPermissionSetResponse, @@ -143,12 +169,15 @@ import { CreateInstanceAccessControlAttributeConfigurationResponse, CreatePermissionSetRequest, CreatePermissionSetResponse, + CustomerManagedPolicyReference, DeleteAccountAssignmentRequest, DeleteAccountAssignmentResponse, DeleteInlinePolicyFromPermissionSetRequest, DeleteInlinePolicyFromPermissionSetResponse, DeleteInstanceAccessControlAttributeConfigurationRequest, DeleteInstanceAccessControlAttributeConfigurationResponse, + DeletePermissionsBoundaryFromPermissionSetRequest, + DeletePermissionsBoundaryFromPermissionSetResponse, DeletePermissionSetRequest, 
DeletePermissionSetResponse, DescribeAccountAssignmentCreationStatusRequest, @@ -161,10 +190,14 @@ import { DescribePermissionSetProvisioningStatusResponse, DescribePermissionSetRequest, DescribePermissionSetResponse, + DetachCustomerManagedPolicyReferenceFromPermissionSetRequest, + DetachCustomerManagedPolicyReferenceFromPermissionSetResponse, DetachManagedPolicyFromPermissionSetRequest, DetachManagedPolicyFromPermissionSetResponse, GetInlinePolicyForPermissionSetRequest, GetInlinePolicyForPermissionSetResponse, + GetPermissionsBoundaryForPermissionSetRequest, + GetPermissionsBoundaryForPermissionSetResponse, InstanceAccessControlAttributeConfiguration, InstanceMetadata, InternalServerException, @@ -176,6 +209,8 @@ import { ListAccountAssignmentsResponse, ListAccountsForProvisionedPermissionSetRequest, ListAccountsForProvisionedPermissionSetResponse, + ListCustomerManagedPolicyReferencesInPermissionSetRequest, + ListCustomerManagedPolicyReferencesInPermissionSetResponse, ListInstancesRequest, ListInstancesResponse, ListManagedPoliciesInPermissionSetRequest, @@ -189,6 +224,7 @@ import { ListTagsForResourceRequest, ListTagsForResourceResponse, OperationStatusFilter, + PermissionsBoundary, PermissionSet, PermissionSetProvisioningStatus, PermissionSetProvisioningStatusMetadata, @@ -196,6 +232,8 @@ import { ProvisionPermissionSetResponse, PutInlinePolicyToPermissionSetRequest, PutInlinePolicyToPermissionSetResponse, + PutPermissionsBoundaryToPermissionSetRequest, + PutPermissionsBoundaryToPermissionSetResponse, ResourceNotFoundException, ServiceQuotaExceededException, Tag, @@ -212,6 +250,19 @@ import { } from "../models/models_0"; import { SSOAdminServiceException as __BaseException } from "../models/SSOAdminServiceException"; +export const serializeAws_json1_1AttachCustomerManagedPolicyReferenceToPermissionSetCommand = async ( + input: AttachCustomerManagedPolicyReferenceToPermissionSetCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const 
headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "SWBExternalService.AttachCustomerManagedPolicyReferenceToPermissionSet", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1AttachCustomerManagedPolicyReferenceToPermissionSetRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1AttachManagedPolicyToPermissionSetCommand = async ( input: AttachManagedPolicyToPermissionSetCommandInput, context: __SerdeContext @@ -303,6 +354,19 @@ export const serializeAws_json1_1DeleteInstanceAccessControlAttributeConfigurati return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1DeletePermissionsBoundaryFromPermissionSetCommand = async ( + input: DeletePermissionsBoundaryFromPermissionSetCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "SWBExternalService.DeletePermissionsBoundaryFromPermissionSet", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1DeletePermissionsBoundaryFromPermissionSetRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1DeletePermissionSetCommand = async ( input: DeletePermissionSetCommandInput, context: __SerdeContext @@ -381,6 +445,21 @@ export const serializeAws_json1_1DescribePermissionSetProvisioningStatusCommand return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1DetachCustomerManagedPolicyReferenceFromPermissionSetCommand = async ( + input: DetachCustomerManagedPolicyReferenceFromPermissionSetCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": 
"SWBExternalService.DetachCustomerManagedPolicyReferenceFromPermissionSet", + }; + let body: any; + body = JSON.stringify( + serializeAws_json1_1DetachCustomerManagedPolicyReferenceFromPermissionSetRequest(input, context) + ); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1DetachManagedPolicyFromPermissionSetCommand = async ( input: DetachManagedPolicyFromPermissionSetCommandInput, context: __SerdeContext @@ -407,6 +486,19 @@ export const serializeAws_json1_1GetInlinePolicyForPermissionSetCommand = async return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1GetPermissionsBoundaryForPermissionSetCommand = async ( + input: GetPermissionsBoundaryForPermissionSetCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "SWBExternalService.GetPermissionsBoundaryForPermissionSet", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1GetPermissionsBoundaryForPermissionSetRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1ListAccountAssignmentCreationStatusCommand = async ( input: ListAccountAssignmentCreationStatusCommandInput, context: __SerdeContext @@ -459,6 +551,19 @@ export const serializeAws_json1_1ListAccountsForProvisionedPermissionSetCommand return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1ListCustomerManagedPolicyReferencesInPermissionSetCommand = async ( + input: ListCustomerManagedPolicyReferencesInPermissionSetCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "SWBExternalService.ListCustomerManagedPolicyReferencesInPermissionSet", + }; + let body: any; + body = 
JSON.stringify(serializeAws_json1_1ListCustomerManagedPolicyReferencesInPermissionSetRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1ListInstancesCommand = async ( input: ListInstancesCommandInput, context: __SerdeContext @@ -563,6 +668,19 @@ export const serializeAws_json1_1PutInlinePolicyToPermissionSetCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1PutPermissionsBoundaryToPermissionSetCommand = async ( + input: PutPermissionsBoundaryToPermissionSetCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "SWBExternalService.PutPermissionsBoundaryToPermissionSet", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1PutPermissionsBoundaryToPermissionSetRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1TagResourceCommand = async ( input: TagResourceCommandInput, context: __SerdeContext @@ -615,6 +733,68 @@ export const serializeAws_json1_1UpdatePermissionSetCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const deserializeAws_json1_1AttachCustomerManagedPolicyReferenceToPermissionSetCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1AttachCustomerManagedPolicyReferenceToPermissionSetCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_1AttachCustomerManagedPolicyReferenceToPermissionSetResponse(data, context); + const response: AttachCustomerManagedPolicyReferenceToPermissionSetCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + 
}; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1AttachCustomerManagedPolicyReferenceToPermissionSetCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + const errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.ssoadmin#AccessDeniedException": + throw await deserializeAws_json1_1AccessDeniedExceptionResponse(parsedOutput, context); + case "ConflictException": + case "com.amazonaws.ssoadmin#ConflictException": + throw await deserializeAws_json1_1ConflictExceptionResponse(parsedOutput, context); + case "InternalServerException": + case "com.amazonaws.ssoadmin#InternalServerException": + throw await deserializeAws_json1_1InternalServerExceptionResponse(parsedOutput, context); + case "ResourceNotFoundException": + case "com.amazonaws.ssoadmin#ResourceNotFoundException": + throw await deserializeAws_json1_1ResourceNotFoundExceptionResponse(parsedOutput, context); + case "ServiceQuotaExceededException": + case "com.amazonaws.ssoadmin#ServiceQuotaExceededException": + throw await deserializeAws_json1_1ServiceQuotaExceededExceptionResponse(parsedOutput, context); + case "ThrottlingException": + case "com.amazonaws.ssoadmin#ThrottlingException": + throw await deserializeAws_json1_1ThrottlingExceptionResponse(parsedOutput, context); + case "ValidationException": + case "com.amazonaws.ssoadmin#ValidationException": + throw await deserializeAws_json1_1ValidationExceptionResponse(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + const $metadata = deserializeMetadata(output); + const statusCode = $metadata.httpStatusCode ? 
$metadata.httpStatusCode + "" : undefined; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode || statusCode || "UnknowError", + $fault: "client", + $metadata, + }); + throw __decorateServiceException(response, parsedBody); + } +}; + export const deserializeAws_json1_1AttachManagedPolicyToPermissionSetCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -1037,6 +1217,62 @@ const deserializeAws_json1_1DeleteInstanceAccessControlAttributeConfigurationCom } }; +export const deserializeAws_json1_1DeletePermissionsBoundaryFromPermissionSetCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1DeletePermissionsBoundaryFromPermissionSetCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_1DeletePermissionsBoundaryFromPermissionSetResponse(data, context); + const response: DeletePermissionsBoundaryFromPermissionSetCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1DeletePermissionsBoundaryFromPermissionSetCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + const errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.ssoadmin#AccessDeniedException": + throw await deserializeAws_json1_1AccessDeniedExceptionResponse(parsedOutput, context); + case "InternalServerException": + case "com.amazonaws.ssoadmin#InternalServerException": + throw await deserializeAws_json1_1InternalServerExceptionResponse(parsedOutput, context); + case "ResourceNotFoundException": + case 
"com.amazonaws.ssoadmin#ResourceNotFoundException": + throw await deserializeAws_json1_1ResourceNotFoundExceptionResponse(parsedOutput, context); + case "ThrottlingException": + case "com.amazonaws.ssoadmin#ThrottlingException": + throw await deserializeAws_json1_1ThrottlingExceptionResponse(parsedOutput, context); + case "ValidationException": + case "com.amazonaws.ssoadmin#ValidationException": + throw await deserializeAws_json1_1ValidationExceptionResponse(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + const $metadata = deserializeMetadata(output); + const statusCode = $metadata.httpStatusCode ? $metadata.httpStatusCode + "" : undefined; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode || statusCode || "UnknowError", + $fault: "client", + $metadata, + }); + throw __decorateServiceException(response, parsedBody); + } +}; + export const deserializeAws_json1_1DeletePermissionSetCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -1376,6 +1612,65 @@ const deserializeAws_json1_1DescribePermissionSetProvisioningStatusCommandError } }; +export const deserializeAws_json1_1DetachCustomerManagedPolicyReferenceFromPermissionSetCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1DetachCustomerManagedPolicyReferenceFromPermissionSetCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_1DetachCustomerManagedPolicyReferenceFromPermissionSetResponse(data, context); + const response: DetachCustomerManagedPolicyReferenceFromPermissionSetCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1DetachCustomerManagedPolicyReferenceFromPermissionSetCommandError = async ( + output: __HttpResponse, + context: 
__SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + const errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.ssoadmin#AccessDeniedException": + throw await deserializeAws_json1_1AccessDeniedExceptionResponse(parsedOutput, context); + case "ConflictException": + case "com.amazonaws.ssoadmin#ConflictException": + throw await deserializeAws_json1_1ConflictExceptionResponse(parsedOutput, context); + case "InternalServerException": + case "com.amazonaws.ssoadmin#InternalServerException": + throw await deserializeAws_json1_1InternalServerExceptionResponse(parsedOutput, context); + case "ResourceNotFoundException": + case "com.amazonaws.ssoadmin#ResourceNotFoundException": + throw await deserializeAws_json1_1ResourceNotFoundExceptionResponse(parsedOutput, context); + case "ThrottlingException": + case "com.amazonaws.ssoadmin#ThrottlingException": + throw await deserializeAws_json1_1ThrottlingExceptionResponse(parsedOutput, context); + case "ValidationException": + case "com.amazonaws.ssoadmin#ValidationException": + throw await deserializeAws_json1_1ValidationExceptionResponse(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + const $metadata = deserializeMetadata(output); + const statusCode = $metadata.httpStatusCode ? 
$metadata.httpStatusCode + "" : undefined; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode || statusCode || "UnknowError", + $fault: "client", + $metadata, + }); + throw __decorateServiceException(response, parsedBody); + } +}; + export const deserializeAws_json1_1DetachManagedPolicyFromPermissionSetCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -1491,6 +1786,62 @@ const deserializeAws_json1_1GetInlinePolicyForPermissionSetCommandError = async } }; +export const deserializeAws_json1_1GetPermissionsBoundaryForPermissionSetCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1GetPermissionsBoundaryForPermissionSetCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_1GetPermissionsBoundaryForPermissionSetResponse(data, context); + const response: GetPermissionsBoundaryForPermissionSetCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1GetPermissionsBoundaryForPermissionSetCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + const errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.ssoadmin#AccessDeniedException": + throw await deserializeAws_json1_1AccessDeniedExceptionResponse(parsedOutput, context); + case "InternalServerException": + case "com.amazonaws.ssoadmin#InternalServerException": + throw await deserializeAws_json1_1InternalServerExceptionResponse(parsedOutput, context); + case "ResourceNotFoundException": + case 
"com.amazonaws.ssoadmin#ResourceNotFoundException": + throw await deserializeAws_json1_1ResourceNotFoundExceptionResponse(parsedOutput, context); + case "ThrottlingException": + case "com.amazonaws.ssoadmin#ThrottlingException": + throw await deserializeAws_json1_1ThrottlingExceptionResponse(parsedOutput, context); + case "ValidationException": + case "com.amazonaws.ssoadmin#ValidationException": + throw await deserializeAws_json1_1ValidationExceptionResponse(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + const $metadata = deserializeMetadata(output); + const statusCode = $metadata.httpStatusCode ? $metadata.httpStatusCode + "" : undefined; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode || statusCode || "UnknowError", + $fault: "client", + $metadata, + }); + throw __decorateServiceException(response, parsedBody); + } +}; + export const deserializeAws_json1_1ListAccountAssignmentCreationStatusCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -1715,6 +2066,62 @@ const deserializeAws_json1_1ListAccountsForProvisionedPermissionSetCommandError } }; +export const deserializeAws_json1_1ListCustomerManagedPolicyReferencesInPermissionSetCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1ListCustomerManagedPolicyReferencesInPermissionSetCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_1ListCustomerManagedPolicyReferencesInPermissionSetResponse(data, context); + const response: ListCustomerManagedPolicyReferencesInPermissionSetCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1ListCustomerManagedPolicyReferencesInPermissionSetCommandError = async ( + output: __HttpResponse, + context: 
__SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + const errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.ssoadmin#AccessDeniedException": + throw await deserializeAws_json1_1AccessDeniedExceptionResponse(parsedOutput, context); + case "InternalServerException": + case "com.amazonaws.ssoadmin#InternalServerException": + throw await deserializeAws_json1_1InternalServerExceptionResponse(parsedOutput, context); + case "ResourceNotFoundException": + case "com.amazonaws.ssoadmin#ResourceNotFoundException": + throw await deserializeAws_json1_1ResourceNotFoundExceptionResponse(parsedOutput, context); + case "ThrottlingException": + case "com.amazonaws.ssoadmin#ThrottlingException": + throw await deserializeAws_json1_1ThrottlingExceptionResponse(parsedOutput, context); + case "ValidationException": + case "com.amazonaws.ssoadmin#ValidationException": + throw await deserializeAws_json1_1ValidationExceptionResponse(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + const $metadata = deserializeMetadata(output); + const statusCode = $metadata.httpStatusCode ? 
$metadata.httpStatusCode + "" : undefined; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode || statusCode || "UnknowError", + $fault: "client", + $metadata, + }); + throw __decorateServiceException(response, parsedBody); + } +}; + export const deserializeAws_json1_1ListInstancesCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -2169,6 +2576,65 @@ const deserializeAws_json1_1PutInlinePolicyToPermissionSetCommandError = async ( } }; +export const deserializeAws_json1_1PutPermissionsBoundaryToPermissionSetCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1PutPermissionsBoundaryToPermissionSetCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_1PutPermissionsBoundaryToPermissionSetResponse(data, context); + const response: PutPermissionsBoundaryToPermissionSetCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1PutPermissionsBoundaryToPermissionSetCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + const errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.ssoadmin#AccessDeniedException": + throw await deserializeAws_json1_1AccessDeniedExceptionResponse(parsedOutput, context); + case "ConflictException": + case "com.amazonaws.ssoadmin#ConflictException": + throw await deserializeAws_json1_1ConflictExceptionResponse(parsedOutput, context); + case "InternalServerException": + case "com.amazonaws.ssoadmin#InternalServerException": + throw await 
deserializeAws_json1_1InternalServerExceptionResponse(parsedOutput, context); + case "ResourceNotFoundException": + case "com.amazonaws.ssoadmin#ResourceNotFoundException": + throw await deserializeAws_json1_1ResourceNotFoundExceptionResponse(parsedOutput, context); + case "ThrottlingException": + case "com.amazonaws.ssoadmin#ThrottlingException": + throw await deserializeAws_json1_1ThrottlingExceptionResponse(parsedOutput, context); + case "ValidationException": + case "com.amazonaws.ssoadmin#ValidationException": + throw await deserializeAws_json1_1ValidationExceptionResponse(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + const $metadata = deserializeMetadata(output); + const statusCode = $metadata.httpStatusCode ? $metadata.httpStatusCode + "" : undefined; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode || statusCode || "UnknowError", + $fault: "client", + $metadata, + }); + throw __decorateServiceException(response, parsedBody); + } +}; + export const deserializeAws_json1_1TagResourceCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -2542,6 +3008,22 @@ const serializeAws_json1_1AccessControlAttributeValueSourceList = (input: string }); }; +const serializeAws_json1_1AttachCustomerManagedPolicyReferenceToPermissionSetRequest = ( + input: AttachCustomerManagedPolicyReferenceToPermissionSetRequest, + context: __SerdeContext +): any => { + return { + ...(input.CustomerManagedPolicyReference != null && { + CustomerManagedPolicyReference: serializeAws_json1_1CustomerManagedPolicyReference( + input.CustomerManagedPolicyReference, + context + ), + }), + ...(input.InstanceArn != null && { InstanceArn: input.InstanceArn }), + ...(input.PermissionSetArn != null && { PermissionSetArn: input.PermissionSetArn }), + }; +}; + const serializeAws_json1_1AttachManagedPolicyToPermissionSetRequest = ( input: AttachManagedPolicyToPermissionSetRequest, context: __SerdeContext @@ -2596,6 
+3078,16 @@ const serializeAws_json1_1CreatePermissionSetRequest = ( }; }; +const serializeAws_json1_1CustomerManagedPolicyReference = ( + input: CustomerManagedPolicyReference, + context: __SerdeContext +): any => { + return { + ...(input.Name != null && { Name: input.Name }), + ...(input.Path != null && { Path: input.Path }), + }; +}; + const serializeAws_json1_1DeleteAccountAssignmentRequest = ( input: DeleteAccountAssignmentRequest, context: __SerdeContext @@ -2629,6 +3121,16 @@ const serializeAws_json1_1DeleteInstanceAccessControlAttributeConfigurationReque }; }; +const serializeAws_json1_1DeletePermissionsBoundaryFromPermissionSetRequest = ( + input: DeletePermissionsBoundaryFromPermissionSetRequest, + context: __SerdeContext +): any => { + return { + ...(input.InstanceArn != null && { InstanceArn: input.InstanceArn }), + ...(input.PermissionSetArn != null && { PermissionSetArn: input.PermissionSetArn }), + }; +}; + const serializeAws_json1_1DeletePermissionSetRequest = ( input: DeletePermissionSetRequest, context: __SerdeContext @@ -2694,6 +3196,22 @@ const serializeAws_json1_1DescribePermissionSetRequest = ( }; }; +const serializeAws_json1_1DetachCustomerManagedPolicyReferenceFromPermissionSetRequest = ( + input: DetachCustomerManagedPolicyReferenceFromPermissionSetRequest, + context: __SerdeContext +): any => { + return { + ...(input.CustomerManagedPolicyReference != null && { + CustomerManagedPolicyReference: serializeAws_json1_1CustomerManagedPolicyReference( + input.CustomerManagedPolicyReference, + context + ), + }), + ...(input.InstanceArn != null && { InstanceArn: input.InstanceArn }), + ...(input.PermissionSetArn != null && { PermissionSetArn: input.PermissionSetArn }), + }; +}; + const serializeAws_json1_1DetachManagedPolicyFromPermissionSetRequest = ( input: DetachManagedPolicyFromPermissionSetRequest, context: __SerdeContext @@ -2715,6 +3233,16 @@ const serializeAws_json1_1GetInlinePolicyForPermissionSetRequest = ( }; }; +const 
serializeAws_json1_1GetPermissionsBoundaryForPermissionSetRequest = ( + input: GetPermissionsBoundaryForPermissionSetRequest, + context: __SerdeContext +): any => { + return { + ...(input.InstanceArn != null && { InstanceArn: input.InstanceArn }), + ...(input.PermissionSetArn != null && { PermissionSetArn: input.PermissionSetArn }), + }; +}; + const serializeAws_json1_1InstanceAccessControlAttributeConfiguration = ( input: InstanceAccessControlAttributeConfiguration, context: __SerdeContext @@ -2776,6 +3304,18 @@ const serializeAws_json1_1ListAccountsForProvisionedPermissionSetRequest = ( }; }; +const serializeAws_json1_1ListCustomerManagedPolicyReferencesInPermissionSetRequest = ( + input: ListCustomerManagedPolicyReferencesInPermissionSetRequest, + context: __SerdeContext +): any => { + return { + ...(input.InstanceArn != null && { InstanceArn: input.InstanceArn }), + ...(input.MaxResults != null && { MaxResults: input.MaxResults }), + ...(input.NextToken != null && { NextToken: input.NextToken }), + ...(input.PermissionSetArn != null && { PermissionSetArn: input.PermissionSetArn }), + }; +}; + const serializeAws_json1_1ListInstancesRequest = (input: ListInstancesRequest, context: __SerdeContext): any => { return { ...(input.MaxResults != null && { MaxResults: input.MaxResults }), @@ -2848,6 +3388,18 @@ const serializeAws_json1_1OperationStatusFilter = (input: OperationStatusFilter, }; }; +const serializeAws_json1_1PermissionsBoundary = (input: PermissionsBoundary, context: __SerdeContext): any => { + return { + ...(input.CustomerManagedPolicyReference != null && { + CustomerManagedPolicyReference: serializeAws_json1_1CustomerManagedPolicyReference( + input.CustomerManagedPolicyReference, + context + ), + }), + ...(input.ManagedPolicyArn != null && { ManagedPolicyArn: input.ManagedPolicyArn }), + }; +}; + const serializeAws_json1_1ProvisionPermissionSetRequest = ( input: ProvisionPermissionSetRequest, context: __SerdeContext @@ -2871,6 +3423,19 @@ const 
serializeAws_json1_1PutInlinePolicyToPermissionSetRequest = ( }; }; +const serializeAws_json1_1PutPermissionsBoundaryToPermissionSetRequest = ( + input: PutPermissionsBoundaryToPermissionSetRequest, + context: __SerdeContext +): any => { + return { + ...(input.InstanceArn != null && { InstanceArn: input.InstanceArn }), + ...(input.PermissionSetArn != null && { PermissionSetArn: input.PermissionSetArn }), + ...(input.PermissionsBoundary != null && { + PermissionsBoundary: serializeAws_json1_1PermissionsBoundary(input.PermissionsBoundary, context), + }), + }; +}; + const serializeAws_json1_1Tag = (input: Tag, context: __SerdeContext): any => { return { ...(input.Key != null && { Key: input.Key }), @@ -3081,6 +3646,13 @@ const deserializeAws_json1_1AccountList = (output: any, context: __SerdeContext) return retVal; }; +const deserializeAws_json1_1AttachCustomerManagedPolicyReferenceToPermissionSetResponse = ( + output: any, + context: __SerdeContext +): AttachCustomerManagedPolicyReferenceToPermissionSetResponse => { + return {} as any; +}; + const deserializeAws_json1_1AttachedManagedPolicy = (output: any, context: __SerdeContext): AttachedManagedPolicy => { return { Arn: __expectString(output.Arn), @@ -3145,6 +3717,31 @@ const deserializeAws_json1_1CreatePermissionSetResponse = ( } as any; }; +const deserializeAws_json1_1CustomerManagedPolicyReference = ( + output: any, + context: __SerdeContext +): CustomerManagedPolicyReference => { + return { + Name: __expectString(output.Name), + Path: __expectString(output.Path), + } as any; +}; + +const deserializeAws_json1_1CustomerManagedPolicyReferenceList = ( + output: any, + context: __SerdeContext +): CustomerManagedPolicyReference[] => { + const retVal = (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_1CustomerManagedPolicyReference(entry, context); + }); + return retVal; +}; + const 
deserializeAws_json1_1DeleteAccountAssignmentResponse = ( output: any, context: __SerdeContext @@ -3171,6 +3768,13 @@ const deserializeAws_json1_1DeleteInstanceAccessControlAttributeConfigurationRes return {} as any; }; +const deserializeAws_json1_1DeletePermissionsBoundaryFromPermissionSetResponse = ( + output: any, + context: __SerdeContext +): DeletePermissionsBoundaryFromPermissionSetResponse => { + return {} as any; +}; + const deserializeAws_json1_1DeletePermissionSetResponse = ( output: any, context: __SerdeContext @@ -3241,6 +3845,13 @@ const deserializeAws_json1_1DescribePermissionSetResponse = ( } as any; }; +const deserializeAws_json1_1DetachCustomerManagedPolicyReferenceFromPermissionSetResponse = ( + output: any, + context: __SerdeContext +): DetachCustomerManagedPolicyReferenceFromPermissionSetResponse => { + return {} as any; +}; + const deserializeAws_json1_1DetachManagedPolicyFromPermissionSetResponse = ( output: any, context: __SerdeContext @@ -3257,6 +3868,18 @@ const deserializeAws_json1_1GetInlinePolicyForPermissionSetResponse = ( } as any; }; +const deserializeAws_json1_1GetPermissionsBoundaryForPermissionSetResponse = ( + output: any, + context: __SerdeContext +): GetPermissionsBoundaryForPermissionSetResponse => { + return { + PermissionsBoundary: + output.PermissionsBoundary != null + ? deserializeAws_json1_1PermissionsBoundary(output.PermissionsBoundary, context) + : undefined, + } as any; +}; + const deserializeAws_json1_1InstanceAccessControlAttributeConfiguration = ( output: any, context: __SerdeContext @@ -3346,6 +3969,19 @@ const deserializeAws_json1_1ListAccountsForProvisionedPermissionSetResponse = ( } as any; }; +const deserializeAws_json1_1ListCustomerManagedPolicyReferencesInPermissionSetResponse = ( + output: any, + context: __SerdeContext +): ListCustomerManagedPolicyReferencesInPermissionSetResponse => { + return { + CustomerManagedPolicyReferences: + output.CustomerManagedPolicyReferences != null + ? 
deserializeAws_json1_1CustomerManagedPolicyReferenceList(output.CustomerManagedPolicyReferences, context) + : undefined, + NextToken: __expectString(output.NextToken), + } as any; +}; + const deserializeAws_json1_1ListInstancesResponse = (output: any, context: __SerdeContext): ListInstancesResponse => { return { Instances: output.Instances != null ? deserializeAws_json1_1InstanceList(output.Instances, context) : undefined, @@ -3415,6 +4051,16 @@ const deserializeAws_json1_1ListTagsForResourceResponse = ( } as any; }; +const deserializeAws_json1_1PermissionsBoundary = (output: any, context: __SerdeContext): PermissionsBoundary => { + return { + CustomerManagedPolicyReference: + output.CustomerManagedPolicyReference != null + ? deserializeAws_json1_1CustomerManagedPolicyReference(output.CustomerManagedPolicyReference, context) + : undefined, + ManagedPolicyArn: __expectString(output.ManagedPolicyArn), + } as any; +}; + const deserializeAws_json1_1PermissionSet = (output: any, context: __SerdeContext): PermissionSet => { return { CreatedDate: @@ -3506,6 +4152,13 @@ const deserializeAws_json1_1PutInlinePolicyToPermissionSetResponse = ( return {} as any; }; +const deserializeAws_json1_1PutPermissionsBoundaryToPermissionSetResponse = ( + output: any, + context: __SerdeContext +): PutPermissionsBoundaryToPermissionSetResponse => { + return {} as any; +}; + const deserializeAws_json1_1ResourceNotFoundException = ( output: any, context: __SerdeContext diff --git a/codegen/sdk-codegen/aws-models/sso-admin.json b/codegen/sdk-codegen/aws-models/sso-admin.json index 7ddea9c648c72..f3ca6d28c515a 100644 --- a/codegen/sdk-codegen/aws-models/sso-admin.json +++ b/codegen/sdk-codegen/aws-models/sso-admin.json @@ -85,7 +85,7 @@ } }, "traits": { - "smithy.api#documentation": "

    The value used for mapping a specified attribute to an identity source.

    " + "smithy.api#documentation": "

    The value used for mapping a specified attribute to an identity source. For more information, see Attribute mappings in the Amazon Web Services Single Sign-On User Guide.

    " } }, "com.amazonaws.ssoadmin#AccessControlAttributeValueSource": { @@ -261,6 +261,10 @@ "com.amazonaws.ssoadmin#AccountId": { "type": "string", "traits": { + "smithy.api#length": { + "min": 12, + "max": 12 + }, "smithy.api#pattern": "^\\d{12}$" } }, @@ -270,6 +274,71 @@ "target": "com.amazonaws.ssoadmin#AccountId" } }, + "com.amazonaws.ssoadmin#AttachCustomerManagedPolicyReferenceToPermissionSet": { + "type": "operation", + "input": { + "target": "com.amazonaws.ssoadmin#AttachCustomerManagedPolicyReferenceToPermissionSetRequest" + }, + "output": { + "target": "com.amazonaws.ssoadmin#AttachCustomerManagedPolicyReferenceToPermissionSetResponse" + }, + "errors": [ + { + "target": "com.amazonaws.ssoadmin#AccessDeniedException" + }, + { + "target": "com.amazonaws.ssoadmin#ConflictException" + }, + { + "target": "com.amazonaws.ssoadmin#InternalServerException" + }, + { + "target": "com.amazonaws.ssoadmin#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.ssoadmin#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.ssoadmin#ThrottlingException" + }, + { + "target": "com.amazonaws.ssoadmin#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

    Attaches the specified IAM customer managed policy to the specified PermissionSet.

    " + } + }, + "com.amazonaws.ssoadmin#AttachCustomerManagedPolicyReferenceToPermissionSetRequest": { + "type": "structure", + "members": { + "InstanceArn": { + "target": "com.amazonaws.ssoadmin#InstanceArn", + "traits": { + "smithy.api#documentation": "

    The ARN of the SSO instance under which the operation will be executed.

    ", + "smithy.api#required": {} + } + }, + "PermissionSetArn": { + "target": "com.amazonaws.ssoadmin#PermissionSetArn", + "traits": { + "smithy.api#documentation": "

    The ARN of the PermissionSet.

    ", + "smithy.api#required": {} + } + }, + "CustomerManagedPolicyReference": { + "target": "com.amazonaws.ssoadmin#CustomerManagedPolicyReference", + "traits": { + "smithy.api#documentation": "

    Specifies the name and path of the IAM customer managed policy. You must have an IAM policy that matches the name and path in each Amazon Web Services account where you want to deploy your permission set.

    ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.ssoadmin#AttachCustomerManagedPolicyReferenceToPermissionSetResponse": { + "type": "structure", + "members": {} + }, "com.amazonaws.ssoadmin#AttachManagedPolicyToPermissionSet": { "type": "operation", "input": { @@ -302,7 +371,7 @@ } ], "traits": { - "smithy.api#documentation": "

    Attaches an IAM managed policy ARN to a permission set.

    \n \n

    If the permission set is already referenced by one or more account assignments, you will\n need to call \n ProvisionPermissionSet\n after this operation.\n Calling ProvisionPermissionSet applies the corresponding IAM policy updates to\n all assigned accounts.

    \n
    " + "smithy.api#documentation": "

    Attaches an Amazon Web Services managed IAM policy ARN to a permission set.

    \n \n

    If the permission set is already referenced by one or more account assignments, you will\n need to call \n ProvisionPermissionSet\n after this operation.\n Calling ProvisionPermissionSet applies the corresponding IAM policy updates to\n all assigned accounts.

    \n
    " } }, "com.amazonaws.ssoadmin#AttachManagedPolicyToPermissionSetRequest": { @@ -325,7 +394,7 @@ "ManagedPolicyArn": { "target": "com.amazonaws.ssoadmin#ManagedPolicyArn", "traits": { - "smithy.api#documentation": "

    The IAM managed policy ARN to be attached to a permission set.

    ", + "smithy.api#documentation": "

    The Amazon Web Services managed policy ARN to be attached to a permission set.

    ", "smithy.api#required": {} } } @@ -341,18 +410,18 @@ "Name": { "target": "com.amazonaws.ssoadmin#Name", "traits": { - "smithy.api#documentation": "

    The name of the IAM managed policy.

    " + "smithy.api#documentation": "

    The name of the Amazon Web Services managed IAM policy.

    " } }, "Arn": { "target": "com.amazonaws.ssoadmin#ManagedPolicyArn", "traits": { - "smithy.api#documentation": "

    The ARN of the IAM managed policy. For more information about ARNs, see Amazon Resource\nNames (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference.

    " + "smithy.api#documentation": "

    The ARN of the Amazon Web Services managed IAM policy. For more information about ARNs, see Amazon Resource\nNames (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference.

    " } } }, "traits": { - "smithy.api#documentation": "

    A structure that stores the details of the IAM managed policy.

    " + "smithy.api#documentation": "

    A structure that stores the details of the Amazon Web Services managed IAM policy.

    " } }, "com.amazonaws.ssoadmin#AttachedManagedPolicyList": { @@ -614,6 +683,33 @@ } } }, + "com.amazonaws.ssoadmin#CustomerManagedPolicyReference": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.ssoadmin#ManagedPolicyName", + "traits": { + "smithy.api#documentation": "

    The name of the policy document.

    ", + "smithy.api#required": {} + } + }, + "Path": { + "target": "com.amazonaws.ssoadmin#ManagedPolicyPath", + "traits": { + "smithy.api#documentation": "

    The path for the policy. The default is /. For more information, see Friendly names and paths in the Identity and Access Management user guide.

    " + } + } + }, + "traits": { + "smithy.api#documentation": "

    Specifies the name and path of the IAM customer managed policy. You must have an IAM policy that matches the name and path in each Amazon Web Services account where you want to deploy your permission set.

    " + } + }, + "com.amazonaws.ssoadmin#CustomerManagedPolicyReferenceList": { + "type": "list", + "member": { + "target": "com.amazonaws.ssoadmin#CustomerManagedPolicyReference" + } + }, "com.amazonaws.ssoadmin#Date": { "type": "timestamp" }, @@ -865,6 +961,58 @@ "type": "structure", "members": {} }, + "com.amazonaws.ssoadmin#DeletePermissionsBoundaryFromPermissionSet": { + "type": "operation", + "input": { + "target": "com.amazonaws.ssoadmin#DeletePermissionsBoundaryFromPermissionSetRequest" + }, + "output": { + "target": "com.amazonaws.ssoadmin#DeletePermissionsBoundaryFromPermissionSetResponse" + }, + "errors": [ + { + "target": "com.amazonaws.ssoadmin#AccessDeniedException" + }, + { + "target": "com.amazonaws.ssoadmin#InternalServerException" + }, + { + "target": "com.amazonaws.ssoadmin#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.ssoadmin#ThrottlingException" + }, + { + "target": "com.amazonaws.ssoadmin#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

    Deletes the permissions boundary from a specified PermissionSet.

    " + } + }, + "com.amazonaws.ssoadmin#DeletePermissionsBoundaryFromPermissionSetRequest": { + "type": "structure", + "members": { + "InstanceArn": { + "target": "com.amazonaws.ssoadmin#InstanceArn", + "traits": { + "smithy.api#documentation": "

    The ARN of the SSO instance under which the operation will be executed.

    ", + "smithy.api#required": {} + } + }, + "PermissionSetArn": { + "target": "com.amazonaws.ssoadmin#PermissionSetArn", + "traits": { + "smithy.api#documentation": "

    The ARN of the PermissionSet.

    ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.ssoadmin#DeletePermissionsBoundaryFromPermissionSetResponse": { + "type": "structure", + "members": {} + }, "com.amazonaws.ssoadmin#DescribeAccountAssignmentCreationStatus": { "type": "operation", "input": { @@ -1165,6 +1313,68 @@ } } }, + "com.amazonaws.ssoadmin#DetachCustomerManagedPolicyReferenceFromPermissionSet": { + "type": "operation", + "input": { + "target": "com.amazonaws.ssoadmin#DetachCustomerManagedPolicyReferenceFromPermissionSetRequest" + }, + "output": { + "target": "com.amazonaws.ssoadmin#DetachCustomerManagedPolicyReferenceFromPermissionSetResponse" + }, + "errors": [ + { + "target": "com.amazonaws.ssoadmin#AccessDeniedException" + }, + { + "target": "com.amazonaws.ssoadmin#ConflictException" + }, + { + "target": "com.amazonaws.ssoadmin#InternalServerException" + }, + { + "target": "com.amazonaws.ssoadmin#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.ssoadmin#ThrottlingException" + }, + { + "target": "com.amazonaws.ssoadmin#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

    Detaches the specified IAM customer managed policy from the specified PermissionSet.

    " + } + }, + "com.amazonaws.ssoadmin#DetachCustomerManagedPolicyReferenceFromPermissionSetRequest": { + "type": "structure", + "members": { + "InstanceArn": { + "target": "com.amazonaws.ssoadmin#InstanceArn", + "traits": { + "smithy.api#documentation": "

    The ARN of the SSO instance under which the operation will be executed.

    ", + "smithy.api#required": {} + } + }, + "PermissionSetArn": { + "target": "com.amazonaws.ssoadmin#PermissionSetArn", + "traits": { + "smithy.api#documentation": "

    The ARN of the PermissionSet.

    ", + "smithy.api#required": {} + } + }, + "CustomerManagedPolicyReference": { + "target": "com.amazonaws.ssoadmin#CustomerManagedPolicyReference", + "traits": { + "smithy.api#documentation": "

    Specifies the name and path of the IAM customer managed policy. You must have an IAM policy that matches the name and path in each Amazon Web Services account where you want to deploy your permission set.

    ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.ssoadmin#DetachCustomerManagedPolicyReferenceFromPermissionSetResponse": { + "type": "structure", + "members": {} + }, "com.amazonaws.ssoadmin#DetachManagedPolicyFromPermissionSet": { "type": "operation", "input": { @@ -1194,7 +1404,7 @@ } ], "traits": { - "smithy.api#documentation": "

    Detaches the attached IAM managed policy ARN from the specified permission set.

    " + "smithy.api#documentation": "

    Detaches the attached Amazon Web Services managed IAM policy ARN from the specified permission set.

    " } }, "com.amazonaws.ssoadmin#DetachManagedPolicyFromPermissionSetRequest": { @@ -1217,7 +1427,7 @@ "ManagedPolicyArn": { "target": "com.amazonaws.ssoadmin#ManagedPolicyArn", "traits": { - "smithy.api#documentation": "

    The IAM managed policy ARN to be attached to a permission set.

    ", + "smithy.api#documentation": "

    The Amazon Web Services managed policy ARN to be detached from a permission set.

    ", "smithy.api#required": {} } } @@ -1237,16 +1447,6 @@ "smithy.api#pattern": "^(-?)P(?=\\d|T\\d)(?:(\\d+)Y)?(?:(\\d+)M)?(?:(\\d+)([DW]))?(?:T(?:(\\d+)H)?(?:(\\d+)M)?(?:(\\d+(?:\\.\\d+)?)S)?)?$" } }, - "com.amazonaws.ssoadmin#GeneralArn": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 10, - "max": 2048 - }, - "smithy.api#pattern": "^arn:aws:sso:([a-zA-Z0-9-]+)?:(\\d{12})?:[a-zA-Z0-9-]+/[a-zA-Z0-9-/.]+$" - } - }, "com.amazonaws.ssoadmin#GetInlinePolicyForPermissionSet": { "type": "operation", "input": { @@ -1306,6 +1506,65 @@ } } }, + "com.amazonaws.ssoadmin#GetPermissionsBoundaryForPermissionSet": { + "type": "operation", + "input": { + "target": "com.amazonaws.ssoadmin#GetPermissionsBoundaryForPermissionSetRequest" + }, + "output": { + "target": "com.amazonaws.ssoadmin#GetPermissionsBoundaryForPermissionSetResponse" + }, + "errors": [ + { + "target": "com.amazonaws.ssoadmin#AccessDeniedException" + }, + { + "target": "com.amazonaws.ssoadmin#InternalServerException" + }, + { + "target": "com.amazonaws.ssoadmin#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.ssoadmin#ThrottlingException" + }, + { + "target": "com.amazonaws.ssoadmin#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

    Obtains the permissions boundary for a specified PermissionSet.

    " + } + }, + "com.amazonaws.ssoadmin#GetPermissionsBoundaryForPermissionSetRequest": { + "type": "structure", + "members": { + "InstanceArn": { + "target": "com.amazonaws.ssoadmin#InstanceArn", + "traits": { + "smithy.api#documentation": "

    The ARN of the SSO instance under which the operation will be executed.

    ", + "smithy.api#required": {} + } + }, + "PermissionSetArn": { + "target": "com.amazonaws.ssoadmin#PermissionSetArn", + "traits": { + "smithy.api#documentation": "

    The ARN of the PermissionSet.

    ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.ssoadmin#GetPermissionsBoundaryForPermissionSetResponse": { + "type": "structure", + "members": { + "PermissionsBoundary": { + "target": "com.amazonaws.ssoadmin#PermissionsBoundary", + "traits": { + "smithy.api#documentation": "

    The permissions boundary attached to the specified permission set.

    " + } + } + } + }, "com.amazonaws.ssoadmin#Id": { "type": "string", "traits": { @@ -1360,7 +1619,7 @@ "min": 10, "max": 1224 }, - "smithy.api#pattern": "^arn:aws:sso:::instance/(sso)?ins-[a-zA-Z0-9-.]{16}$" + "smithy.api#pattern": "^arn:(aws|aws-us-gov|aws-cn|aws-iso|aws-iso-b):sso:::instance/(sso)?ins-[a-zA-Z0-9-.]{16}$" } }, "com.amazonaws.ssoadmin#InstanceList": { @@ -1748,6 +2007,89 @@ } } }, + "com.amazonaws.ssoadmin#ListCustomerManagedPolicyReferencesInPermissionSet": { + "type": "operation", + "input": { + "target": "com.amazonaws.ssoadmin#ListCustomerManagedPolicyReferencesInPermissionSetRequest" + }, + "output": { + "target": "com.amazonaws.ssoadmin#ListCustomerManagedPolicyReferencesInPermissionSetResponse" + }, + "errors": [ + { + "target": "com.amazonaws.ssoadmin#AccessDeniedException" + }, + { + "target": "com.amazonaws.ssoadmin#InternalServerException" + }, + { + "target": "com.amazonaws.ssoadmin#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.ssoadmin#ThrottlingException" + }, + { + "target": "com.amazonaws.ssoadmin#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

    Lists all IAM customer managed policies attached to a specified PermissionSet.

    ", + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "CustomerManagedPolicyReferences", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.ssoadmin#ListCustomerManagedPolicyReferencesInPermissionSetRequest": { + "type": "structure", + "members": { + "InstanceArn": { + "target": "com.amazonaws.ssoadmin#InstanceArn", + "traits": { + "smithy.api#documentation": "

    The ARN of the SSO instance under which the operation will be executed.

    ", + "smithy.api#required": {} + } + }, + "PermissionSetArn": { + "target": "com.amazonaws.ssoadmin#PermissionSetArn", + "traits": { + "smithy.api#documentation": "

    The ARN of the PermissionSet.

    ", + "smithy.api#required": {} + } + }, + "MaxResults": { + "target": "com.amazonaws.ssoadmin#MaxResults", + "traits": { + "smithy.api#documentation": "

    The maximum number of results to display for the list call.

    " + } + }, + "NextToken": { + "target": "com.amazonaws.ssoadmin#Token", + "traits": { + "smithy.api#documentation": "

    The pagination token for the list API. Initially the value is null. Use the output of previous API calls to make subsequent calls.

    " + } + } + } + }, + "com.amazonaws.ssoadmin#ListCustomerManagedPolicyReferencesInPermissionSetResponse": { + "type": "structure", + "members": { + "CustomerManagedPolicyReferences": { + "target": "com.amazonaws.ssoadmin#CustomerManagedPolicyReferenceList", + "traits": { + "smithy.api#documentation": "

    Specifies the names and paths of the IAM customer managed policies that you have attached to your permission set.

    " + } + }, + "NextToken": { + "target": "com.amazonaws.ssoadmin#Token", + "traits": { + "smithy.api#documentation": "

    The pagination token for the list API. Initially the value is null. Use the output of previous API calls to make subsequent calls.

    " + } + } + } + }, "com.amazonaws.ssoadmin#ListInstances": { "type": "operation", "input": { @@ -1840,7 +2182,7 @@ } ], "traits": { - "smithy.api#documentation": "

    Lists the IAM managed policy that is attached to a specified permission set.

    ", + "smithy.api#documentation": "

    Lists the Amazon Web Services managed IAM policy that is attached to a specified permission set.

    ", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -2189,7 +2531,7 @@ } }, "ResourceArn": { - "target": "com.amazonaws.ssoadmin#GeneralArn", + "target": "com.amazonaws.ssoadmin#TaggableResourceArn", "traits": { "smithy.api#documentation": "

    The ARN of the resource with the tags to be listed.

    ", "smithy.api#required": {} @@ -2226,7 +2568,28 @@ "smithy.api#length": { "min": 20, "max": 2048 - } + }, + "smithy.api#pattern": "^arn:(aws|aws-us-gov|aws-cn|aws-iso|aws-iso-b):iam::aws:policy/[\\p{L}\\p{M}\\p{Z}\\p{S}\\p{N}\\p{P}]+$" + } + }, + "com.amazonaws.ssoadmin#ManagedPolicyName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#pattern": "^[\\w+=,.@-]+$" + } + }, + "com.amazonaws.ssoadmin#ManagedPolicyPath": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 512 + }, + "smithy.api#pattern": "^((/[A-Za-z0-9\\.,\\+@=_-]+)*)/$" } }, "com.amazonaws.ssoadmin#MaxResults": { @@ -2313,7 +2676,7 @@ "min": 10, "max": 1224 }, - "smithy.api#pattern": "^arn:aws:sso:::permissionSet/(sso)?ins-[a-zA-Z0-9-.]{16}/ps-[a-zA-Z0-9-./]{16}$" + "smithy.api#pattern": "^arn:(aws|aws-us-gov|aws-cn|aws-iso|aws-iso-b):sso:::permissionSet/(sso)?ins-[a-zA-Z0-9-.]{16}/ps-[a-zA-Z0-9-./]{16}$" } }, "com.amazonaws.ssoadmin#PermissionSetDescription": { @@ -2323,7 +2686,7 @@ "min": 1, "max": 700 }, - "smithy.api#pattern": "^[\\p{L}\\p{M}\\p{Z}\\p{S}\\p{N}\\p{P}]*$" + "smithy.api#pattern": "^[\\u0009\\u000A\\u000D\\u0020-\\u007E\\u00A0-\\u00FF]*$" } }, "com.amazonaws.ssoadmin#PermissionSetList": { @@ -2349,8 +2712,7 @@ "min": 1, "max": 10240 }, - "smithy.api#pattern": "^[\\u0009\\u000A\\u000D\\u0020-\\u00FF]+$", - "smithy.api#sensitive": {} + "smithy.api#pattern": "^[\\u0009\\u000A\\u000D\\u0020-\\u00FF]+$" } }, "com.amazonaws.ssoadmin#PermissionSetProvisioningStatus": { @@ -2429,6 +2791,26 @@ "smithy.api#documentation": "

    Provides information about the permission set provisioning status.

    " } }, + "com.amazonaws.ssoadmin#PermissionsBoundary": { + "type": "structure", + "members": { + "CustomerManagedPolicyReference": { + "target": "com.amazonaws.ssoadmin#CustomerManagedPolicyReference", + "traits": { + "smithy.api#documentation": "

    Specifies the name and path of the IAM customer managed policy. You must have an IAM policy that matches the name and path in each Amazon Web Services account where you want to deploy your permission set.

    " + } + }, + "ManagedPolicyArn": { + "target": "com.amazonaws.ssoadmin#ManagedPolicyArn", + "traits": { + "smithy.api#documentation": "

    The Amazon Web Services managed policy ARN that you want to attach to a permission set as a permissions boundary.

    " + } + } + }, + "traits": { + "smithy.api#documentation": "

    Specifies the configuration of the Amazon Web Services managed or customer managed policy that you want to set as a permissions boundary. Specify either CustomerManagedPolicyReference \n to use the name and path of a customer managed policy, or ManagedPolicyArn to use the ARN of an Amazon Web Services managed IAM policy. A permissions boundary represents the maximum permissions that any policy\n can grant your role. For more information, see Permissions boundaries for IAM entities in the Identity and Access Management User Guide.

    \n \n

    Policies used as permissions boundaries do not provide permissions. You must also attach an IAM policy to the role. To learn how the effective permissions for a role are evaluated, see IAM JSON policy evaluation logic in the Identity and Access Management User Guide.

    \n
    " + } + }, "com.amazonaws.ssoadmin#PrincipalId": { "type": "string", "traits": { @@ -2624,6 +3006,68 @@ "type": "structure", "members": {} }, + "com.amazonaws.ssoadmin#PutPermissionsBoundaryToPermissionSet": { + "type": "operation", + "input": { + "target": "com.amazonaws.ssoadmin#PutPermissionsBoundaryToPermissionSetRequest" + }, + "output": { + "target": "com.amazonaws.ssoadmin#PutPermissionsBoundaryToPermissionSetResponse" + }, + "errors": [ + { + "target": "com.amazonaws.ssoadmin#AccessDeniedException" + }, + { + "target": "com.amazonaws.ssoadmin#ConflictException" + }, + { + "target": "com.amazonaws.ssoadmin#InternalServerException" + }, + { + "target": "com.amazonaws.ssoadmin#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.ssoadmin#ThrottlingException" + }, + { + "target": "com.amazonaws.ssoadmin#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

    Attaches an Amazon Web Services managed or customer managed IAM policy to the specified PermissionSet as a permissions boundary.

    " + } + }, + "com.amazonaws.ssoadmin#PutPermissionsBoundaryToPermissionSetRequest": { + "type": "structure", + "members": { + "InstanceArn": { + "target": "com.amazonaws.ssoadmin#InstanceArn", + "traits": { + "smithy.api#documentation": "

    The ARN of the SSO instance under which the operation will be executed.

    ", + "smithy.api#required": {} + } + }, + "PermissionSetArn": { + "target": "com.amazonaws.ssoadmin#PermissionSetArn", + "traits": { + "smithy.api#documentation": "

    The ARN of the PermissionSet.

    ", + "smithy.api#required": {} + } + }, + "PermissionsBoundary": { + "target": "com.amazonaws.ssoadmin#PermissionsBoundary", + "traits": { + "smithy.api#documentation": "

    The permissions boundary that you want to attach to a PermissionSet.

    ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.ssoadmin#PutPermissionsBoundaryToPermissionSetResponse": { + "type": "structure", + "members": {} + }, "com.amazonaws.ssoadmin#Reason": { "type": "string", "traits": { @@ -2658,8 +3102,26 @@ }, "com.amazonaws.ssoadmin#SWBExternalService": { "type": "service", + "traits": { + "aws.api#service": { + "sdkId": "SSO Admin", + "arnNamespace": "sso", + "cloudFormationName": "SSOAdmin", + "cloudTrailEventSource": "ssoadmin.amazonaws.com", + "endpointPrefix": "sso" + }, + "aws.auth#sigv4": { + "name": "sso" + }, + "aws.protocols#awsJson1_1": {}, + "smithy.api#documentation": "

    ", + "smithy.api#title": "AWS Single Sign-On Admin" + }, "version": "2020-07-20", "operations": [ + { + "target": "com.amazonaws.ssoadmin#AttachCustomerManagedPolicyReferenceToPermissionSet" + }, { "target": "com.amazonaws.ssoadmin#AttachManagedPolicyToPermissionSet" }, @@ -2681,6 +3143,9 @@ { "target": "com.amazonaws.ssoadmin#DeleteInstanceAccessControlAttributeConfiguration" }, + { + "target": "com.amazonaws.ssoadmin#DeletePermissionsBoundaryFromPermissionSet" + }, { "target": "com.amazonaws.ssoadmin#DeletePermissionSet" }, @@ -2699,12 +3164,18 @@ { "target": "com.amazonaws.ssoadmin#DescribePermissionSetProvisioningStatus" }, + { + "target": "com.amazonaws.ssoadmin#DetachCustomerManagedPolicyReferenceFromPermissionSet" + }, { "target": "com.amazonaws.ssoadmin#DetachManagedPolicyFromPermissionSet" }, { "target": "com.amazonaws.ssoadmin#GetInlinePolicyForPermissionSet" }, + { + "target": "com.amazonaws.ssoadmin#GetPermissionsBoundaryForPermissionSet" + }, { "target": "com.amazonaws.ssoadmin#ListAccountAssignmentCreationStatus" }, @@ -2717,6 +3188,9 @@ { "target": "com.amazonaws.ssoadmin#ListAccountsForProvisionedPermissionSet" }, + { + "target": "com.amazonaws.ssoadmin#ListCustomerManagedPolicyReferencesInPermissionSet" + }, { "target": "com.amazonaws.ssoadmin#ListInstances" }, @@ -2741,6 +3215,9 @@ { "target": "com.amazonaws.ssoadmin#PutInlinePolicyToPermissionSet" }, + { + "target": "com.amazonaws.ssoadmin#PutPermissionsBoundaryToPermissionSet" + }, { "target": "com.amazonaws.ssoadmin#TagResource" }, @@ -2753,22 +3230,7 @@ { "target": "com.amazonaws.ssoadmin#UpdatePermissionSet" } - ], - "traits": { - "aws.api#service": { - "sdkId": "SSO Admin", - "arnNamespace": "sso", - "cloudFormationName": "SSOAdmin", - "cloudTrailEventSource": "ssoadmin.amazonaws.com", - "endpointPrefix": "sso" - }, - "aws.auth#sigv4": { - "name": "sso" - }, - "aws.protocols#awsJson1_1": {}, - "smithy.api#documentation": "

    Amazon Web Services Single Sign On (SSO) is a cloud SSO service that makes it easy to centrally manage SSO\n access to multiple Amazon Web Services accounts and business applications. This guide provides information on\n SSO operations which could be used for access management of Amazon Web Services accounts. For information about\n Amazon Web Services SSO features, see the Amazon Web Services Single Sign-On User Guide.

    \n

    Many operations in the SSO APIs rely on identifiers for users and groups, known as\n principals. For more information about how to work with principals and principal IDs in Amazon Web Services SSO,\n see the Amazon Web Services SSO Identity Store API\n Reference.

    ", - "smithy.api#title": "AWS Single Sign-On Admin" - } + ] }, "com.amazonaws.ssoadmin#ServiceQuotaExceededException": { "type": "structure", @@ -2811,13 +3273,15 @@ "Key": { "target": "com.amazonaws.ssoadmin#TagKey", "traits": { - "smithy.api#documentation": "

    The key for the tag.

    " + "smithy.api#documentation": "

    The key for the tag.

    ", + "smithy.api#required": {} } }, "Value": { "target": "com.amazonaws.ssoadmin#TagValue", "traits": { - "smithy.api#documentation": "

    The value of the tag.

    " + "smithy.api#documentation": "

    The value of the tag.

    ", + "smithy.api#required": {} } } }, @@ -2905,7 +3369,7 @@ } }, "ResourceArn": { - "target": "com.amazonaws.ssoadmin#GeneralArn", + "target": "com.amazonaws.ssoadmin#TaggableResourceArn", "traits": { "smithy.api#documentation": "

    The ARN of the resource with the tags to be listed.

    ", "smithy.api#required": {} @@ -2934,9 +3398,23 @@ "smithy.api#pattern": "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" } }, + "com.amazonaws.ssoadmin#TaggableResourceArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 10, + "max": 2048 + }, + "smithy.api#pattern": "^arn:(aws|aws-us-gov|aws-cn|aws-iso|aws-iso-b):sso:::permissionSet/(sso)?ins-[a-zA-Z0-9-.]{16}/ps-[a-zA-Z0-9-./]{16}$" + } + }, "com.amazonaws.ssoadmin#TargetId": { "type": "string", "traits": { + "smithy.api#length": { + "min": 12, + "max": 12 + }, "smithy.api#pattern": "^\\d{12}$" } }, @@ -2980,6 +3458,10 @@ "com.amazonaws.ssoadmin#UUId": { "type": "string", "traits": { + "smithy.api#length": { + "min": 36, + "max": 36 + }, "smithy.api#pattern": "^\\b[0-9a-f]{8}\\b-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-\\b[0-9a-f]{12}\\b$" } }, @@ -3026,7 +3508,7 @@ } }, "ResourceArn": { - "target": "com.amazonaws.ssoadmin#GeneralArn", + "target": "com.amazonaws.ssoadmin#TaggableResourceArn", "traits": { "smithy.api#documentation": "

    The ARN of the resource with the tags to be listed.

    ", "smithy.api#required": {} From 3f891393cfe5002f24317a340f88ce0e1df7aa44 Mon Sep 17 00:00:00 2001 From: awstools Date: Mon, 18 Jul 2022 18:30:52 +0000 Subject: [PATCH 07/31] feat(client-sagemaker): Amazon SageMaker Edge Manager provides lightweight model deployment feature to deploy machine learning models on requested devices. --- clients/client-sagemaker/src/SageMaker.ts | 333 + .../client-sagemaker/src/SageMakerClient.ts | 51 + .../CreateEdgeDeploymentPlanCommand.ts | 96 + .../CreateEdgeDeploymentStageCommand.ts | 99 + .../CreateHyperParameterTuningJobCommand.ts | 2 +- .../DeleteEdgeDeploymentPlanCommand.ts | 96 + .../DeleteEdgeDeploymentStageCommand.ts | 99 + .../DescribeEdgeDeploymentPlanCommand.ts | 99 + .../DescribeHyperParameterTuningJobCommand.ts | 2 +- .../src/commands/DescribeImageCommand.ts | 2 +- .../commands/DescribeImageVersionCommand.ts | 2 +- ...cribeInferenceRecommendationsJobCommand.ts | 6 +- .../ListEdgeDeploymentPlansCommand.ts | 96 + .../ListPipelineExecutionStepsCommand.ts | 2 +- ...stPipelineParametersForExecutionCommand.ts | 2 +- .../src/commands/ListPipelinesCommand.ts | 2 +- .../src/commands/ListProcessingJobsCommand.ts | 2 +- .../src/commands/ListStageDevicesCommand.ts | 96 + .../StartEdgeDeploymentStageCommand.ts | 96 + .../StopEdgeDeploymentStageCommand.ts | 96 + .../client-sagemaker/src/commands/index.ts | 9 + .../client-sagemaker/src/models/models_0.ts | 704 +- .../client-sagemaker/src/models/models_1.ts | 1355 +- .../client-sagemaker/src/models/models_2.ts | 11423 ++++++++-------- .../client-sagemaker/src/models/models_3.ts | 916 +- .../ListEdgeDeploymentPlansPaginator.ts | 61 + .../pagination/ListStageDevicesPaginator.ts | 61 + .../client-sagemaker/src/pagination/index.ts | 2 + .../src/protocols/Aws_json1_1.ts | 1034 +- codegen/sdk-codegen/aws-models/sagemaker.json | 1056 ++ 30 files changed, 11019 insertions(+), 6881 deletions(-) create mode 100644 clients/client-sagemaker/src/commands/CreateEdgeDeploymentPlanCommand.ts 
create mode 100644 clients/client-sagemaker/src/commands/CreateEdgeDeploymentStageCommand.ts create mode 100644 clients/client-sagemaker/src/commands/DeleteEdgeDeploymentPlanCommand.ts create mode 100644 clients/client-sagemaker/src/commands/DeleteEdgeDeploymentStageCommand.ts create mode 100644 clients/client-sagemaker/src/commands/DescribeEdgeDeploymentPlanCommand.ts create mode 100644 clients/client-sagemaker/src/commands/ListEdgeDeploymentPlansCommand.ts create mode 100644 clients/client-sagemaker/src/commands/ListStageDevicesCommand.ts create mode 100644 clients/client-sagemaker/src/commands/StartEdgeDeploymentStageCommand.ts create mode 100644 clients/client-sagemaker/src/commands/StopEdgeDeploymentStageCommand.ts create mode 100644 clients/client-sagemaker/src/pagination/ListEdgeDeploymentPlansPaginator.ts create mode 100644 clients/client-sagemaker/src/pagination/ListStageDevicesPaginator.ts diff --git a/clients/client-sagemaker/src/SageMaker.ts b/clients/client-sagemaker/src/SageMaker.ts index 15822b642bf43..30e9aa8a3820b 100644 --- a/clients/client-sagemaker/src/SageMaker.ts +++ b/clients/client-sagemaker/src/SageMaker.ts @@ -73,6 +73,16 @@ import { CreateDomainCommandInput, CreateDomainCommandOutput, } from "./commands/CreateDomainCommand"; +import { + CreateEdgeDeploymentPlanCommand, + CreateEdgeDeploymentPlanCommandInput, + CreateEdgeDeploymentPlanCommandOutput, +} from "./commands/CreateEdgeDeploymentPlanCommand"; +import { + CreateEdgeDeploymentStageCommand, + CreateEdgeDeploymentStageCommandInput, + CreateEdgeDeploymentStageCommandOutput, +} from "./commands/CreateEdgeDeploymentStageCommand"; import { CreateEdgePackagingJobCommand, CreateEdgePackagingJobCommandInput, @@ -282,6 +292,16 @@ import { DeleteDomainCommandInput, DeleteDomainCommandOutput, } from "./commands/DeleteDomainCommand"; +import { + DeleteEdgeDeploymentPlanCommand, + DeleteEdgeDeploymentPlanCommandInput, + DeleteEdgeDeploymentPlanCommandOutput, +} from 
"./commands/DeleteEdgeDeploymentPlanCommand"; +import { + DeleteEdgeDeploymentStageCommand, + DeleteEdgeDeploymentStageCommandInput, + DeleteEdgeDeploymentStageCommandOutput, +} from "./commands/DeleteEdgeDeploymentStageCommand"; import { DeleteEndpointCommand, DeleteEndpointCommandInput, @@ -467,6 +487,11 @@ import { DescribeDomainCommandInput, DescribeDomainCommandOutput, } from "./commands/DescribeDomainCommand"; +import { + DescribeEdgeDeploymentPlanCommand, + DescribeEdgeDeploymentPlanCommandInput, + DescribeEdgeDeploymentPlanCommandOutput, +} from "./commands/DescribeEdgeDeploymentPlanCommand"; import { DescribeEdgePackagingJobCommand, DescribeEdgePackagingJobCommandInput, @@ -751,6 +776,11 @@ import { } from "./commands/ListDeviceFleetsCommand"; import { ListDevicesCommand, ListDevicesCommandInput, ListDevicesCommandOutput } from "./commands/ListDevicesCommand"; import { ListDomainsCommand, ListDomainsCommandInput, ListDomainsCommandOutput } from "./commands/ListDomainsCommand"; +import { + ListEdgeDeploymentPlansCommand, + ListEdgeDeploymentPlansCommandInput, + ListEdgeDeploymentPlansCommandOutput, +} from "./commands/ListEdgeDeploymentPlansCommand"; import { ListEdgePackagingJobsCommand, ListEdgePackagingJobsCommandInput, @@ -898,6 +928,11 @@ import { ListProjectsCommandInput, ListProjectsCommandOutput, } from "./commands/ListProjectsCommand"; +import { + ListStageDevicesCommand, + ListStageDevicesCommandInput, + ListStageDevicesCommandOutput, +} from "./commands/ListStageDevicesCommand"; import { ListStudioLifecycleConfigsCommand, ListStudioLifecycleConfigsCommandInput, @@ -981,6 +1016,11 @@ import { SendPipelineExecutionStepSuccessCommandInput, SendPipelineExecutionStepSuccessCommandOutput, } from "./commands/SendPipelineExecutionStepSuccessCommand"; +import { + StartEdgeDeploymentStageCommand, + StartEdgeDeploymentStageCommandInput, + StartEdgeDeploymentStageCommandOutput, +} from "./commands/StartEdgeDeploymentStageCommand"; import { 
StartMonitoringScheduleCommand, StartMonitoringScheduleCommandInput, @@ -1006,6 +1046,11 @@ import { StopCompilationJobCommandInput, StopCompilationJobCommandOutput, } from "./commands/StopCompilationJobCommand"; +import { + StopEdgeDeploymentStageCommand, + StopEdgeDeploymentStageCommandInput, + StopEdgeDeploymentStageCommandOutput, +} from "./commands/StopEdgeDeploymentStageCommand"; import { StopEdgePackagingJobCommand, StopEdgePackagingJobCommandInput, @@ -1833,6 +1878,70 @@ export class SageMaker extends SageMakerClient { } } + /** + *

    Creates an edge deployment plan, consisting of multiple stages. Each stage may have a different deployment configuration and devices.

    + */ + public createEdgeDeploymentPlan( + args: CreateEdgeDeploymentPlanCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public createEdgeDeploymentPlan( + args: CreateEdgeDeploymentPlanCommandInput, + cb: (err: any, data?: CreateEdgeDeploymentPlanCommandOutput) => void + ): void; + public createEdgeDeploymentPlan( + args: CreateEdgeDeploymentPlanCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: CreateEdgeDeploymentPlanCommandOutput) => void + ): void; + public createEdgeDeploymentPlan( + args: CreateEdgeDeploymentPlanCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: CreateEdgeDeploymentPlanCommandOutput) => void), + cb?: (err: any, data?: CreateEdgeDeploymentPlanCommandOutput) => void + ): Promise | void { + const command = new CreateEdgeDeploymentPlanCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

    Creates a new stage in an existing edge deployment plan.

    + */ + public createEdgeDeploymentStage( + args: CreateEdgeDeploymentStageCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public createEdgeDeploymentStage( + args: CreateEdgeDeploymentStageCommandInput, + cb: (err: any, data?: CreateEdgeDeploymentStageCommandOutput) => void + ): void; + public createEdgeDeploymentStage( + args: CreateEdgeDeploymentStageCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: CreateEdgeDeploymentStageCommandOutput) => void + ): void; + public createEdgeDeploymentStage( + args: CreateEdgeDeploymentStageCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: CreateEdgeDeploymentStageCommandOutput) => void), + cb?: (err: any, data?: CreateEdgeDeploymentStageCommandOutput) => void + ): Promise | void { + const command = new CreateEdgeDeploymentStageCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

    Starts a SageMaker Edge Manager model packaging job. Edge Manager will use the model artifacts from the Amazon Simple Storage Service bucket that you specify. After the model has been packaged, Amazon SageMaker saves the resulting artifacts to an S3 bucket that you specify.

    */ @@ -3708,6 +3817,70 @@ export class SageMaker extends SageMakerClient { } } + /** + *

    Deletes an edge deployment plan if (and only if) all the stages in the plan are inactive or there are no stages in the plan.

    + */ + public deleteEdgeDeploymentPlan( + args: DeleteEdgeDeploymentPlanCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public deleteEdgeDeploymentPlan( + args: DeleteEdgeDeploymentPlanCommandInput, + cb: (err: any, data?: DeleteEdgeDeploymentPlanCommandOutput) => void + ): void; + public deleteEdgeDeploymentPlan( + args: DeleteEdgeDeploymentPlanCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DeleteEdgeDeploymentPlanCommandOutput) => void + ): void; + public deleteEdgeDeploymentPlan( + args: DeleteEdgeDeploymentPlanCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DeleteEdgeDeploymentPlanCommandOutput) => void), + cb?: (err: any, data?: DeleteEdgeDeploymentPlanCommandOutput) => void + ): Promise | void { + const command = new DeleteEdgeDeploymentPlanCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

    Delete a stage in an edge deployment plan if (and only if) the stage is inactive.

    + */ + public deleteEdgeDeploymentStage( + args: DeleteEdgeDeploymentStageCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public deleteEdgeDeploymentStage( + args: DeleteEdgeDeploymentStageCommandInput, + cb: (err: any, data?: DeleteEdgeDeploymentStageCommandOutput) => void + ): void; + public deleteEdgeDeploymentStage( + args: DeleteEdgeDeploymentStageCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DeleteEdgeDeploymentStageCommandOutput) => void + ): void; + public deleteEdgeDeploymentStage( + args: DeleteEdgeDeploymentStageCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DeleteEdgeDeploymentStageCommandOutput) => void), + cb?: (err: any, data?: DeleteEdgeDeploymentStageCommandOutput) => void + ): Promise | void { + const command = new DeleteEdgeDeploymentStageCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

    Deletes an endpoint. SageMaker frees up all of the resources that were deployed when the * endpoint was created.

    @@ -5070,6 +5243,38 @@ export class SageMaker extends SageMakerClient { } } + /** + *

    Describes an edge deployment plan with deployment status per stage.

    + */ + public describeEdgeDeploymentPlan( + args: DescribeEdgeDeploymentPlanCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public describeEdgeDeploymentPlan( + args: DescribeEdgeDeploymentPlanCommandInput, + cb: (err: any, data?: DescribeEdgeDeploymentPlanCommandOutput) => void + ): void; + public describeEdgeDeploymentPlan( + args: DescribeEdgeDeploymentPlanCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DescribeEdgeDeploymentPlanCommandOutput) => void + ): void; + public describeEdgeDeploymentPlan( + args: DescribeEdgeDeploymentPlanCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DescribeEdgeDeploymentPlanCommandOutput) => void), + cb?: (err: any, data?: DescribeEdgeDeploymentPlanCommandOutput) => void + ): Promise | void { + const command = new DescribeEdgeDeploymentPlanCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

    A description of edge packaging jobs.

    */ @@ -7020,6 +7225,38 @@ export class SageMaker extends SageMakerClient { } } + /** + *

    Lists all edge deployment plans.

    + */ + public listEdgeDeploymentPlans( + args: ListEdgeDeploymentPlansCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listEdgeDeploymentPlans( + args: ListEdgeDeploymentPlansCommandInput, + cb: (err: any, data?: ListEdgeDeploymentPlansCommandOutput) => void + ): void; + public listEdgeDeploymentPlans( + args: ListEdgeDeploymentPlansCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListEdgeDeploymentPlansCommandOutput) => void + ): void; + public listEdgeDeploymentPlans( + args: ListEdgeDeploymentPlansCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListEdgeDeploymentPlansCommandOutput) => void), + cb?: (err: any, data?: ListEdgeDeploymentPlansCommandOutput) => void + ): Promise | void { + const command = new ListEdgeDeploymentPlansCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

    Returns a list of edge packaging jobs.

    */ @@ -8009,6 +8246,38 @@ export class SageMaker extends SageMakerClient { } } + /** + *

    Lists devices allocated to the stage, containing detailed device information and deployment status.

    + */ + public listStageDevices( + args: ListStageDevicesCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listStageDevices( + args: ListStageDevicesCommandInput, + cb: (err: any, data?: ListStageDevicesCommandOutput) => void + ): void; + public listStageDevices( + args: ListStageDevicesCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListStageDevicesCommandOutput) => void + ): void; + public listStageDevices( + args: ListStageDevicesCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListStageDevicesCommandOutput) => void), + cb?: (err: any, data?: ListStageDevicesCommandOutput) => void + ): Promise | void { + const command = new ListStageDevicesCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

    Lists the Studio Lifecycle Configurations in your Amazon Web Services Account.

    */ @@ -8659,6 +8928,38 @@ export class SageMaker extends SageMakerClient { } } + /** + *

    Starts a stage in an edge deployment plan.

    + */ + public startEdgeDeploymentStage( + args: StartEdgeDeploymentStageCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public startEdgeDeploymentStage( + args: StartEdgeDeploymentStageCommandInput, + cb: (err: any, data?: StartEdgeDeploymentStageCommandOutput) => void + ): void; + public startEdgeDeploymentStage( + args: StartEdgeDeploymentStageCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: StartEdgeDeploymentStageCommandOutput) => void + ): void; + public startEdgeDeploymentStage( + args: StartEdgeDeploymentStageCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: StartEdgeDeploymentStageCommandOutput) => void), + cb?: (err: any, data?: StartEdgeDeploymentStageCommandOutput) => void + ): Promise | void { + const command = new StartEdgeDeploymentStageCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

    Starts a previously stopped monitoring schedule.

    * @@ -8831,6 +9132,38 @@ export class SageMaker extends SageMakerClient { } } + /** + *

    Stops a stage in an edge deployment plan.

    + */ + public stopEdgeDeploymentStage( + args: StopEdgeDeploymentStageCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public stopEdgeDeploymentStage( + args: StopEdgeDeploymentStageCommandInput, + cb: (err: any, data?: StopEdgeDeploymentStageCommandOutput) => void + ): void; + public stopEdgeDeploymentStage( + args: StopEdgeDeploymentStageCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: StopEdgeDeploymentStageCommandOutput) => void + ): void; + public stopEdgeDeploymentStage( + args: StopEdgeDeploymentStageCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: StopEdgeDeploymentStageCommandOutput) => void), + cb?: (err: any, data?: StopEdgeDeploymentStageCommandOutput) => void + ): Promise | void { + const command = new StopEdgeDeploymentStageCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

    Request to stop an edge packaging job.

    */ diff --git a/clients/client-sagemaker/src/SageMakerClient.ts b/clients/client-sagemaker/src/SageMakerClient.ts index 2f2e8ce358262..fde6bb98a9507 100644 --- a/clients/client-sagemaker/src/SageMakerClient.ts +++ b/clients/client-sagemaker/src/SageMakerClient.ts @@ -87,6 +87,14 @@ import { } from "./commands/CreateDataQualityJobDefinitionCommand"; import { CreateDeviceFleetCommandInput, CreateDeviceFleetCommandOutput } from "./commands/CreateDeviceFleetCommand"; import { CreateDomainCommandInput, CreateDomainCommandOutput } from "./commands/CreateDomainCommand"; +import { + CreateEdgeDeploymentPlanCommandInput, + CreateEdgeDeploymentPlanCommandOutput, +} from "./commands/CreateEdgeDeploymentPlanCommand"; +import { + CreateEdgeDeploymentStageCommandInput, + CreateEdgeDeploymentStageCommandOutput, +} from "./commands/CreateEdgeDeploymentStageCommand"; import { CreateEdgePackagingJobCommandInput, CreateEdgePackagingJobCommandOutput, @@ -192,6 +200,14 @@ import { } from "./commands/DeleteDataQualityJobDefinitionCommand"; import { DeleteDeviceFleetCommandInput, DeleteDeviceFleetCommandOutput } from "./commands/DeleteDeviceFleetCommand"; import { DeleteDomainCommandInput, DeleteDomainCommandOutput } from "./commands/DeleteDomainCommand"; +import { + DeleteEdgeDeploymentPlanCommandInput, + DeleteEdgeDeploymentPlanCommandOutput, +} from "./commands/DeleteEdgeDeploymentPlanCommand"; +import { + DeleteEdgeDeploymentStageCommandInput, + DeleteEdgeDeploymentStageCommandOutput, +} from "./commands/DeleteEdgeDeploymentStageCommand"; import { DeleteEndpointCommandInput, DeleteEndpointCommandOutput } from "./commands/DeleteEndpointCommand"; import { DeleteEndpointConfigCommandInput, @@ -284,6 +300,10 @@ import { DescribeDeviceFleetCommandOutput, } from "./commands/DescribeDeviceFleetCommand"; import { DescribeDomainCommandInput, DescribeDomainCommandOutput } from "./commands/DescribeDomainCommand"; +import { + DescribeEdgeDeploymentPlanCommandInput, + 
DescribeEdgeDeploymentPlanCommandOutput, +} from "./commands/DescribeEdgeDeploymentPlanCommand"; import { DescribeEdgePackagingJobCommandInput, DescribeEdgePackagingJobCommandOutput, @@ -467,6 +487,10 @@ import { import { ListDeviceFleetsCommandInput, ListDeviceFleetsCommandOutput } from "./commands/ListDeviceFleetsCommand"; import { ListDevicesCommandInput, ListDevicesCommandOutput } from "./commands/ListDevicesCommand"; import { ListDomainsCommandInput, ListDomainsCommandOutput } from "./commands/ListDomainsCommand"; +import { + ListEdgeDeploymentPlansCommandInput, + ListEdgeDeploymentPlansCommandOutput, +} from "./commands/ListEdgeDeploymentPlansCommand"; import { ListEdgePackagingJobsCommandInput, ListEdgePackagingJobsCommandOutput, @@ -549,6 +573,7 @@ import { import { ListPipelinesCommandInput, ListPipelinesCommandOutput } from "./commands/ListPipelinesCommand"; import { ListProcessingJobsCommandInput, ListProcessingJobsCommandOutput } from "./commands/ListProcessingJobsCommand"; import { ListProjectsCommandInput, ListProjectsCommandOutput } from "./commands/ListProjectsCommand"; +import { ListStageDevicesCommandInput, ListStageDevicesCommandOutput } from "./commands/ListStageDevicesCommand"; import { ListStudioLifecycleConfigsCommandInput, ListStudioLifecycleConfigsCommandOutput, @@ -592,6 +617,10 @@ import { SendPipelineExecutionStepSuccessCommandInput, SendPipelineExecutionStepSuccessCommandOutput, } from "./commands/SendPipelineExecutionStepSuccessCommand"; +import { + StartEdgeDeploymentStageCommandInput, + StartEdgeDeploymentStageCommandOutput, +} from "./commands/StartEdgeDeploymentStageCommand"; import { StartMonitoringScheduleCommandInput, StartMonitoringScheduleCommandOutput, @@ -606,6 +635,10 @@ import { } from "./commands/StartPipelineExecutionCommand"; import { StopAutoMLJobCommandInput, StopAutoMLJobCommandOutput } from "./commands/StopAutoMLJobCommand"; import { StopCompilationJobCommandInput, StopCompilationJobCommandOutput } from 
"./commands/StopCompilationJobCommand"; +import { + StopEdgeDeploymentStageCommandInput, + StopEdgeDeploymentStageCommandOutput, +} from "./commands/StopEdgeDeploymentStageCommand"; import { StopEdgePackagingJobCommandInput, StopEdgePackagingJobCommandOutput, @@ -707,6 +740,8 @@ export type ServiceInputTypes = | CreateDataQualityJobDefinitionCommandInput | CreateDeviceFleetCommandInput | CreateDomainCommandInput + | CreateEdgeDeploymentPlanCommandInput + | CreateEdgeDeploymentStageCommandInput | CreateEdgePackagingJobCommandInput | CreateEndpointCommandInput | CreateEndpointConfigCommandInput @@ -752,6 +787,8 @@ export type ServiceInputTypes = | DeleteDataQualityJobDefinitionCommandInput | DeleteDeviceFleetCommandInput | DeleteDomainCommandInput + | DeleteEdgeDeploymentPlanCommandInput + | DeleteEdgeDeploymentStageCommandInput | DeleteEndpointCommandInput | DeleteEndpointConfigCommandInput | DeleteExperimentCommandInput @@ -793,6 +830,7 @@ export type ServiceInputTypes = | DescribeDeviceCommandInput | DescribeDeviceFleetCommandInput | DescribeDomainCommandInput + | DescribeEdgeDeploymentPlanCommandInput | DescribeEdgePackagingJobCommandInput | DescribeEndpointCommandInput | DescribeEndpointConfigCommandInput @@ -853,6 +891,7 @@ export type ServiceInputTypes = | ListDeviceFleetsCommandInput | ListDevicesCommandInput | ListDomainsCommandInput + | ListEdgeDeploymentPlansCommandInput | ListEdgePackagingJobsCommandInput | ListEndpointConfigsCommandInput | ListEndpointsCommandInput @@ -884,6 +923,7 @@ export type ServiceInputTypes = | ListPipelinesCommandInput | ListProcessingJobsCommandInput | ListProjectsCommandInput + | ListStageDevicesCommandInput | ListStudioLifecycleConfigsCommandInput | ListSubscribedWorkteamsCommandInput | ListTagsCommandInput @@ -903,11 +943,13 @@ export type ServiceInputTypes = | SearchCommandInput | SendPipelineExecutionStepFailureCommandInput | SendPipelineExecutionStepSuccessCommandInput + | StartEdgeDeploymentStageCommandInput | 
StartMonitoringScheduleCommandInput | StartNotebookInstanceCommandInput | StartPipelineExecutionCommandInput | StopAutoMLJobCommandInput | StopCompilationJobCommandInput + | StopEdgeDeploymentStageCommandInput | StopEdgePackagingJobCommandInput | StopHyperParameterTuningJobCommandInput | StopInferenceRecommendationsJobCommandInput @@ -963,6 +1005,8 @@ export type ServiceOutputTypes = | CreateDataQualityJobDefinitionCommandOutput | CreateDeviceFleetCommandOutput | CreateDomainCommandOutput + | CreateEdgeDeploymentPlanCommandOutput + | CreateEdgeDeploymentStageCommandOutput | CreateEdgePackagingJobCommandOutput | CreateEndpointCommandOutput | CreateEndpointConfigCommandOutput @@ -1008,6 +1052,8 @@ export type ServiceOutputTypes = | DeleteDataQualityJobDefinitionCommandOutput | DeleteDeviceFleetCommandOutput | DeleteDomainCommandOutput + | DeleteEdgeDeploymentPlanCommandOutput + | DeleteEdgeDeploymentStageCommandOutput | DeleteEndpointCommandOutput | DeleteEndpointConfigCommandOutput | DeleteExperimentCommandOutput @@ -1049,6 +1095,7 @@ export type ServiceOutputTypes = | DescribeDeviceCommandOutput | DescribeDeviceFleetCommandOutput | DescribeDomainCommandOutput + | DescribeEdgeDeploymentPlanCommandOutput | DescribeEdgePackagingJobCommandOutput | DescribeEndpointCommandOutput | DescribeEndpointConfigCommandOutput @@ -1109,6 +1156,7 @@ export type ServiceOutputTypes = | ListDeviceFleetsCommandOutput | ListDevicesCommandOutput | ListDomainsCommandOutput + | ListEdgeDeploymentPlansCommandOutput | ListEdgePackagingJobsCommandOutput | ListEndpointConfigsCommandOutput | ListEndpointsCommandOutput @@ -1140,6 +1188,7 @@ export type ServiceOutputTypes = | ListPipelinesCommandOutput | ListProcessingJobsCommandOutput | ListProjectsCommandOutput + | ListStageDevicesCommandOutput | ListStudioLifecycleConfigsCommandOutput | ListSubscribedWorkteamsCommandOutput | ListTagsCommandOutput @@ -1159,11 +1208,13 @@ export type ServiceOutputTypes = | SearchCommandOutput | 
SendPipelineExecutionStepFailureCommandOutput | SendPipelineExecutionStepSuccessCommandOutput + | StartEdgeDeploymentStageCommandOutput | StartMonitoringScheduleCommandOutput | StartNotebookInstanceCommandOutput | StartPipelineExecutionCommandOutput | StopAutoMLJobCommandOutput | StopCompilationJobCommandOutput + | StopEdgeDeploymentStageCommandOutput | StopEdgePackagingJobCommandOutput | StopHyperParameterTuningJobCommandOutput | StopInferenceRecommendationsJobCommandOutput diff --git a/clients/client-sagemaker/src/commands/CreateEdgeDeploymentPlanCommand.ts b/clients/client-sagemaker/src/commands/CreateEdgeDeploymentPlanCommand.ts new file mode 100644 index 0000000000000..d081f928f378f --- /dev/null +++ b/clients/client-sagemaker/src/commands/CreateEdgeDeploymentPlanCommand.ts @@ -0,0 +1,96 @@ +// smithy-typescript generated code +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { CreateEdgeDeploymentPlanRequest, CreateEdgeDeploymentPlanResponse } from "../models/models_0"; +import { + deserializeAws_json1_1CreateEdgeDeploymentPlanCommand, + serializeAws_json1_1CreateEdgeDeploymentPlanCommand, +} from "../protocols/Aws_json1_1"; +import { SageMakerClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../SageMakerClient"; + +export interface CreateEdgeDeploymentPlanCommandInput extends CreateEdgeDeploymentPlanRequest {} +export interface CreateEdgeDeploymentPlanCommandOutput extends CreateEdgeDeploymentPlanResponse, __MetadataBearer {} + +/** + *

    Creates an edge deployment plan, consisting of multiple stages. Each stage may have a different deployment configuration and devices.

    + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { SageMakerClient, CreateEdgeDeploymentPlanCommand } from "@aws-sdk/client-sagemaker"; // ES Modules import + * // const { SageMakerClient, CreateEdgeDeploymentPlanCommand } = require("@aws-sdk/client-sagemaker"); // CommonJS import + * const client = new SageMakerClient(config); + * const command = new CreateEdgeDeploymentPlanCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CreateEdgeDeploymentPlanCommandInput} for command's `input` shape. + * @see {@link CreateEdgeDeploymentPlanCommandOutput} for command's `response` shape. + * @see {@link SageMakerClientResolvedConfig | config} for SageMakerClient's `config` shape. + * + */ +export class CreateEdgeDeploymentPlanCommand extends $Command< + CreateEdgeDeploymentPlanCommandInput, + CreateEdgeDeploymentPlanCommandOutput, + SageMakerClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreateEdgeDeploymentPlanCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: SageMakerClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "SageMakerClient"; + const commandName = "CreateEdgeDeploymentPlanCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: CreateEdgeDeploymentPlanRequest.filterSensitiveLog, + outputFilterSensitiveLog: CreateEdgeDeploymentPlanResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + 
return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: CreateEdgeDeploymentPlanCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_1CreateEdgeDeploymentPlanCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_1CreateEdgeDeploymentPlanCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-sagemaker/src/commands/CreateEdgeDeploymentStageCommand.ts b/clients/client-sagemaker/src/commands/CreateEdgeDeploymentStageCommand.ts new file mode 100644 index 0000000000000..528c9401f362a --- /dev/null +++ b/clients/client-sagemaker/src/commands/CreateEdgeDeploymentStageCommand.ts @@ -0,0 +1,99 @@ +// smithy-typescript generated code +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { CreateEdgeDeploymentStageRequest } from "../models/models_0"; +import { + deserializeAws_json1_1CreateEdgeDeploymentStageCommand, + serializeAws_json1_1CreateEdgeDeploymentStageCommand, +} from "../protocols/Aws_json1_1"; +import { SageMakerClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../SageMakerClient"; + +export interface CreateEdgeDeploymentStageCommandInput extends CreateEdgeDeploymentStageRequest {} +export interface CreateEdgeDeploymentStageCommandOutput extends __MetadataBearer {} + +/** + *

    Creates a new stage in an existing edge deployment plan.

    + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { SageMakerClient, CreateEdgeDeploymentStageCommand } from "@aws-sdk/client-sagemaker"; // ES Modules import + * // const { SageMakerClient, CreateEdgeDeploymentStageCommand } = require("@aws-sdk/client-sagemaker"); // CommonJS import + * const client = new SageMakerClient(config); + * const command = new CreateEdgeDeploymentStageCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CreateEdgeDeploymentStageCommandInput} for command's `input` shape. + * @see {@link CreateEdgeDeploymentStageCommandOutput} for command's `response` shape. + * @see {@link SageMakerClientResolvedConfig | config} for SageMakerClient's `config` shape. + * + */ +export class CreateEdgeDeploymentStageCommand extends $Command< + CreateEdgeDeploymentStageCommandInput, + CreateEdgeDeploymentStageCommandOutput, + SageMakerClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreateEdgeDeploymentStageCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: SageMakerClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "SageMakerClient"; + const commandName = "CreateEdgeDeploymentStageCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: CreateEdgeDeploymentStageRequest.filterSensitiveLog, + outputFilterSensitiveLog: (output: any) => output, + }; + const { requestHandler } = configuration; + return 
stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: CreateEdgeDeploymentStageCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_1CreateEdgeDeploymentStageCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_json1_1CreateEdgeDeploymentStageCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-sagemaker/src/commands/CreateHyperParameterTuningJobCommand.ts b/clients/client-sagemaker/src/commands/CreateHyperParameterTuningJobCommand.ts index cf960eb59bca7..618ceae5916d6 100644 --- a/clients/client-sagemaker/src/commands/CreateHyperParameterTuningJobCommand.ts +++ b/clients/client-sagemaker/src/commands/CreateHyperParameterTuningJobCommand.ts @@ -12,7 +12,7 @@ import { SerdeContext as __SerdeContext, } from "@aws-sdk/types"; -import { CreateHyperParameterTuningJobRequest, CreateHyperParameterTuningJobResponse } from "../models/models_0"; +import { CreateHyperParameterTuningJobRequest, CreateHyperParameterTuningJobResponse } from "../models/models_1"; import { deserializeAws_json1_1CreateHyperParameterTuningJobCommand, serializeAws_json1_1CreateHyperParameterTuningJobCommand, diff --git a/clients/client-sagemaker/src/commands/DeleteEdgeDeploymentPlanCommand.ts b/clients/client-sagemaker/src/commands/DeleteEdgeDeploymentPlanCommand.ts new file mode 100644 index 0000000000000..1f442c02d63dd --- /dev/null +++ b/clients/client-sagemaker/src/commands/DeleteEdgeDeploymentPlanCommand.ts @@ -0,0 +1,96 @@ +// smithy-typescript generated code +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as 
$Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { DeleteEdgeDeploymentPlanRequest } from "../models/models_1"; +import { + deserializeAws_json1_1DeleteEdgeDeploymentPlanCommand, + serializeAws_json1_1DeleteEdgeDeploymentPlanCommand, +} from "../protocols/Aws_json1_1"; +import { SageMakerClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../SageMakerClient"; + +export interface DeleteEdgeDeploymentPlanCommandInput extends DeleteEdgeDeploymentPlanRequest {} +export interface DeleteEdgeDeploymentPlanCommandOutput extends __MetadataBearer {} + +/** + *

    Deletes an edge deployment plan if (and only if) all the stages in the plan are inactive or there are no stages in the plan.

    + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { SageMakerClient, DeleteEdgeDeploymentPlanCommand } from "@aws-sdk/client-sagemaker"; // ES Modules import + * // const { SageMakerClient, DeleteEdgeDeploymentPlanCommand } = require("@aws-sdk/client-sagemaker"); // CommonJS import + * const client = new SageMakerClient(config); + * const command = new DeleteEdgeDeploymentPlanCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DeleteEdgeDeploymentPlanCommandInput} for command's `input` shape. + * @see {@link DeleteEdgeDeploymentPlanCommandOutput} for command's `response` shape. + * @see {@link SageMakerClientResolvedConfig | config} for SageMakerClient's `config` shape. + * + */ +export class DeleteEdgeDeploymentPlanCommand extends $Command< + DeleteEdgeDeploymentPlanCommandInput, + DeleteEdgeDeploymentPlanCommandOutput, + SageMakerClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeleteEdgeDeploymentPlanCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: SageMakerClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "SageMakerClient"; + const commandName = "DeleteEdgeDeploymentPlanCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DeleteEdgeDeploymentPlanRequest.filterSensitiveLog, + outputFilterSensitiveLog: (output: any) => output, + }; + const { requestHandler } = configuration; + return stack.resolve( + 
(request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DeleteEdgeDeploymentPlanCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_1DeleteEdgeDeploymentPlanCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_1DeleteEdgeDeploymentPlanCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-sagemaker/src/commands/DeleteEdgeDeploymentStageCommand.ts b/clients/client-sagemaker/src/commands/DeleteEdgeDeploymentStageCommand.ts new file mode 100644 index 0000000000000..f49f9c46ed0ba --- /dev/null +++ b/clients/client-sagemaker/src/commands/DeleteEdgeDeploymentStageCommand.ts @@ -0,0 +1,99 @@ +// smithy-typescript generated code +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { DeleteEdgeDeploymentStageRequest } from "../models/models_1"; +import { + deserializeAws_json1_1DeleteEdgeDeploymentStageCommand, + serializeAws_json1_1DeleteEdgeDeploymentStageCommand, +} from "../protocols/Aws_json1_1"; +import { SageMakerClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../SageMakerClient"; + +export interface DeleteEdgeDeploymentStageCommandInput extends DeleteEdgeDeploymentStageRequest {} +export interface DeleteEdgeDeploymentStageCommandOutput extends __MetadataBearer {} + +/** + *

    Delete a stage in an edge deployment plan if (and only if) the stage is inactive.

    + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { SageMakerClient, DeleteEdgeDeploymentStageCommand } from "@aws-sdk/client-sagemaker"; // ES Modules import + * // const { SageMakerClient, DeleteEdgeDeploymentStageCommand } = require("@aws-sdk/client-sagemaker"); // CommonJS import + * const client = new SageMakerClient(config); + * const command = new DeleteEdgeDeploymentStageCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DeleteEdgeDeploymentStageCommandInput} for command's `input` shape. + * @see {@link DeleteEdgeDeploymentStageCommandOutput} for command's `response` shape. + * @see {@link SageMakerClientResolvedConfig | config} for SageMakerClient's `config` shape. + * + */ +export class DeleteEdgeDeploymentStageCommand extends $Command< + DeleteEdgeDeploymentStageCommandInput, + DeleteEdgeDeploymentStageCommandOutput, + SageMakerClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeleteEdgeDeploymentStageCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: SageMakerClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "SageMakerClient"; + const commandName = "DeleteEdgeDeploymentStageCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DeleteEdgeDeploymentStageRequest.filterSensitiveLog, + outputFilterSensitiveLog: (output: any) => output, + }; + const { requestHandler } = configuration; + return 
stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DeleteEdgeDeploymentStageCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_1DeleteEdgeDeploymentStageCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_json1_1DeleteEdgeDeploymentStageCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-sagemaker/src/commands/DescribeEdgeDeploymentPlanCommand.ts b/clients/client-sagemaker/src/commands/DescribeEdgeDeploymentPlanCommand.ts new file mode 100644 index 0000000000000..89d10c179a810 --- /dev/null +++ b/clients/client-sagemaker/src/commands/DescribeEdgeDeploymentPlanCommand.ts @@ -0,0 +1,99 @@ +// smithy-typescript generated code +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { DescribeEdgeDeploymentPlanRequest, DescribeEdgeDeploymentPlanResponse } from "../models/models_1"; +import { + deserializeAws_json1_1DescribeEdgeDeploymentPlanCommand, + serializeAws_json1_1DescribeEdgeDeploymentPlanCommand, +} from "../protocols/Aws_json1_1"; +import { SageMakerClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../SageMakerClient"; + +export interface DescribeEdgeDeploymentPlanCommandInput extends DescribeEdgeDeploymentPlanRequest {} +export interface 
DescribeEdgeDeploymentPlanCommandOutput extends DescribeEdgeDeploymentPlanResponse, __MetadataBearer {} + +/** + *

    Describes an edge deployment plan with deployment status per stage.

    + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { SageMakerClient, DescribeEdgeDeploymentPlanCommand } from "@aws-sdk/client-sagemaker"; // ES Modules import + * // const { SageMakerClient, DescribeEdgeDeploymentPlanCommand } = require("@aws-sdk/client-sagemaker"); // CommonJS import + * const client = new SageMakerClient(config); + * const command = new DescribeEdgeDeploymentPlanCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DescribeEdgeDeploymentPlanCommandInput} for command's `input` shape. + * @see {@link DescribeEdgeDeploymentPlanCommandOutput} for command's `response` shape. + * @see {@link SageMakerClientResolvedConfig | config} for SageMakerClient's `config` shape. + * + */ +export class DescribeEdgeDeploymentPlanCommand extends $Command< + DescribeEdgeDeploymentPlanCommandInput, + DescribeEdgeDeploymentPlanCommandOutput, + SageMakerClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DescribeEdgeDeploymentPlanCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: SageMakerClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "SageMakerClient"; + const commandName = "DescribeEdgeDeploymentPlanCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DescribeEdgeDeploymentPlanRequest.filterSensitiveLog, + outputFilterSensitiveLog: DescribeEdgeDeploymentPlanResponse.filterSensitiveLog, + }; + const { requestHandler 
} = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DescribeEdgeDeploymentPlanCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_1DescribeEdgeDeploymentPlanCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_json1_1DescribeEdgeDeploymentPlanCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-sagemaker/src/commands/DescribeHyperParameterTuningJobCommand.ts b/clients/client-sagemaker/src/commands/DescribeHyperParameterTuningJobCommand.ts index b2ab633365204..a297e260a93bb 100644 --- a/clients/client-sagemaker/src/commands/DescribeHyperParameterTuningJobCommand.ts +++ b/clients/client-sagemaker/src/commands/DescribeHyperParameterTuningJobCommand.ts @@ -12,7 +12,7 @@ import { SerdeContext as __SerdeContext, } from "@aws-sdk/types"; -import { DescribeHyperParameterTuningJobRequest, DescribeHyperParameterTuningJobResponse } from "../models/models_1"; +import { DescribeHyperParameterTuningJobRequest, DescribeHyperParameterTuningJobResponse } from "../models/models_2"; import { deserializeAws_json1_1DescribeHyperParameterTuningJobCommand, serializeAws_json1_1DescribeHyperParameterTuningJobCommand, diff --git a/clients/client-sagemaker/src/commands/DescribeImageCommand.ts b/clients/client-sagemaker/src/commands/DescribeImageCommand.ts index 75c5a668f4b51..f051a63f4b4b8 100644 --- a/clients/client-sagemaker/src/commands/DescribeImageCommand.ts +++ b/clients/client-sagemaker/src/commands/DescribeImageCommand.ts @@ -12,7 +12,7 @@ import { SerdeContext as __SerdeContext, } from "@aws-sdk/types"; -import { DescribeImageRequest, DescribeImageResponse } from "../models/models_1"; +import { 
DescribeImageRequest, DescribeImageResponse } from "../models/models_2"; import { deserializeAws_json1_1DescribeImageCommand, serializeAws_json1_1DescribeImageCommand, diff --git a/clients/client-sagemaker/src/commands/DescribeImageVersionCommand.ts b/clients/client-sagemaker/src/commands/DescribeImageVersionCommand.ts index eda970d0fec03..10284afe9e65c 100644 --- a/clients/client-sagemaker/src/commands/DescribeImageVersionCommand.ts +++ b/clients/client-sagemaker/src/commands/DescribeImageVersionCommand.ts @@ -12,7 +12,7 @@ import { SerdeContext as __SerdeContext, } from "@aws-sdk/types"; -import { DescribeImageVersionRequest, DescribeImageVersionResponse } from "../models/models_1"; +import { DescribeImageVersionRequest, DescribeImageVersionResponse } from "../models/models_2"; import { deserializeAws_json1_1DescribeImageVersionCommand, serializeAws_json1_1DescribeImageVersionCommand, diff --git a/clients/client-sagemaker/src/commands/DescribeInferenceRecommendationsJobCommand.ts b/clients/client-sagemaker/src/commands/DescribeInferenceRecommendationsJobCommand.ts index 5a983a44917fd..8f4e4bebc0ed8 100644 --- a/clients/client-sagemaker/src/commands/DescribeInferenceRecommendationsJobCommand.ts +++ b/clients/client-sagemaker/src/commands/DescribeInferenceRecommendationsJobCommand.ts @@ -12,8 +12,10 @@ import { SerdeContext as __SerdeContext, } from "@aws-sdk/types"; -import { DescribeInferenceRecommendationsJobRequest } from "../models/models_1"; -import { DescribeInferenceRecommendationsJobResponse } from "../models/models_2"; +import { + DescribeInferenceRecommendationsJobRequest, + DescribeInferenceRecommendationsJobResponse, +} from "../models/models_2"; import { deserializeAws_json1_1DescribeInferenceRecommendationsJobCommand, serializeAws_json1_1DescribeInferenceRecommendationsJobCommand, diff --git a/clients/client-sagemaker/src/commands/ListEdgeDeploymentPlansCommand.ts b/clients/client-sagemaker/src/commands/ListEdgeDeploymentPlansCommand.ts new file mode 
100644 index 0000000000000..35111f3e47e09 --- /dev/null +++ b/clients/client-sagemaker/src/commands/ListEdgeDeploymentPlansCommand.ts @@ -0,0 +1,96 @@ +// smithy-typescript generated code +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { ListEdgeDeploymentPlansRequest, ListEdgeDeploymentPlansResponse } from "../models/models_2"; +import { + deserializeAws_json1_1ListEdgeDeploymentPlansCommand, + serializeAws_json1_1ListEdgeDeploymentPlansCommand, +} from "../protocols/Aws_json1_1"; +import { SageMakerClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../SageMakerClient"; + +export interface ListEdgeDeploymentPlansCommandInput extends ListEdgeDeploymentPlansRequest {} +export interface ListEdgeDeploymentPlansCommandOutput extends ListEdgeDeploymentPlansResponse, __MetadataBearer {} + +/** + *

    Lists all edge deployment plans.

    + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { SageMakerClient, ListEdgeDeploymentPlansCommand } from "@aws-sdk/client-sagemaker"; // ES Modules import + * // const { SageMakerClient, ListEdgeDeploymentPlansCommand } = require("@aws-sdk/client-sagemaker"); // CommonJS import + * const client = new SageMakerClient(config); + * const command = new ListEdgeDeploymentPlansCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListEdgeDeploymentPlansCommandInput} for command's `input` shape. + * @see {@link ListEdgeDeploymentPlansCommandOutput} for command's `response` shape. + * @see {@link SageMakerClientResolvedConfig | config} for SageMakerClient's `config` shape. + * + */ +export class ListEdgeDeploymentPlansCommand extends $Command< + ListEdgeDeploymentPlansCommandInput, + ListEdgeDeploymentPlansCommandOutput, + SageMakerClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListEdgeDeploymentPlansCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: SageMakerClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "SageMakerClient"; + const commandName = "ListEdgeDeploymentPlansCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListEdgeDeploymentPlansRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListEdgeDeploymentPlansResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return 
stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListEdgeDeploymentPlansCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_1ListEdgeDeploymentPlansCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_1ListEdgeDeploymentPlansCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-sagemaker/src/commands/ListPipelineExecutionStepsCommand.ts b/clients/client-sagemaker/src/commands/ListPipelineExecutionStepsCommand.ts index ee379522191bb..a86035b771e65 100644 --- a/clients/client-sagemaker/src/commands/ListPipelineExecutionStepsCommand.ts +++ b/clients/client-sagemaker/src/commands/ListPipelineExecutionStepsCommand.ts @@ -12,7 +12,7 @@ import { SerdeContext as __SerdeContext, } from "@aws-sdk/types"; -import { ListPipelineExecutionStepsRequest, ListPipelineExecutionStepsResponse } from "../models/models_2"; +import { ListPipelineExecutionStepsRequest, ListPipelineExecutionStepsResponse } from "../models/models_3"; import { deserializeAws_json1_1ListPipelineExecutionStepsCommand, serializeAws_json1_1ListPipelineExecutionStepsCommand, diff --git a/clients/client-sagemaker/src/commands/ListPipelineParametersForExecutionCommand.ts b/clients/client-sagemaker/src/commands/ListPipelineParametersForExecutionCommand.ts index 4464c43bd1196..5204ff6e86513 100644 --- a/clients/client-sagemaker/src/commands/ListPipelineParametersForExecutionCommand.ts +++ b/clients/client-sagemaker/src/commands/ListPipelineParametersForExecutionCommand.ts @@ -15,7 +15,7 @@ import { import { ListPipelineParametersForExecutionRequest, ListPipelineParametersForExecutionResponse, -} from "../models/models_2"; +} from "../models/models_3"; import { 
deserializeAws_json1_1ListPipelineParametersForExecutionCommand, serializeAws_json1_1ListPipelineParametersForExecutionCommand, diff --git a/clients/client-sagemaker/src/commands/ListPipelinesCommand.ts b/clients/client-sagemaker/src/commands/ListPipelinesCommand.ts index eb69049e3683c..2f5288a04c3f6 100644 --- a/clients/client-sagemaker/src/commands/ListPipelinesCommand.ts +++ b/clients/client-sagemaker/src/commands/ListPipelinesCommand.ts @@ -12,7 +12,7 @@ import { SerdeContext as __SerdeContext, } from "@aws-sdk/types"; -import { ListPipelinesRequest, ListPipelinesResponse } from "../models/models_2"; +import { ListPipelinesRequest, ListPipelinesResponse } from "../models/models_3"; import { deserializeAws_json1_1ListPipelinesCommand, serializeAws_json1_1ListPipelinesCommand, diff --git a/clients/client-sagemaker/src/commands/ListProcessingJobsCommand.ts b/clients/client-sagemaker/src/commands/ListProcessingJobsCommand.ts index 0ed04c0c75b0d..36f3eb1686bbd 100644 --- a/clients/client-sagemaker/src/commands/ListProcessingJobsCommand.ts +++ b/clients/client-sagemaker/src/commands/ListProcessingJobsCommand.ts @@ -12,7 +12,7 @@ import { SerdeContext as __SerdeContext, } from "@aws-sdk/types"; -import { ListProcessingJobsRequest, ListProcessingJobsResponse } from "../models/models_2"; +import { ListProcessingJobsRequest, ListProcessingJobsResponse } from "../models/models_3"; import { deserializeAws_json1_1ListProcessingJobsCommand, serializeAws_json1_1ListProcessingJobsCommand, diff --git a/clients/client-sagemaker/src/commands/ListStageDevicesCommand.ts b/clients/client-sagemaker/src/commands/ListStageDevicesCommand.ts new file mode 100644 index 0000000000000..d5db488e87e8b --- /dev/null +++ b/clients/client-sagemaker/src/commands/ListStageDevicesCommand.ts @@ -0,0 +1,96 @@ +// smithy-typescript generated code +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from 
"@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { ListStageDevicesRequest, ListStageDevicesResponse } from "../models/models_3"; +import { + deserializeAws_json1_1ListStageDevicesCommand, + serializeAws_json1_1ListStageDevicesCommand, +} from "../protocols/Aws_json1_1"; +import { SageMakerClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../SageMakerClient"; + +export interface ListStageDevicesCommandInput extends ListStageDevicesRequest {} +export interface ListStageDevicesCommandOutput extends ListStageDevicesResponse, __MetadataBearer {} + +/** + *

    Lists devices allocated to the stage, containing detailed device information and deployment status.

    + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { SageMakerClient, ListStageDevicesCommand } from "@aws-sdk/client-sagemaker"; // ES Modules import + * // const { SageMakerClient, ListStageDevicesCommand } = require("@aws-sdk/client-sagemaker"); // CommonJS import + * const client = new SageMakerClient(config); + * const command = new ListStageDevicesCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListStageDevicesCommandInput} for command's `input` shape. + * @see {@link ListStageDevicesCommandOutput} for command's `response` shape. + * @see {@link SageMakerClientResolvedConfig | config} for SageMakerClient's `config` shape. + * + */ +export class ListStageDevicesCommand extends $Command< + ListStageDevicesCommandInput, + ListStageDevicesCommandOutput, + SageMakerClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListStageDevicesCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: SageMakerClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "SageMakerClient"; + const commandName = "ListStageDevicesCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListStageDevicesRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListStageDevicesResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + 
requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListStageDevicesCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_1ListStageDevicesCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_1ListStageDevicesCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-sagemaker/src/commands/StartEdgeDeploymentStageCommand.ts b/clients/client-sagemaker/src/commands/StartEdgeDeploymentStageCommand.ts new file mode 100644 index 0000000000000..2d77afbcd0b8c --- /dev/null +++ b/clients/client-sagemaker/src/commands/StartEdgeDeploymentStageCommand.ts @@ -0,0 +1,96 @@ +// smithy-typescript generated code +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { StartEdgeDeploymentStageRequest } from "../models/models_3"; +import { + deserializeAws_json1_1StartEdgeDeploymentStageCommand, + serializeAws_json1_1StartEdgeDeploymentStageCommand, +} from "../protocols/Aws_json1_1"; +import { SageMakerClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../SageMakerClient"; + +export interface StartEdgeDeploymentStageCommandInput extends StartEdgeDeploymentStageRequest {} +export interface StartEdgeDeploymentStageCommandOutput extends __MetadataBearer {} + +/** + *

    Starts a stage in an edge deployment plan.

    + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { SageMakerClient, StartEdgeDeploymentStageCommand } from "@aws-sdk/client-sagemaker"; // ES Modules import + * // const { SageMakerClient, StartEdgeDeploymentStageCommand } = require("@aws-sdk/client-sagemaker"); // CommonJS import + * const client = new SageMakerClient(config); + * const command = new StartEdgeDeploymentStageCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link StartEdgeDeploymentStageCommandInput} for command's `input` shape. + * @see {@link StartEdgeDeploymentStageCommandOutput} for command's `response` shape. + * @see {@link SageMakerClientResolvedConfig | config} for SageMakerClient's `config` shape. + * + */ +export class StartEdgeDeploymentStageCommand extends $Command< + StartEdgeDeploymentStageCommandInput, + StartEdgeDeploymentStageCommandOutput, + SageMakerClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: StartEdgeDeploymentStageCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: SageMakerClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "SageMakerClient"; + const commandName = "StartEdgeDeploymentStageCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: StartEdgeDeploymentStageRequest.filterSensitiveLog, + outputFilterSensitiveLog: (output: any) => output, + }; + const { requestHandler } = configuration; + return stack.resolve( + 
(request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: StartEdgeDeploymentStageCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_1StartEdgeDeploymentStageCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_1StartEdgeDeploymentStageCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-sagemaker/src/commands/StopEdgeDeploymentStageCommand.ts b/clients/client-sagemaker/src/commands/StopEdgeDeploymentStageCommand.ts new file mode 100644 index 0000000000000..ac2bb0e3b263c --- /dev/null +++ b/clients/client-sagemaker/src/commands/StopEdgeDeploymentStageCommand.ts @@ -0,0 +1,96 @@ +// smithy-typescript generated code +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { StopEdgeDeploymentStageRequest } from "../models/models_3"; +import { + deserializeAws_json1_1StopEdgeDeploymentStageCommand, + serializeAws_json1_1StopEdgeDeploymentStageCommand, +} from "../protocols/Aws_json1_1"; +import { SageMakerClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../SageMakerClient"; + +export interface StopEdgeDeploymentStageCommandInput extends StopEdgeDeploymentStageRequest {} +export interface StopEdgeDeploymentStageCommandOutput extends __MetadataBearer {} + +/** + *

    Stops a stage in an edge deployment plan.

    + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { SageMakerClient, StopEdgeDeploymentStageCommand } from "@aws-sdk/client-sagemaker"; // ES Modules import + * // const { SageMakerClient, StopEdgeDeploymentStageCommand } = require("@aws-sdk/client-sagemaker"); // CommonJS import + * const client = new SageMakerClient(config); + * const command = new StopEdgeDeploymentStageCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link StopEdgeDeploymentStageCommandInput} for command's `input` shape. + * @see {@link StopEdgeDeploymentStageCommandOutput} for command's `response` shape. + * @see {@link SageMakerClientResolvedConfig | config} for SageMakerClient's `config` shape. + * + */ +export class StopEdgeDeploymentStageCommand extends $Command< + StopEdgeDeploymentStageCommandInput, + StopEdgeDeploymentStageCommandOutput, + SageMakerClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: StopEdgeDeploymentStageCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: SageMakerClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "SageMakerClient"; + const commandName = "StopEdgeDeploymentStageCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: StopEdgeDeploymentStageRequest.filterSensitiveLog, + outputFilterSensitiveLog: (output: any) => output, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: 
FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: StopEdgeDeploymentStageCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_1StopEdgeDeploymentStageCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_1StopEdgeDeploymentStageCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-sagemaker/src/commands/index.ts b/clients/client-sagemaker/src/commands/index.ts index 107d587bba407..288abce9dd441 100644 --- a/clients/client-sagemaker/src/commands/index.ts +++ b/clients/client-sagemaker/src/commands/index.ts @@ -15,6 +15,8 @@ export * from "./CreateContextCommand"; export * from "./CreateDataQualityJobDefinitionCommand"; export * from "./CreateDeviceFleetCommand"; export * from "./CreateDomainCommand"; +export * from "./CreateEdgeDeploymentPlanCommand"; +export * from "./CreateEdgeDeploymentStageCommand"; export * from "./CreateEdgePackagingJobCommand"; export * from "./CreateEndpointCommand"; export * from "./CreateEndpointConfigCommand"; @@ -60,6 +62,8 @@ export * from "./DeleteContextCommand"; export * from "./DeleteDataQualityJobDefinitionCommand"; export * from "./DeleteDeviceFleetCommand"; export * from "./DeleteDomainCommand"; +export * from "./DeleteEdgeDeploymentPlanCommand"; +export * from "./DeleteEdgeDeploymentStageCommand"; export * from "./DeleteEndpointCommand"; export * from "./DeleteEndpointConfigCommand"; export * from "./DeleteExperimentCommand"; @@ -101,6 +105,7 @@ export * from "./DescribeDataQualityJobDefinitionCommand"; export * from "./DescribeDeviceCommand"; export * from "./DescribeDeviceFleetCommand"; export * from "./DescribeDomainCommand"; +export * from "./DescribeEdgeDeploymentPlanCommand"; export * from 
"./DescribeEdgePackagingJobCommand"; export * from "./DescribeEndpointCommand"; export * from "./DescribeEndpointConfigCommand"; @@ -161,6 +166,7 @@ export * from "./ListDataQualityJobDefinitionsCommand"; export * from "./ListDeviceFleetsCommand"; export * from "./ListDevicesCommand"; export * from "./ListDomainsCommand"; +export * from "./ListEdgeDeploymentPlansCommand"; export * from "./ListEdgePackagingJobsCommand"; export * from "./ListEndpointConfigsCommand"; export * from "./ListEndpointsCommand"; @@ -192,6 +198,7 @@ export * from "./ListPipelineParametersForExecutionCommand"; export * from "./ListPipelinesCommand"; export * from "./ListProcessingJobsCommand"; export * from "./ListProjectsCommand"; +export * from "./ListStageDevicesCommand"; export * from "./ListStudioLifecycleConfigsCommand"; export * from "./ListSubscribedWorkteamsCommand"; export * from "./ListTagsCommand"; @@ -211,11 +218,13 @@ export * from "./RetryPipelineExecutionCommand"; export * from "./SearchCommand"; export * from "./SendPipelineExecutionStepFailureCommand"; export * from "./SendPipelineExecutionStepSuccessCommand"; +export * from "./StartEdgeDeploymentStageCommand"; export * from "./StartMonitoringScheduleCommand"; export * from "./StartNotebookInstanceCommand"; export * from "./StartPipelineExecutionCommand"; export * from "./StopAutoMLJobCommand"; export * from "./StopCompilationJobCommand"; +export * from "./StopEdgeDeploymentStageCommand"; export * from "./StopEdgePackagingJobCommand"; export * from "./StopHyperParameterTuningJobCommand"; export * from "./StopInferenceRecommendationsJobCommand"; diff --git a/clients/client-sagemaker/src/models/models_0.ts b/clients/client-sagemaker/src/models/models_0.ts index feddbbbaf3a61..ff7002a2e6ab2 100644 --- a/clients/client-sagemaker/src/models/models_0.ts +++ b/clients/client-sagemaker/src/models/models_0.ts @@ -9513,6 +9513,197 @@ export namespace CreateDomainResponse { }); } +/** + *

    Contains information about the configuration of a model in a deployment.

    + */ +export interface EdgeDeploymentModelConfig { + /** + *

    The name the device application uses to reference this model.

    + */ + ModelHandle: string | undefined; + + /** + *

    The edge packaging job associated with this deployment.

    + */ + EdgePackagingJobName: string | undefined; +} + +export namespace EdgeDeploymentModelConfig { + /** + * @internal + */ + export const filterSensitiveLog = (obj: EdgeDeploymentModelConfig): any => ({ + ...obj, + }); +} + +export enum FailureHandlingPolicy { + DoNothing = "DO_NOTHING", + RollbackOnFailure = "ROLLBACK_ON_FAILURE", +} + +/** + *

    Contains information about the configuration of a deployment.

    + */ +export interface EdgeDeploymentConfig { + /** + *

    Toggle that determines whether to roll back to the previous configuration if the current deployment fails. + * By default this is turned on. You may turn this off if you want to investigate the errors yourself.

    + */ + FailureHandlingPolicy: FailureHandlingPolicy | string | undefined; +} + +export namespace EdgeDeploymentConfig { + /** + * @internal + */ + export const filterSensitiveLog = (obj: EdgeDeploymentConfig): any => ({ + ...obj, + }); +} + +export enum DeviceSubsetType { + NameContains = "NAMECONTAINS", + Percentage = "PERCENTAGE", + Selection = "SELECTION", +} + +/** + *

    Contains information about the configurations of selected devices.

    + */ +export interface DeviceSelectionConfig { + /** + *

    Type of device subsets to deploy to the current stage.

    + */ + DeviceSubsetType: DeviceSubsetType | string | undefined; + + /** + *

    Percentage of devices in the fleet to deploy to the current stage.

    + */ + Percentage?: number; + + /** + *

    List of devices chosen to deploy.

    + */ + DeviceNames?: string[]; + + /** + *

    A filter to select devices with names containing this name.

    + */ + DeviceNameContains?: string; +} + +export namespace DeviceSelectionConfig { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeviceSelectionConfig): any => ({ + ...obj, + }); +} + +/** + *

    Contains information about a stage in an edge deployment plan.

    + */ +export interface DeploymentStage { + /** + *

    The name of the stage.

    + */ + StageName: string | undefined; + + /** + *

    Configuration of the devices in the stage.

    + */ + DeviceSelectionConfig: DeviceSelectionConfig | undefined; + + /** + *

    Configuration of the deployment details.

    + */ + DeploymentConfig?: EdgeDeploymentConfig; +} + +export namespace DeploymentStage { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeploymentStage): any => ({ + ...obj, + }); +} + +export interface CreateEdgeDeploymentPlanRequest { + /** + *

    The name of the edge deployment plan.

    + */ + EdgeDeploymentPlanName: string | undefined; + + /** + *

    List of models associated with the edge deployment plan.

    + */ + ModelConfigs: EdgeDeploymentModelConfig[] | undefined; + + /** + *

    The device fleet used for this edge deployment plan.

    + */ + DeviceFleetName: string | undefined; + + /** + *

    List of stages of the edge deployment plan. The number of stages is limited to 10 per deployment.

    + */ + Stages?: DeploymentStage[]; + + /** + *

    List of tags with which to tag the edge deployment plan.

    + */ + Tags?: Tag[]; +} + +export namespace CreateEdgeDeploymentPlanRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateEdgeDeploymentPlanRequest): any => ({ + ...obj, + }); +} + +export interface CreateEdgeDeploymentPlanResponse { + /** + *

    The ARN of the edge deployment plan.

    + */ + EdgeDeploymentPlanArn: string | undefined; +} + +export namespace CreateEdgeDeploymentPlanResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateEdgeDeploymentPlanResponse): any => ({ + ...obj, + }); +} + +export interface CreateEdgeDeploymentStageRequest { + /** + *

    The name of the edge deployment plan.

    + */ + EdgeDeploymentPlanName: string | undefined; + + /** + *

    List of stages to be added to the edge deployment plan.

    + */ + Stages: DeploymentStage[] | undefined; +} + +export namespace CreateEdgeDeploymentStageRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateEdgeDeploymentStageRequest): any => ({ + ...obj, + }); +} + export interface CreateEdgePackagingJobRequest { /** *

    The name of the edge packaging job.

    @@ -11549,516 +11740,3 @@ export namespace TuningJobCompletionCriteria { ...obj, }); } - -/** - *

    Configures a hyperparameter tuning job.

    - */ -export interface HyperParameterTuningJobConfig { - /** - *

    Specifies how hyperparameter tuning chooses the combinations of hyperparameter values - * to use for the training job it launches. To use the Bayesian search strategy, set this - * to Bayesian. To randomly search, set it to Random. For - * information about search strategies, see How - * Hyperparameter Tuning Works.

    - */ - Strategy: HyperParameterTuningJobStrategyType | string | undefined; - - /** - *

    The HyperParameterTuningJobObjective object that specifies the - * objective - * metric for this tuning job.

    - */ - HyperParameterTuningJobObjective?: HyperParameterTuningJobObjective; - - /** - *

    The ResourceLimits object that specifies the - * maximum - * number of training jobs and parallel training jobs for this tuning - * job.

    - */ - ResourceLimits: ResourceLimits | undefined; - - /** - *

    The ParameterRanges object that specifies the ranges of - * hyperparameters - * that this tuning job searches.

    - */ - ParameterRanges?: ParameterRanges; - - /** - *

    Specifies whether to use early stopping for training jobs launched by the - * hyperparameter tuning job. This can be one of the following values (the default value is - * OFF):

    - *
    - *
    OFF
    - *
    - *

    Training jobs launched by the hyperparameter tuning job do not use early - * stopping.

    - *
    - *
    AUTO
    - *
    - *

    SageMaker stops training jobs launched by the hyperparameter tuning job when - * they are unlikely to perform better than previously completed training jobs. - * For more information, see Stop Training Jobs Early.

    - *
    - *
    - */ - TrainingJobEarlyStoppingType?: TrainingJobEarlyStoppingType | string; - - /** - *

    The tuning job's completion criteria.

    - */ - TuningJobCompletionCriteria?: TuningJobCompletionCriteria; -} - -export namespace HyperParameterTuningJobConfig { - /** - * @internal - */ - export const filterSensitiveLog = (obj: HyperParameterTuningJobConfig): any => ({ - ...obj, - }); -} - -/** - *

    Specifies - * which - * training algorithm to use for training jobs that a hyperparameter - * tuning job launches and the metrics to monitor.

    - */ -export interface HyperParameterAlgorithmSpecification { - /** - *

    The registry path of the Docker image that contains the training algorithm. For - * information about Docker registry paths for built-in algorithms, see Algorithms - * Provided by Amazon SageMaker: Common Parameters. SageMaker supports both - * registry/repository[:tag] and registry/repository[@digest] - * image path formats. For more information, see Using Your Own Algorithms with Amazon - * SageMaker.

    - */ - TrainingImage?: string; - - /** - *

    The training input mode that the algorithm supports. For more information about input modes, see - * Algorithms.

    - * - *

    - * Pipe mode - *

    - *

    If an algorithm supports Pipe mode, Amazon SageMaker streams data directly - * from Amazon S3 to the container.

    - * - *

    - * File mode - *

    - *

    If an algorithm supports File mode, SageMaker - * downloads the training data from S3 to the provisioned ML storage volume, and mounts the - * directory to the Docker volume for the training container.

    - *

    You must provision the ML storage volume with sufficient capacity - * to accommodate the data downloaded from S3. In addition to the training data, the ML - * storage volume also stores the output model. The algorithm container uses the ML storage - * volume to also store intermediate information, if any.

    - *

    For distributed algorithms, training data is distributed uniformly. - * Your training duration is predictable if the input data objects sizes are - * approximately the same. SageMaker does not split the files any further for model training. - * If the object sizes are skewed, training won't be optimal as the data distribution is also - * skewed when one host in a training cluster is overloaded, thus becoming a bottleneck in - * training.

    - * - *

    - * FastFile mode - *

    - *

    If an algorithm supports FastFile mode, SageMaker streams data directly - * from S3 to the container with no code changes, and provides file system access to - * the data. Users can author their training script to interact with these files as if - * they were stored on disk.

    - *

    - * FastFile mode works best when the data is read sequentially. - * Augmented manifest files aren't supported. - * The startup time is lower when there are fewer files in the S3 bucket provided.

    - */ - TrainingInputMode: TrainingInputMode | string | undefined; - - /** - *

    The name of the resource algorithm to use for the hyperparameter tuning job. If you - * specify a value for this parameter, do not specify a value for - * TrainingImage.

    - */ - AlgorithmName?: string; - - /** - *

    An array of MetricDefinition objects that specify the - * metrics - * that the algorithm emits.

    - */ - MetricDefinitions?: MetricDefinition[]; -} - -export namespace HyperParameterAlgorithmSpecification { - /** - * @internal - */ - export const filterSensitiveLog = (obj: HyperParameterAlgorithmSpecification): any => ({ - ...obj, - }); -} - -/** - *

    The retry strategy to use when a training job fails due to an - * InternalServerError. RetryStrategy is specified as part of - * the CreateTrainingJob and CreateHyperParameterTuningJob - * requests. You can add the StoppingCondition parameter to the request to - * limit the training time for the complete job.

    - */ -export interface RetryStrategy { - /** - *

    The number of times to retry the job. When the job is retried, it's - * SecondaryStatus is changed to STARTING.

    - */ - MaximumRetryAttempts: number | undefined; -} - -export namespace RetryStrategy { - /** - * @internal - */ - export const filterSensitiveLog = (obj: RetryStrategy): any => ({ - ...obj, - }); -} - -/** - *

    Defines - * the training jobs launched by a hyperparameter tuning job.

    - */ -export interface HyperParameterTrainingJobDefinition { - /** - *

    The job definition name.

    - */ - DefinitionName?: string; - - /** - *

    Defines the objective metric for a hyperparameter tuning job. - * Hyperparameter - * tuning uses the value of this metric to evaluate the training jobs it launches, and - * returns the training job that results in either the highest or lowest value for this - * metric, depending on the value you specify for the Type - * parameter.

    - */ - TuningObjective?: HyperParameterTuningJobObjective; - - /** - *

    Specifies ranges of integer, continuous, and categorical hyperparameters that a - * hyperparameter tuning job searches. The hyperparameter tuning job launches training jobs - * with hyperparameter values within these ranges to find the combination of values that - * result in the training job with the best performance as measured by the objective metric - * of the hyperparameter tuning job.

    - * - *

    The maximum number of items specified for Array Members refers to - * the maximum number of hyperparameters for each range and also the maximum for the - * hyperparameter tuning job itself. That is, the sum of the number of hyperparameters - * for all the ranges can't exceed the maximum number specified.

    - *
    - */ - HyperParameterRanges?: ParameterRanges; - - /** - *

    Specifies the values of hyperparameters - * that - * do not change for the tuning job.

    - */ - StaticHyperParameters?: Record; - - /** - *

    The HyperParameterAlgorithmSpecification object that - * specifies - * the resource algorithm to use for the training jobs that the tuning - * job launches.

    - */ - AlgorithmSpecification: HyperParameterAlgorithmSpecification | undefined; - - /** - *

    The Amazon Resource Name (ARN) of the - * IAM - * role associated with the training jobs that the tuning job - * launches.

    - */ - RoleArn: string | undefined; - - /** - *

    An array of Channel objects that specify - * the - * input for the training jobs that the tuning job launches.

    - */ - InputDataConfig?: Channel[]; - - /** - *

    The VpcConfig object that - * specifies - * the VPC that you want the training jobs that this hyperparameter - * tuning job launches to connect to. Control access to and from your - * training - * container by configuring the VPC. For more information, see Protect Training Jobs - * by Using an Amazon Virtual Private Cloud.

    - */ - VpcConfig?: VpcConfig; - - /** - *

    Specifies the path to the Amazon S3 bucket where you - * store - * model artifacts from the training jobs that the tuning job - * launches.

    - */ - OutputDataConfig: OutputDataConfig | undefined; - - /** - *

    The resources, - * including - * the compute instances and storage volumes, to use for the training - * jobs that the tuning job launches.

    - *

    Storage volumes store model artifacts and - * incremental - * states. Training algorithms might also use storage volumes for - * scratch - * space. If you want SageMaker to use the storage volume to store the - * training data, choose File as the TrainingInputMode in the - * algorithm specification. For distributed training algorithms, specify an instance count - * greater than 1.

    - */ - ResourceConfig: ResourceConfig | undefined; - - /** - *

    Specifies a limit to how long a model hyperparameter training job can run. It also - * specifies how long a managed spot training job has to complete. When the job reaches the - * time limit, SageMaker ends the training job. Use this API to cap model training costs.

    - */ - StoppingCondition: StoppingCondition | undefined; - - /** - *

    Isolates the training container. No inbound or outbound network calls can be made, - * except for calls between peers within a training cluster for distributed training. If - * network isolation is used for training jobs that are configured to use a VPC, SageMaker - * downloads and uploads customer data and model artifacts through the specified VPC, but - * the training container does not have network access.

    - */ - EnableNetworkIsolation?: boolean; - - /** - *

    To encrypt all communications between ML compute instances in distributed training, - * choose True. Encryption provides greater security for distributed training, - * but training might take longer. How long it takes depends on the amount of communication - * between compute instances, especially if you use a deep learning algorithm in - * distributed training.

    - */ - EnableInterContainerTrafficEncryption?: boolean; - - /** - *

    A Boolean indicating whether managed spot training is enabled (True) or - * not (False).

    - */ - EnableManagedSpotTraining?: boolean; - - /** - *

    Contains information about the output location for managed spot training checkpoint - * data.

    - */ - CheckpointConfig?: CheckpointConfig; - - /** - *

    The number of times to retry the job when the job fails due to an - * InternalServerError.

    - */ - RetryStrategy?: RetryStrategy; -} - -export namespace HyperParameterTrainingJobDefinition { - /** - * @internal - */ - export const filterSensitiveLog = (obj: HyperParameterTrainingJobDefinition): any => ({ - ...obj, - }); -} - -/** - *

    A previously completed or stopped hyperparameter tuning job to be used as a starting - * point for a new hyperparameter tuning job.

    - */ -export interface ParentHyperParameterTuningJob { - /** - *

    The name of the hyperparameter tuning job to be used as a starting point for a new - * hyperparameter tuning job.

    - */ - HyperParameterTuningJobName?: string; -} - -export namespace ParentHyperParameterTuningJob { - /** - * @internal - */ - export const filterSensitiveLog = (obj: ParentHyperParameterTuningJob): any => ({ - ...obj, - }); -} - -export enum HyperParameterTuningJobWarmStartType { - IDENTICAL_DATA_AND_ALGORITHM = "IdenticalDataAndAlgorithm", - TRANSFER_LEARNING = "TransferLearning", -} - -/** - *

    Specifies the configuration for a hyperparameter tuning job that uses one or more - * previous hyperparameter tuning jobs as a starting point. The results of previous tuning - * jobs are used to inform which combinations of hyperparameters to search over in the new - * tuning job.

    - *

    All training jobs launched by the new hyperparameter tuning job are evaluated by using - * the objective metric, and the training job that performs the best is compared to the - * best training jobs from the parent tuning jobs. From these, the training job that - * performs the best as measured by the objective metric is returned as the overall best - * training job.

    - * - *

    All training jobs launched by parent hyperparameter tuning jobs and the new - * hyperparameter tuning jobs count against the limit of training jobs for the tuning - * job.

    - *
    - */ -export interface HyperParameterTuningJobWarmStartConfig { - /** - *

    An array of hyperparameter tuning jobs that are used as the starting point for the new - * hyperparameter tuning job. For more information about warm starting a hyperparameter - * tuning job, see Using a Previous - * Hyperparameter Tuning Job as a Starting Point.

    - *

    Hyperparameter tuning jobs created before October 1, 2018 cannot be used as parent - * jobs for warm start tuning jobs.

    - */ - ParentHyperParameterTuningJobs: ParentHyperParameterTuningJob[] | undefined; - - /** - *

    Specifies one of the following:

    - *
    - *
    IDENTICAL_DATA_AND_ALGORITHM
    - *
    - *

    The new hyperparameter tuning job uses the same input data and training - * image as the parent tuning jobs. You can change the hyperparameter ranges to - * search and the maximum number of training jobs that the hyperparameter - * tuning job launches. You cannot use a new version of the training algorithm, - * unless the changes in the new version do not affect the algorithm itself. - * For example, changes that improve logging or adding support for a different - * data format are allowed. You can also change hyperparameters from tunable to - * static, and from static to tunable, but the total number of static plus - * tunable hyperparameters must remain the same as it is in all parent jobs. - * The objective metric for the new tuning job must be the same as for all - * parent jobs.

    - *
    - *
    TRANSFER_LEARNING
    - *
    - *

    The new hyperparameter tuning job can include input data, hyperparameter - * ranges, maximum number of concurrent training jobs, and maximum number of - * training jobs that are different than those of its parent hyperparameter - * tuning jobs. The training image can also be a different version from the - * version used in the parent hyperparameter tuning job. You can also change - * hyperparameters from tunable to static, and from static to tunable, but the - * total number of static plus tunable hyperparameters must remain the same as - * it is in all parent jobs. The objective metric for the new tuning job must - * be the same as for all parent jobs.

    - *
    - *
    - */ - WarmStartType: HyperParameterTuningJobWarmStartType | string | undefined; -} - -export namespace HyperParameterTuningJobWarmStartConfig { - /** - * @internal - */ - export const filterSensitiveLog = (obj: HyperParameterTuningJobWarmStartConfig): any => ({ - ...obj, - }); -} - -export interface CreateHyperParameterTuningJobRequest { - /** - *

    The name of the tuning job. This name is the prefix for the names of all training jobs - * that this tuning job launches. The name must be unique within the same Amazon Web Services account and - * Amazon Web Services Region. The name must have 1 to 32 characters. Valid characters are a-z, A-Z, 0-9, - * and : + = @ _ % - (hyphen). The name is not case sensitive.

    - */ - HyperParameterTuningJobName: string | undefined; - - /** - *

    The HyperParameterTuningJobConfig object that describes the tuning - * job, including the search strategy, the objective metric used to evaluate training jobs, - * ranges of parameters to search, and resource limits for the tuning job. For more - * information, see How - * Hyperparameter Tuning Works.

    - */ - HyperParameterTuningJobConfig: HyperParameterTuningJobConfig | undefined; - - /** - *

    The HyperParameterTrainingJobDefinition object that describes the - * training jobs that this tuning job launches, including static hyperparameters, input - * data configuration, output data configuration, resource configuration, and stopping - * condition.

    - */ - TrainingJobDefinition?: HyperParameterTrainingJobDefinition; - - /** - *

    A list of the HyperParameterTrainingJobDefinition objects launched - * for this tuning job.

    - */ - TrainingJobDefinitions?: HyperParameterTrainingJobDefinition[]; - - /** - *

    Specifies the configuration for starting the hyperparameter tuning job using one or - * more previous tuning jobs as a starting point. The results of previous tuning jobs are - * used to inform which combinations of hyperparameters to search over in the new tuning - * job.

    - *

    All training jobs launched by the new hyperparameter tuning job are evaluated by using - * the objective metric. If you specify IDENTICAL_DATA_AND_ALGORITHM as the - * WarmStartType value for the warm start configuration, the training job - * that performs the best in the new tuning job is compared to the best training jobs from - * the parent tuning jobs. From these, the training job that performs the best as measured - * by the objective metric is returned as the overall best training job.

    - * - *

    All training jobs launched by parent hyperparameter tuning jobs and the new - * hyperparameter tuning jobs count against the limit of training jobs for the tuning - * job.

    - *
    - */ - WarmStartConfig?: HyperParameterTuningJobWarmStartConfig; - - /** - *

    An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in - * different ways, for example, by purpose, owner, or environment. For more information, - * see Tagging Amazon Web Services - * Resources.

    - *

    Tags that you specify for the tuning job are also added to all training jobs that the - * tuning job launches.

    - */ - Tags?: Tag[]; -} - -export namespace CreateHyperParameterTuningJobRequest { - /** - * @internal - */ - export const filterSensitiveLog = (obj: CreateHyperParameterTuningJobRequest): any => ({ - ...obj, - }); -} - -export interface CreateHyperParameterTuningJobResponse { - /** - *

    The Amazon Resource Name (ARN) of the tuning job. SageMaker assigns an ARN to a - * hyperparameter tuning job when you create it.

    - */ - HyperParameterTuningJobArn: string | undefined; -} - -export namespace CreateHyperParameterTuningJobResponse { - /** - * @internal - */ - export const filterSensitiveLog = (obj: CreateHyperParameterTuningJobResponse): any => ({ - ...obj, - }); -} diff --git a/clients/client-sagemaker/src/models/models_1.ts b/clients/client-sagemaker/src/models/models_1.ts index 6cf88a38f605b..c32a839669425 100644 --- a/clients/client-sagemaker/src/models/models_1.ts +++ b/clients/client-sagemaker/src/models/models_1.ts @@ -47,7 +47,10 @@ import { DataQualityBaselineConfig, DataQualityJobInput, DeploymentConfig, + DeviceSelectionConfig, DomainSettings, + EdgeDeploymentConfig, + EdgeDeploymentModelConfig, EdgeOutputConfig, EdgePresetDeploymentType, EndpointInput, @@ -58,14 +61,13 @@ import { HumanLoopActivationConfig, HumanLoopConfig, HumanLoopRequestSource, - HyperParameterTrainingJobDefinition, - HyperParameterTuningJobConfig, - HyperParameterTuningJobObjectiveType, - HyperParameterTuningJobWarmStartConfig, + HyperParameterTuningJobObjective, + HyperParameterTuningJobStrategyType, InferenceSpecification, InputConfig, KernelGatewayImageConfig, MetadataProperties, + MetricDefinition, MetricsSource, ModelApprovalStatus, ModelDeployConfig, @@ -76,11 +78,11 @@ import { MonitoringStatisticsResource, MonitoringStoppingCondition, NeoVpcConfig, - ObjectiveStatus, OfflineStoreConfig, OnlineStoreConfig, OutputConfig, OutputDataConfig, + ParameterRanges, ProblemType, ProcessingInstanceType, ProcessingS3DataDistributionType, @@ -92,20 +94,536 @@ import { ProductionVariantServerlessConfig, PublicWorkforceTaskPrice, ResourceConfig, + ResourceLimits, ResourceSpec, - RetryStrategy, StoppingCondition, Tag, + TrainingInputMode, + TrainingJobEarlyStoppingType, TrainingSpecification, TransformInput, TransformJobDefinition, TransformOutput, TransformResources, + TuningJobCompletionCriteria, UserContext, UserSettings, VpcConfig, } from "./models_0"; +/** + *

    Configures a hyperparameter tuning job.

    + */ +export interface HyperParameterTuningJobConfig { + /** + *

    Specifies how hyperparameter tuning chooses the combinations of hyperparameter values + * to use for the training job it launches. To use the Bayesian search strategy, set this + * to Bayesian. To randomly search, set it to Random. For + * information about search strategies, see How + * Hyperparameter Tuning Works.

    + */ + Strategy: HyperParameterTuningJobStrategyType | string | undefined; + + /** + *

    The HyperParameterTuningJobObjective object that specifies the + * objective + * metric for this tuning job.

    + */ + HyperParameterTuningJobObjective?: HyperParameterTuningJobObjective; + + /** + *

    The ResourceLimits object that specifies the + * maximum + * number of training jobs and parallel training jobs for this tuning + * job.

    + */ + ResourceLimits: ResourceLimits | undefined; + + /** + *

    The ParameterRanges object that specifies the ranges of + * hyperparameters + * that this tuning job searches.

    + */ + ParameterRanges?: ParameterRanges; + + /** + *

    Specifies whether to use early stopping for training jobs launched by the + * hyperparameter tuning job. This can be one of the following values (the default value is + * OFF):

    + *
    + *
    OFF
    + *
    + *

    Training jobs launched by the hyperparameter tuning job do not use early + * stopping.

    + *
    + *
    AUTO
    + *
    + *

    SageMaker stops training jobs launched by the hyperparameter tuning job when + * they are unlikely to perform better than previously completed training jobs. + * For more information, see Stop Training Jobs Early.

    + *
    + *
    + */ + TrainingJobEarlyStoppingType?: TrainingJobEarlyStoppingType | string; + + /** + *

    The tuning job's completion criteria.

    + */ + TuningJobCompletionCriteria?: TuningJobCompletionCriteria; +} + +export namespace HyperParameterTuningJobConfig { + /** + * @internal + */ + export const filterSensitiveLog = (obj: HyperParameterTuningJobConfig): any => ({ + ...obj, + }); +} + +/** + *

    Specifies + * which + * training algorithm to use for training jobs that a hyperparameter + * tuning job launches and the metrics to monitor.

    + */ +export interface HyperParameterAlgorithmSpecification { + /** + *

    The registry path of the Docker image that contains the training algorithm. For + * information about Docker registry paths for built-in algorithms, see Algorithms + * Provided by Amazon SageMaker: Common Parameters. SageMaker supports both + * registry/repository[:tag] and registry/repository[@digest] + * image path formats. For more information, see Using Your Own Algorithms with Amazon + * SageMaker.

    + */ + TrainingImage?: string; + + /** + *

    The training input mode that the algorithm supports. For more information about input modes, see + * Algorithms.

    + * + *

    + * Pipe mode + *

    + *

    If an algorithm supports Pipe mode, Amazon SageMaker streams data directly + * from Amazon S3 to the container.

    + * + *

    + * File mode + *

    + *

    If an algorithm supports File mode, SageMaker + * downloads the training data from S3 to the provisioned ML storage volume, and mounts the + * directory to the Docker volume for the training container.

    + *

    You must provision the ML storage volume with sufficient capacity + * to accommodate the data downloaded from S3. In addition to the training data, the ML + * storage volume also stores the output model. The algorithm container uses the ML storage + * volume to also store intermediate information, if any.

    + *

    For distributed algorithms, training data is distributed uniformly. + * Your training duration is predictable if the input data objects sizes are + * approximately the same. SageMaker does not split the files any further for model training. + * If the object sizes are skewed, training won't be optimal as the data distribution is also + * skewed when one host in a training cluster is overloaded, thus becoming a bottleneck in + * training.

    + * + *

    + * FastFile mode + *

    + *

    If an algorithm supports FastFile mode, SageMaker streams data directly + * from S3 to the container with no code changes, and provides file system access to + * the data. Users can author their training script to interact with these files as if + * they were stored on disk.

    + *

    + * FastFile mode works best when the data is read sequentially. + * Augmented manifest files aren't supported. + * The startup time is lower when there are fewer files in the S3 bucket provided.

    + */ + TrainingInputMode: TrainingInputMode | string | undefined; + + /** + *

    The name of the resource algorithm to use for the hyperparameter tuning job. If you + * specify a value for this parameter, do not specify a value for + * TrainingImage.

    + */ + AlgorithmName?: string; + + /** + *

    An array of MetricDefinition objects that specify the + * metrics + * that the algorithm emits.

    + */ + MetricDefinitions?: MetricDefinition[]; +} + +export namespace HyperParameterAlgorithmSpecification { + /** + * @internal + */ + export const filterSensitiveLog = (obj: HyperParameterAlgorithmSpecification): any => ({ + ...obj, + }); +} + +/** + *

    The retry strategy to use when a training job fails due to an + * InternalServerError. RetryStrategy is specified as part of + * the CreateTrainingJob and CreateHyperParameterTuningJob + * requests. You can add the StoppingCondition parameter to the request to + * limit the training time for the complete job.

    + */ +export interface RetryStrategy { + /** + *

    The number of times to retry the job. When the job is retried, it's + * SecondaryStatus is changed to STARTING.

    + */ + MaximumRetryAttempts: number | undefined; +} + +export namespace RetryStrategy { + /** + * @internal + */ + export const filterSensitiveLog = (obj: RetryStrategy): any => ({ + ...obj, + }); +} + +/** + *

    Defines + * the training jobs launched by a hyperparameter tuning job.

    + */ +export interface HyperParameterTrainingJobDefinition { + /** + *

    The job definition name.

    + */ + DefinitionName?: string; + + /** + *

    Defines the objective metric for a hyperparameter tuning job. + * Hyperparameter + * tuning uses the value of this metric to evaluate the training jobs it launches, and + * returns the training job that results in either the highest or lowest value for this + * metric, depending on the value you specify for the Type + * parameter.

    + */ + TuningObjective?: HyperParameterTuningJobObjective; + + /** + *

    Specifies ranges of integer, continuous, and categorical hyperparameters that a + * hyperparameter tuning job searches. The hyperparameter tuning job launches training jobs + * with hyperparameter values within these ranges to find the combination of values that + * result in the training job with the best performance as measured by the objective metric + * of the hyperparameter tuning job.

    + * + *

    The maximum number of items specified for Array Members refers to + * the maximum number of hyperparameters for each range and also the maximum for the + * hyperparameter tuning job itself. That is, the sum of the number of hyperparameters + * for all the ranges can't exceed the maximum number specified.

    + *
    + */ + HyperParameterRanges?: ParameterRanges; + + /** + *

    Specifies the values of hyperparameters + * that + * do not change for the tuning job.

    + */ + StaticHyperParameters?: Record; + + /** + *

    The HyperParameterAlgorithmSpecification object that + * specifies + * the resource algorithm to use for the training jobs that the tuning + * job launches.

    + */ + AlgorithmSpecification: HyperParameterAlgorithmSpecification | undefined; + + /** + *

    The Amazon Resource Name (ARN) of the + * IAM + * role associated with the training jobs that the tuning job + * launches.

    + */ + RoleArn: string | undefined; + + /** + *

    An array of Channel objects that specify + * the + * input for the training jobs that the tuning job launches.

    + */ + InputDataConfig?: Channel[]; + + /** + *

    The VpcConfig object that + * specifies + * the VPC that you want the training jobs that this hyperparameter + * tuning job launches to connect to. Control access to and from your + * training + * container by configuring the VPC. For more information, see Protect Training Jobs + * by Using an Amazon Virtual Private Cloud.

    + */ + VpcConfig?: VpcConfig; + + /** + *

    Specifies the path to the Amazon S3 bucket where you + * store + * model artifacts from the training jobs that the tuning job + * launches.

    + */ + OutputDataConfig: OutputDataConfig | undefined; + + /** + *

    The resources, + * including + * the compute instances and storage volumes, to use for the training + * jobs that the tuning job launches.

    + *

    Storage volumes store model artifacts and + * incremental + * states. Training algorithms might also use storage volumes for + * scratch + * space. If you want SageMaker to use the storage volume to store the + * training data, choose File as the TrainingInputMode in the + * algorithm specification. For distributed training algorithms, specify an instance count + * greater than 1.

    + */ + ResourceConfig: ResourceConfig | undefined; + + /** + *

    Specifies a limit to how long a model hyperparameter training job can run. It also + * specifies how long a managed spot training job has to complete. When the job reaches the + * time limit, SageMaker ends the training job. Use this API to cap model training costs.

    + */ + StoppingCondition: StoppingCondition | undefined; + + /** + *

    Isolates the training container. No inbound or outbound network calls can be made, + * except for calls between peers within a training cluster for distributed training. If + * network isolation is used for training jobs that are configured to use a VPC, SageMaker + * downloads and uploads customer data and model artifacts through the specified VPC, but + * the training container does not have network access.

    + */ + EnableNetworkIsolation?: boolean; + + /** + *

    To encrypt all communications between ML compute instances in distributed training, + * choose True. Encryption provides greater security for distributed training, + * but training might take longer. How long it takes depends on the amount of communication + * between compute instances, especially if you use a deep learning algorithm in + * distributed training.

    + */ + EnableInterContainerTrafficEncryption?: boolean; + + /** + *

    A Boolean indicating whether managed spot training is enabled (True) or + * not (False).

    + */ + EnableManagedSpotTraining?: boolean; + + /** + *

    Contains information about the output location for managed spot training checkpoint + * data.

    + */ + CheckpointConfig?: CheckpointConfig; + + /** + *

    The number of times to retry the job when the job fails due to an + * InternalServerError.

    + */ + RetryStrategy?: RetryStrategy; +} + +export namespace HyperParameterTrainingJobDefinition { + /** + * @internal + */ + export const filterSensitiveLog = (obj: HyperParameterTrainingJobDefinition): any => ({ + ...obj, + }); +} + +/** + *

    A previously completed or stopped hyperparameter tuning job to be used as a starting + * point for a new hyperparameter tuning job.

    + */ +export interface ParentHyperParameterTuningJob { + /** + *

    The name of the hyperparameter tuning job to be used as a starting point for a new + * hyperparameter tuning job.

    + */ + HyperParameterTuningJobName?: string; +} + +export namespace ParentHyperParameterTuningJob { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ParentHyperParameterTuningJob): any => ({ + ...obj, + }); +} + +export enum HyperParameterTuningJobWarmStartType { + IDENTICAL_DATA_AND_ALGORITHM = "IdenticalDataAndAlgorithm", + TRANSFER_LEARNING = "TransferLearning", +} + +/** + *

    Specifies the configuration for a hyperparameter tuning job that uses one or more + * previous hyperparameter tuning jobs as a starting point. The results of previous tuning + * jobs are used to inform which combinations of hyperparameters to search over in the new + * tuning job.

    + *

    All training jobs launched by the new hyperparameter tuning job are evaluated by using + * the objective metric, and the training job that performs the best is compared to the + * best training jobs from the parent tuning jobs. From these, the training job that + * performs the best as measured by the objective metric is returned as the overall best + * training job.

    + * + *

    All training jobs launched by parent hyperparameter tuning jobs and the new + * hyperparameter tuning jobs count against the limit of training jobs for the tuning + * job.

    + *
    + */ +export interface HyperParameterTuningJobWarmStartConfig { + /** + *

    An array of hyperparameter tuning jobs that are used as the starting point for the new + * hyperparameter tuning job. For more information about warm starting a hyperparameter + * tuning job, see Using a Previous + * Hyperparameter Tuning Job as a Starting Point.

    + *

    Hyperparameter tuning jobs created before October 1, 2018 cannot be used as parent + * jobs for warm start tuning jobs.

    + */ + ParentHyperParameterTuningJobs: ParentHyperParameterTuningJob[] | undefined; + + /** + *

    Specifies one of the following:

    + *
    + *
    IDENTICAL_DATA_AND_ALGORITHM
    + *
    + *

    The new hyperparameter tuning job uses the same input data and training + * image as the parent tuning jobs. You can change the hyperparameter ranges to + * search and the maximum number of training jobs that the hyperparameter + * tuning job launches. You cannot use a new version of the training algorithm, + * unless the changes in the new version do not affect the algorithm itself. + * For example, changes that improve logging or adding support for a different + * data format are allowed. You can also change hyperparameters from tunable to + * static, and from static to tunable, but the total number of static plus + * tunable hyperparameters must remain the same as it is in all parent jobs. + * The objective metric for the new tuning job must be the same as for all + * parent jobs.

    + *
    + *
    TRANSFER_LEARNING
    + *
    + *

    The new hyperparameter tuning job can include input data, hyperparameter + * ranges, maximum number of concurrent training jobs, and maximum number of + * training jobs that are different than those of its parent hyperparameter + * tuning jobs. The training image can also be a different version from the + * version used in the parent hyperparameter tuning job. You can also change + * hyperparameters from tunable to static, and from static to tunable, but the + * total number of static plus tunable hyperparameters must remain the same as + * it is in all parent jobs. The objective metric for the new tuning job must + * be the same as for all parent jobs.

    + *
    + *
    + */ + WarmStartType: HyperParameterTuningJobWarmStartType | string | undefined; +} + +export namespace HyperParameterTuningJobWarmStartConfig { + /** + * @internal + */ + export const filterSensitiveLog = (obj: HyperParameterTuningJobWarmStartConfig): any => ({ + ...obj, + }); +} + +export interface CreateHyperParameterTuningJobRequest { + /** + *

    The name of the tuning job. This name is the prefix for the names of all training jobs + * that this tuning job launches. The name must be unique within the same Amazon Web Services account and + * Amazon Web Services Region. The name must have 1 to 32 characters. Valid characters are a-z, A-Z, 0-9, + * and : + = @ _ % - (hyphen). The name is not case sensitive.

    + */ + HyperParameterTuningJobName: string | undefined; + + /** + *

    The HyperParameterTuningJobConfig object that describes the tuning + * job, including the search strategy, the objective metric used to evaluate training jobs, + * ranges of parameters to search, and resource limits for the tuning job. For more + * information, see How + * Hyperparameter Tuning Works.

    + */ + HyperParameterTuningJobConfig: HyperParameterTuningJobConfig | undefined; + + /** + *

    The HyperParameterTrainingJobDefinition object that describes the + * training jobs that this tuning job launches, including static hyperparameters, input + * data configuration, output data configuration, resource configuration, and stopping + * condition.

    + */ + TrainingJobDefinition?: HyperParameterTrainingJobDefinition; + + /** + *

    A list of the HyperParameterTrainingJobDefinition objects launched + * for this tuning job.

    + */ + TrainingJobDefinitions?: HyperParameterTrainingJobDefinition[]; + + /** + *

    Specifies the configuration for starting the hyperparameter tuning job using one or + * more previous tuning jobs as a starting point. The results of previous tuning jobs are + * used to inform which combinations of hyperparameters to search over in the new tuning + * job.

    + *

    All training jobs launched by the new hyperparameter tuning job are evaluated by using + * the objective metric. If you specify IDENTICAL_DATA_AND_ALGORITHM as the + * WarmStartType value for the warm start configuration, the training job + * that performs the best in the new tuning job is compared to the best training jobs from + * the parent tuning jobs. From these, the training job that performs the best as measured + * by the objective metric is returned as the overall best training job.

    + * + *

    All training jobs launched by parent hyperparameter tuning jobs and the new + * hyperparameter tuning jobs count against the limit of training jobs for the tuning + * job.

    + *
    + */ + WarmStartConfig?: HyperParameterTuningJobWarmStartConfig; + + /** + *

    An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in + * different ways, for example, by purpose, owner, or environment. For more information, + * see Tagging Amazon Web Services + * Resources.

    + *

    Tags that you specify for the tuning job are also added to all training jobs that the + * tuning job launches.

    + */ + Tags?: Tag[]; +} + +export namespace CreateHyperParameterTuningJobRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateHyperParameterTuningJobRequest): any => ({ + ...obj, + }); +} + +export interface CreateHyperParameterTuningJobResponse { + /** + *

    The Amazon Resource Name (ARN) of the tuning job. SageMaker assigns an ARN to a + * hyperparameter tuning job when you create it.

    + */ + HyperParameterTuningJobArn: string | undefined; +} + +export namespace CreateHyperParameterTuningJobResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateHyperParameterTuningJobResponse): any => ({ + ...obj, + }); +} + export interface CreateImageRequest { /** *

    The description of the image.

    @@ -7561,30 +8079,67 @@ export namespace DeleteDomainRequest { }); } -export interface DeleteEndpointInput { +export interface DeleteEdgeDeploymentPlanRequest { /** - *

    The name of the endpoint that you want to delete.

    + *

    The name of the edge deployment plan to delete.

    */ - EndpointName: string | undefined; + EdgeDeploymentPlanName: string | undefined; } -export namespace DeleteEndpointInput { +export namespace DeleteEdgeDeploymentPlanRequest { /** * @internal */ - export const filterSensitiveLog = (obj: DeleteEndpointInput): any => ({ + export const filterSensitiveLog = (obj: DeleteEdgeDeploymentPlanRequest): any => ({ ...obj, }); } -export interface DeleteEndpointConfigInput { +export interface DeleteEdgeDeploymentStageRequest { /** - *

    The name of the endpoint configuration that you want to delete.

    + *

    The name of the edge deployment plan from which the stage will be deleted.

    */ - EndpointConfigName: string | undefined; -} + EdgeDeploymentPlanName: string | undefined; -export namespace DeleteEndpointConfigInput { + /** + *

    The name of the stage.

    + */ + StageName: string | undefined; +} + +export namespace DeleteEdgeDeploymentStageRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteEdgeDeploymentStageRequest): any => ({ + ...obj, + }); +} + +export interface DeleteEndpointInput { + /** + *

    The name of the endpoint that you want to delete.

    + */ + EndpointName: string | undefined; +} + +export namespace DeleteEndpointInput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteEndpointInput): any => ({ + ...obj, + }); +} + +export interface DeleteEndpointConfigInput { + /** + *

    The name of the endpoint configuration that you want to delete.

    + */ + EndpointConfigName: string | undefined; +} + +export namespace DeleteEndpointConfigInput { /** * @internal */ @@ -8202,6 +8757,95 @@ export namespace DeployedImage { }); } +export enum StageStatus { + Creating = "CREATING", + Deployed = "DEPLOYED", + Failed = "FAILED", + InProgress = "INPROGRESS", + ReadyToDeploy = "READYTODEPLOY", + Starting = "STARTING", + Stopped = "STOPPED", + Stopping = "STOPPING", +} + +/** + *

    Contains information summarizing the deployment stage results.

    + */ +export interface EdgeDeploymentStatus { + /** + *

    The general status of the current stage.

    + */ + StageStatus: StageStatus | string | undefined; + + /** + *

    The number of edge devices with the successful deployment in the current stage.

    + */ + EdgeDeploymentSuccessInStage: number | undefined; + + /** + *

    The number of edge devices yet to pick up the deployment in current stage, or in progress.

    + */ + EdgeDeploymentPendingInStage: number | undefined; + + /** + *

    The number of edge devices that failed the deployment in the current stage.

    + */ + EdgeDeploymentFailedInStage: number | undefined; + + /** + *

    A detailed message about the deployment status in the current stage.

    + */ + EdgeDeploymentStatusMessage?: string; + + /** + *

    The time when the deployment API started.

    + */ + EdgeDeploymentStageStartTime?: Date; +} + +export namespace EdgeDeploymentStatus { + /** + * @internal + */ + export const filterSensitiveLog = (obj: EdgeDeploymentStatus): any => ({ + ...obj, + }); +} + +/** + *

    Contains information summarizing the deployment stage results.

    + */ +export interface DeploymentStageStatusSummary { + /** + *

    The name of the stage.

    + */ + StageName: string | undefined; + + /** + *

    Configuration of the devices in the stage.

    + */ + DeviceSelectionConfig: DeviceSelectionConfig | undefined; + + /** + *

    Configuration of the deployment details.

    + */ + DeploymentConfig: EdgeDeploymentConfig | undefined; + + /** + *

    The general status of the current stage.

    + */ + DeploymentStatus: EdgeDeploymentStatus | undefined; +} + +export namespace DeploymentStageStatusSummary { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeploymentStageStatusSummary): any => ({ + ...obj, + }); +} + export interface DeregisterDevicesRequest { /** *

    The name of the fleet the devices belong to.

    @@ -9570,6 +10214,98 @@ export namespace DescribeDomainResponse { }); } +export interface DescribeEdgeDeploymentPlanRequest { + /** + *

    The name of the deployment plan to describe.

    + */ + EdgeDeploymentPlanName: string | undefined; + + /** + *

    If the edge deployment plan has enough stages to require pagination, then this is the token returned from the previous call listing the stages.

    + */ + NextToken?: string; + + /** + *

    The maximum number of results to select (50 by default).

    + */ + MaxResults?: number; +} + +export namespace DescribeEdgeDeploymentPlanRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DescribeEdgeDeploymentPlanRequest): any => ({ + ...obj, + }); +} + +export interface DescribeEdgeDeploymentPlanResponse { + /** + *

    The ARN of edge deployment plan.

    + */ + EdgeDeploymentPlanArn: string | undefined; + + /** + *

    The name of the edge deployment plan.

    + */ + EdgeDeploymentPlanName: string | undefined; + + /** + *

    List of models associated with the edge deployment plan.

    + */ + ModelConfigs: EdgeDeploymentModelConfig[] | undefined; + + /** + *

    The device fleet used for this edge deployment plan.

    + */ + DeviceFleetName: string | undefined; + + /** + *

    The number of edge devices with the successful deployment.

    + */ + EdgeDeploymentSuccess?: number; + + /** + *

    The number of edge devices yet to pick up deployment, or in progress.

    + */ + EdgeDeploymentPending?: number; + + /** + *

    The number of edge devices that failed the deployment.

    + */ + EdgeDeploymentFailed?: number; + + /** + *

    List of stages in the edge deployment plan.

    + */ + Stages: DeploymentStageStatusSummary[] | undefined; + + /** + *

    Token to use when calling the next set of stages in the edge deployment plan.

    + */ + NextToken?: string; + + /** + *

    The time when the edge deployment plan was created.

    + */ + CreationTime?: Date; + + /** + *

    The time when the edge deployment plan was last updated.

    + */ + LastModifiedTime?: Date; +} + +export namespace DescribeEdgeDeploymentPlanResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DescribeEdgeDeploymentPlanResponse): any => ({ + ...obj, + }); +} + export interface DescribeEdgePackagingJobRequest { /** *

    The name of the edge packaging job.

    @@ -10755,590 +11491,3 @@ export namespace DescribeHumanTaskUiResponse { ...obj, }); } - -export interface DescribeHyperParameterTuningJobRequest { - /** - *

    The name of the tuning job.

    - */ - HyperParameterTuningJobName: string | undefined; -} - -export namespace DescribeHyperParameterTuningJobRequest { - /** - * @internal - */ - export const filterSensitiveLog = (obj: DescribeHyperParameterTuningJobRequest): any => ({ - ...obj, - }); -} - -/** - *

    Shows the final value for the - * objective - * metric for a training job that was launched by a hyperparameter - * tuning job. You define the objective metric in the - * HyperParameterTuningJobObjective parameter of HyperParameterTuningJobConfig.

    - */ -export interface FinalHyperParameterTuningJobObjectiveMetric { - /** - *

    Whether to - * minimize - * or maximize the objective metric. Valid values are Minimize and - * Maximize.

    - */ - Type?: HyperParameterTuningJobObjectiveType | string; - - /** - *

    The name of the - * objective - * metric.

    - */ - MetricName: string | undefined; - - /** - *

    The value of the objective metric.

    - */ - Value: number | undefined; -} - -export namespace FinalHyperParameterTuningJobObjectiveMetric { - /** - * @internal - */ - export const filterSensitiveLog = (obj: FinalHyperParameterTuningJobObjectiveMetric): any => ({ - ...obj, - }); -} - -export enum TrainingJobStatus { - COMPLETED = "Completed", - FAILED = "Failed", - IN_PROGRESS = "InProgress", - STOPPED = "Stopped", - STOPPING = "Stopping", -} - -/** - *

    The container for the summary information about a training job.

    - */ -export interface HyperParameterTrainingJobSummary { - /** - *

    The training job definition name.

    - */ - TrainingJobDefinitionName?: string; - - /** - *

    The name of the training job.

    - */ - TrainingJobName: string | undefined; - - /** - *

    The Amazon Resource Name (ARN) of the training job.

    - */ - TrainingJobArn: string | undefined; - - /** - *

    The HyperParameter tuning job that launched the training job.

    - */ - TuningJobName?: string; - - /** - *

    The date and time that the training job was created.

    - */ - CreationTime: Date | undefined; - - /** - *

    The date and time that the training job started.

    - */ - TrainingStartTime?: Date; - - /** - *

    Specifies the time when the training job ends on training instances. You are billed - * for the time interval between the value of TrainingStartTime and this time. - * For successful jobs and stopped jobs, this is the time after model artifacts are - * uploaded. For failed jobs, this is the time when SageMaker detects a job failure.

    - */ - TrainingEndTime?: Date; - - /** - *

    The - * status - * of the training job.

    - */ - TrainingJobStatus: TrainingJobStatus | string | undefined; - - /** - *

    A - * list of the hyperparameters for which you specified ranges to - * search.

    - */ - TunedHyperParameters: Record | undefined; - - /** - *

    The - * reason that the training job failed. - *

    - */ - FailureReason?: string; - - /** - *

    The FinalHyperParameterTuningJobObjectiveMetric object that - * specifies the - * value - * of the - * objective - * metric of the tuning job that launched this training job.

    - */ - FinalHyperParameterTuningJobObjectiveMetric?: FinalHyperParameterTuningJobObjectiveMetric; - - /** - *

    The status of the objective metric for the training job:

    - *
      - *
    • - *

      Succeeded: The - * final - * objective metric for the training job was evaluated by the - * hyperparameter tuning job and - * used - * in the hyperparameter tuning process.

      - *
    • - *
    - *
      - *
    • - *

      Pending: The training job is in progress and evaluation of its final objective - * metric is pending.

      - *
    • - *
    - *
      - *
    • - *

      Failed: - * The final objective metric for the training job was not evaluated, and was not - * used in the hyperparameter tuning process. This typically occurs when the - * training job failed or did not emit an objective - * metric.

      - *
    • - *
    - */ - ObjectiveStatus?: ObjectiveStatus | string; -} - -export namespace HyperParameterTrainingJobSummary { - /** - * @internal - */ - export const filterSensitiveLog = (obj: HyperParameterTrainingJobSummary): any => ({ - ...obj, - }); -} - -export enum HyperParameterTuningJobStatus { - COMPLETED = "Completed", - FAILED = "Failed", - IN_PROGRESS = "InProgress", - STOPPED = "Stopped", - STOPPING = "Stopping", -} - -/** - *

    Specifies the number of training jobs that this hyperparameter tuning job launched, - * categorized by the status of their objective metric. The objective metric status shows - * whether the - * final - * objective metric for the training job has been evaluated by the - * tuning job and used in the hyperparameter tuning process.

    - */ -export interface ObjectiveStatusCounters { - /** - *

    The number of training jobs whose final objective metric was evaluated by the - * hyperparameter tuning job and used in the hyperparameter tuning process.

    - */ - Succeeded?: number; - - /** - *

    The number of training jobs that are in progress and pending evaluation of their final - * objective metric.

    - */ - Pending?: number; - - /** - *

    The number of training jobs whose final objective metric was not evaluated and used in - * the hyperparameter tuning process. This typically occurs when the training job failed or - * did not emit an objective metric.

    - */ - Failed?: number; -} - -export namespace ObjectiveStatusCounters { - /** - * @internal - */ - export const filterSensitiveLog = (obj: ObjectiveStatusCounters): any => ({ - ...obj, - }); -} - -/** - *

    The numbers of training jobs launched by a hyperparameter tuning job, categorized by - * status.

    - */ -export interface TrainingJobStatusCounters { - /** - *

    The number of completed training jobs launched by the hyperparameter tuning - * job.

    - */ - Completed?: number; - - /** - *

    The number of in-progress training jobs launched by a hyperparameter tuning - * job.

    - */ - InProgress?: number; - - /** - *

    The number of training jobs that failed, but can be retried. A failed training job can - * be retried only if it failed because an internal service error occurred.

    - */ - RetryableError?: number; - - /** - *

    The number of training jobs that failed and can't be retried. A failed training job - * can't be retried if it failed because a client error occurred.

    - */ - NonRetryableError?: number; - - /** - *

    The number of training jobs launched by a hyperparameter tuning job that were - * manually - * stopped.

    - */ - Stopped?: number; -} - -export namespace TrainingJobStatusCounters { - /** - * @internal - */ - export const filterSensitiveLog = (obj: TrainingJobStatusCounters): any => ({ - ...obj, - }); -} - -export interface DescribeHyperParameterTuningJobResponse { - /** - *

    The name of the tuning job.

    - */ - HyperParameterTuningJobName: string | undefined; - - /** - *

    The - * Amazon Resource Name (ARN) of the tuning job.

    - */ - HyperParameterTuningJobArn: string | undefined; - - /** - *

    The HyperParameterTuningJobConfig object that specifies the - * configuration of the tuning job.

    - */ - HyperParameterTuningJobConfig: HyperParameterTuningJobConfig | undefined; - - /** - *

    The HyperParameterTrainingJobDefinition object that specifies the - * definition of the training jobs that this tuning job launches.

    - */ - TrainingJobDefinition?: HyperParameterTrainingJobDefinition; - - /** - *

    A list of the HyperParameterTrainingJobDefinition objects launched - * for this tuning job.

    - */ - TrainingJobDefinitions?: HyperParameterTrainingJobDefinition[]; - - /** - *

    The status of the tuning job: InProgress, Completed, Failed, Stopping, or - * Stopped.

    - */ - HyperParameterTuningJobStatus: HyperParameterTuningJobStatus | string | undefined; - - /** - *

    The date and time that the tuning job started.

    - */ - CreationTime: Date | undefined; - - /** - *

    The date and time that the tuning job ended.

    - */ - HyperParameterTuningEndTime?: Date; - - /** - *

    The date and time that the status of the tuning job was modified.

    - */ - LastModifiedTime?: Date; - - /** - *

    The TrainingJobStatusCounters object that specifies the number of - * training jobs, categorized by status, that this tuning job launched.

    - */ - TrainingJobStatusCounters: TrainingJobStatusCounters | undefined; - - /** - *

    The ObjectiveStatusCounters object that specifies the number of - * training jobs, categorized by the status of their final objective metric, that this - * tuning job launched.

    - */ - ObjectiveStatusCounters: ObjectiveStatusCounters | undefined; - - /** - *

    A TrainingJobSummary object that describes the training job that - * completed with the best current HyperParameterTuningJobObjective.

    - */ - BestTrainingJob?: HyperParameterTrainingJobSummary; - - /** - *

    If the hyperparameter tuning job is an warm start tuning job with a - * WarmStartType of IDENTICAL_DATA_AND_ALGORITHM, this is the - * TrainingJobSummary for the training job with the best objective - * metric value of all training jobs launched by this tuning job and all parent jobs - * specified for the warm start tuning job.

    - */ - OverallBestTrainingJob?: HyperParameterTrainingJobSummary; - - /** - *

    The configuration for starting the hyperparameter parameter tuning job using one or - * more previous tuning jobs as a starting point. The results of previous tuning jobs are - * used to inform which combinations of hyperparameters to search over in the new tuning - * job.

    - */ - WarmStartConfig?: HyperParameterTuningJobWarmStartConfig; - - /** - *

    If the tuning job failed, the reason it failed.

    - */ - FailureReason?: string; -} - -export namespace DescribeHyperParameterTuningJobResponse { - /** - * @internal - */ - export const filterSensitiveLog = (obj: DescribeHyperParameterTuningJobResponse): any => ({ - ...obj, - }); -} - -export interface DescribeImageRequest { - /** - *

    The name of the image to describe.

    - */ - ImageName: string | undefined; -} - -export namespace DescribeImageRequest { - /** - * @internal - */ - export const filterSensitiveLog = (obj: DescribeImageRequest): any => ({ - ...obj, - }); -} - -export enum ImageStatus { - CREATED = "CREATED", - CREATE_FAILED = "CREATE_FAILED", - CREATING = "CREATING", - DELETE_FAILED = "DELETE_FAILED", - DELETING = "DELETING", - UPDATE_FAILED = "UPDATE_FAILED", - UPDATING = "UPDATING", -} - -export interface DescribeImageResponse { - /** - *

    When the image was created.

    - */ - CreationTime?: Date; - - /** - *

    The description of the image.

    - */ - Description?: string; - - /** - *

    The name of the image as displayed.

    - */ - DisplayName?: string; - - /** - *

    When a create, update, or delete operation fails, the reason for the failure.

    - */ - FailureReason?: string; - - /** - *

    The Amazon Resource Name (ARN) of the image.

    - */ - ImageArn?: string; - - /** - *

    The name of the image.

    - */ - ImageName?: string; - - /** - *

    The status of the image.

    - */ - ImageStatus?: ImageStatus | string; - - /** - *

    When the image was last modified.

    - */ - LastModifiedTime?: Date; - - /** - *

    The Amazon Resource Name (ARN) of the IAM role that enables Amazon SageMaker to perform tasks on your behalf.

    - */ - RoleArn?: string; -} - -export namespace DescribeImageResponse { - /** - * @internal - */ - export const filterSensitiveLog = (obj: DescribeImageResponse): any => ({ - ...obj, - }); -} - -export interface DescribeImageVersionRequest { - /** - *

    The name of the image.

    - */ - ImageName: string | undefined; - - /** - *

    The version of the image. If not specified, the latest version is described.

    - */ - Version?: number; -} - -export namespace DescribeImageVersionRequest { - /** - * @internal - */ - export const filterSensitiveLog = (obj: DescribeImageVersionRequest): any => ({ - ...obj, - }); -} - -export enum ImageVersionStatus { - CREATED = "CREATED", - CREATE_FAILED = "CREATE_FAILED", - CREATING = "CREATING", - DELETE_FAILED = "DELETE_FAILED", - DELETING = "DELETING", -} - -export interface DescribeImageVersionResponse { - /** - *

    The registry path of the container image on which this image version is based.

    - */ - BaseImage?: string; - - /** - *

    The registry path of the container image that contains this image version.

    - */ - ContainerImage?: string; - - /** - *

    When the version was created.

    - */ - CreationTime?: Date; - - /** - *

    When a create or delete operation fails, the reason for the failure.

    - */ - FailureReason?: string; - - /** - *

    The Amazon Resource Name (ARN) of the image the version is based on.

    - */ - ImageArn?: string; - - /** - *

    The ARN of the version.

    - */ - ImageVersionArn?: string; - - /** - *

    The status of the version.

    - */ - ImageVersionStatus?: ImageVersionStatus | string; - - /** - *

    When the version was last modified.

    - */ - LastModifiedTime?: Date; - - /** - *

    The version number.

    - */ - Version?: number; -} - -export namespace DescribeImageVersionResponse { - /** - * @internal - */ - export const filterSensitiveLog = (obj: DescribeImageVersionResponse): any => ({ - ...obj, - }); -} - -export interface DescribeInferenceRecommendationsJobRequest { - /** - *

    The name of the job. The name must be unique within an - * Amazon Web Services Region in the Amazon Web Services account.

    - */ - JobName: string | undefined; -} - -export namespace DescribeInferenceRecommendationsJobRequest { - /** - * @internal - */ - export const filterSensitiveLog = (obj: DescribeInferenceRecommendationsJobRequest): any => ({ - ...obj, - }); -} - -/** - *

    The endpoint configuration made by Inference Recommender during a recommendation job.

    - */ -export interface EndpointOutputConfiguration { - /** - *

    The name of the endpoint made during a recommendation job.

    - */ - EndpointName: string | undefined; - - /** - *

    The name of the production variant (deployed model) made during a recommendation job.

    - */ - VariantName: string | undefined; - - /** - *

    The instance type recommended by Amazon SageMaker Inference Recommender.

    - */ - InstanceType: ProductionVariantInstanceType | string | undefined; - - /** - *

    The number of instances recommended to launch initially.

    - */ - InitialInstanceCount: number | undefined; -} - -export namespace EndpointOutputConfiguration { - /** - * @internal - */ - export const filterSensitiveLog = (obj: EndpointOutputConfiguration): any => ({ - ...obj, - }); -} diff --git a/clients/client-sagemaker/src/models/models_2.ts b/clients/client-sagemaker/src/models/models_2.ts index 38495b0644841..8b24b76153230 100644 --- a/clients/client-sagemaker/src/models/models_2.ts +++ b/clients/client-sagemaker/src/models/models_2.ts @@ -20,25 +20,22 @@ import { AutoMLSortBy, AutoMLSortOrder, BatchStrategy, - CacheHitResult, - CallbackStepMetadata, CandidateSortBy, CandidateStatus, Channel, CheckpointConfig, - ClarifyCheckStepMetadata, CodeRepositorySortBy, CodeRepositorySortOrder, CodeRepositorySummary, CognitoConfig, CompilationJobStatus, CompilationJobSummary, - ConditionStepMetadata, ContainerDefinition, ContextSummary, EdgeOutputConfig, FeatureDefinition, FeatureType, + HyperParameterTuningJobObjectiveType, HyperParameterTuningJobStrategyType, InferenceSpecification, MetadataProperties, @@ -48,14 +45,15 @@ import { MonitoringOutputConfig, MonitoringResources, MonitoringStoppingCondition, + ObjectiveStatus, OfflineStoreConfig, OnlineStoreConfig, OutputDataConfig, OutputParameter, + ProductionVariantInstanceType, ResourceConfig, ResourceLimits, ResourceSpec, - RetryStrategy, StoppingCondition, Tag, TransformInput, @@ -76,7 +74,6 @@ import { DomainStatus, DriftCheckBaselines, EdgePackagingJobStatus, - EndpointOutputConfiguration, EndpointStatus, ExperimentConfig, ExperimentSource, @@ -84,9 +81,9 @@ import { FeatureParameter, FlowDefinitionStatus, HumanTaskConfig, - HyperParameterTuningJobStatus, - ImageStatus, - ImageVersionStatus, + HyperParameterTrainingJobDefinition, + HyperParameterTuningJobConfig, + HyperParameterTuningJobWarmStartConfig, InferenceExecutionConfig, InstanceMetadataServiceConfiguration, LabelingJobAlgorithmsConfig, @@ -114,7 +111,6 @@ import { NotebookInstanceAcceleratorType, 
NotebookInstanceLifecycleHook, NotificationConfiguration, - ObjectiveStatusCounters, OfflineStoreStatus, OfflineStoreStatusValue, ParallelismConfiguration, @@ -128,6 +124,7 @@ import { RecommendationJobInputConfig, RecommendationJobStoppingConditions, RecommendationJobType, + RetryStrategy, RootAccess, RuleEvaluationStatus, ServiceCatalogProvisioningDetails, @@ -135,1140 +132,1259 @@ import { SourceIpConfig, StudioLifecycleConfigAppType, TensorBoardOutputConfig, - TrainingJobStatus, - TrainingJobStatusCounters, TrialComponentArtifact, TrialComponentParameterValue, TrialComponentStatus, } from "./models_1"; -/** - *

    The metrics of recommendations.

    - */ -export interface RecommendationMetrics { +export interface DescribeHyperParameterTuningJobRequest { /** - *

    Defines the cost per hour for the instance.

    + *

    The name of the tuning job.

    */ - CostPerHour: number | undefined; + HyperParameterTuningJobName: string | undefined; +} +export namespace DescribeHyperParameterTuningJobRequest { /** - *

    Defines the cost per inference for the instance .

    + * @internal */ - CostPerInference: number | undefined; + export const filterSensitiveLog = (obj: DescribeHyperParameterTuningJobRequest): any => ({ + ...obj, + }); +} +/** + *

    Shows the final value for the + * objective + * metric for a training job that was launched by a hyperparameter + * tuning job. You define the objective metric in the + * HyperParameterTuningJobObjective parameter of HyperParameterTuningJobConfig.

    + */ +export interface FinalHyperParameterTuningJobObjectiveMetric { /** - *

    The expected maximum number of requests per minute for the instance.

    + *

    Whether to + * minimize + * or maximize the objective metric. Valid values are Minimize and + * Maximize.

    */ - MaxInvocations: number | undefined; + Type?: HyperParameterTuningJobObjectiveType | string; /** - *

    The expected model latency at maximum invocation per minute for the instance.

    + *

    The name of the + * objective + * metric.

    */ - ModelLatency: number | undefined; + MetricName: string | undefined; + + /** + *

    The value of the objective metric.

    + */ + Value: number | undefined; } -export namespace RecommendationMetrics { +export namespace FinalHyperParameterTuningJobObjectiveMetric { /** * @internal */ - export const filterSensitiveLog = (obj: RecommendationMetrics): any => ({ + export const filterSensitiveLog = (obj: FinalHyperParameterTuningJobObjectiveMetric): any => ({ ...obj, }); } +export enum TrainingJobStatus { + COMPLETED = "Completed", + FAILED = "Failed", + IN_PROGRESS = "InProgress", + STOPPED = "Stopped", + STOPPING = "Stopping", +} + /** - *

    A list of environment parameters suggested by the Amazon SageMaker Inference Recommender.

    + *

    The container for the summary information about a training job.

    */ -export interface EnvironmentParameter { +export interface HyperParameterTrainingJobSummary { /** - *

    The environment key suggested by the Amazon SageMaker Inference Recommender.

    + *

    The training job definition name.

    */ - Key: string | undefined; + TrainingJobDefinitionName?: string; /** - *

    The value type suggested by the Amazon SageMaker Inference Recommender.

    + *

    The name of the training job.

    */ - ValueType: string | undefined; + TrainingJobName: string | undefined; /** - *

    The value suggested by the Amazon SageMaker Inference Recommender.

    + *

    The Amazon Resource Name (ARN) of the training job.

    */ - Value: string | undefined; + TrainingJobArn: string | undefined; + + /** + *

    The HyperParameter tuning job that launched the training job.

    + */ + TuningJobName?: string; + + /** + *

    The date and time that the training job was created.

    + */ + CreationTime: Date | undefined; + + /** + *

    The date and time that the training job started.

    + */ + TrainingStartTime?: Date; + + /** + *

    Specifies the time when the training job ends on training instances. You are billed + * for the time interval between the value of TrainingStartTime and this time. + * For successful jobs and stopped jobs, this is the time after model artifacts are + * uploaded. For failed jobs, this is the time when SageMaker detects a job failure.

    + */ + TrainingEndTime?: Date; + + /** + *

    The + * status + * of the training job.

    + */ + TrainingJobStatus: TrainingJobStatus | string | undefined; + + /** + *

    A + * list of the hyperparameters for which you specified ranges to + * search.

    + */ + TunedHyperParameters: Record | undefined; + + /** + *

    The + * reason that the training job failed. + *

    + */ + FailureReason?: string; + + /** + *

    The FinalHyperParameterTuningJobObjectiveMetric object that + * specifies the + * value + * of the + * objective + * metric of the tuning job that launched this training job.

    + */ + FinalHyperParameterTuningJobObjectiveMetric?: FinalHyperParameterTuningJobObjectiveMetric; + + /** + *

    The status of the objective metric for the training job:

    + *
      + *
    • + *

      Succeeded: The + * final + * objective metric for the training job was evaluated by the + * hyperparameter tuning job and + * used + * in the hyperparameter tuning process.

      + *
    • + *
    + *
      + *
    • + *

      Pending: The training job is in progress and evaluation of its final objective + * metric is pending.

      + *
    • + *
    + *
      + *
    • + *

      Failed: + * The final objective metric for the training job was not evaluated, and was not + * used in the hyperparameter tuning process. This typically occurs when the + * training job failed or did not emit an objective + * metric.

      + *
    • + *
    + */ + ObjectiveStatus?: ObjectiveStatus | string; } -export namespace EnvironmentParameter { +export namespace HyperParameterTrainingJobSummary { /** * @internal */ - export const filterSensitiveLog = (obj: EnvironmentParameter): any => ({ + export const filterSensitiveLog = (obj: HyperParameterTrainingJobSummary): any => ({ ...obj, }); } +export enum HyperParameterTuningJobStatus { + COMPLETED = "Completed", + FAILED = "Failed", + IN_PROGRESS = "InProgress", + STOPPED = "Stopped", + STOPPING = "Stopping", +} + /** - *

    Defines the model configuration. Includes the specification name and environment parameters.

    + *

    Specifies the number of training jobs that this hyperparameter tuning job launched, + * categorized by the status of their objective metric. The objective metric status shows + * whether the + * final + * objective metric for the training job has been evaluated by the + * tuning job and used in the hyperparameter tuning process.

    */ -export interface ModelConfiguration { +export interface ObjectiveStatusCounters { /** - *

    The inference specification name in the model package version.

    + *

    The number of training jobs whose final objective metric was evaluated by the + * hyperparameter tuning job and used in the hyperparameter tuning process.

    */ - InferenceSpecificationName?: string; + Succeeded?: number; /** - *

    Defines the environment parameters that includes key, value types, and values.

    + *

    The number of training jobs that are in progress and pending evaluation of their final + * objective metric.

    */ - EnvironmentParameters?: EnvironmentParameter[]; + Pending?: number; + + /** + *

    The number of training jobs whose final objective metric was not evaluated and used in + * the hyperparameter tuning process. This typically occurs when the training job failed or + * did not emit an objective metric.

    + */ + Failed?: number; } -export namespace ModelConfiguration { +export namespace ObjectiveStatusCounters { /** * @internal */ - export const filterSensitiveLog = (obj: ModelConfiguration): any => ({ + export const filterSensitiveLog = (obj: ObjectiveStatusCounters): any => ({ ...obj, }); } /** - *

    A list of recommendations made by Amazon SageMaker Inference Recommender.

    + *

    The numbers of training jobs launched by a hyperparameter tuning job, categorized by + * status.

    */ -export interface InferenceRecommendation { +export interface TrainingJobStatusCounters { /** - *

    The metrics used to decide what recommendation to make.

    + *

    The number of completed training jobs launched by the hyperparameter tuning + * job.

    */ - Metrics: RecommendationMetrics | undefined; + Completed?: number; /** - *

    Defines the endpoint configuration parameters.

    + *

    The number of in-progress training jobs launched by a hyperparameter tuning + * job.

    */ - EndpointConfiguration: EndpointOutputConfiguration | undefined; + InProgress?: number; /** - *

    Defines the model configuration.

    + *

    The number of training jobs that failed, but can be retried. A failed training job can + * be retried only if it failed because an internal service error occurred.

    */ - ModelConfiguration: ModelConfiguration | undefined; + RetryableError?: number; + + /** + *

    The number of training jobs that failed and can't be retried. A failed training job + * can't be retried if it failed because a client error occurred.

    + */ + NonRetryableError?: number; + + /** + *

    The number of training jobs launched by a hyperparameter tuning job that were + * manually + * stopped.

    + */ + Stopped?: number; } -export namespace InferenceRecommendation { +export namespace TrainingJobStatusCounters { /** * @internal */ - export const filterSensitiveLog = (obj: InferenceRecommendation): any => ({ + export const filterSensitiveLog = (obj: TrainingJobStatusCounters): any => ({ ...obj, }); } -export enum RecommendationJobStatus { - COMPLETED = "COMPLETED", - FAILED = "FAILED", - IN_PROGRESS = "IN_PROGRESS", - PENDING = "PENDING", - STOPPED = "STOPPED", - STOPPING = "STOPPING", -} - -export interface DescribeInferenceRecommendationsJobResponse { +export interface DescribeHyperParameterTuningJobResponse { /** - *

    The name of the job. The name must be unique within an - * Amazon Web Services Region in the Amazon Web Services account.

    + *

    The name of the tuning job.

    */ - JobName: string | undefined; + HyperParameterTuningJobName: string | undefined; /** - *

    The job description that you provided when you initiated the job.

    + *

    The + * Amazon Resource Name (ARN) of the tuning job.

    */ - JobDescription?: string; + HyperParameterTuningJobArn: string | undefined; /** - *

    The job type that you provided when you initiated the job.

    + *

    The HyperParameterTuningJobConfig object that specifies the + * configuration of the tuning job.

    */ - JobType: RecommendationJobType | string | undefined; + HyperParameterTuningJobConfig: HyperParameterTuningJobConfig | undefined; /** - *

    The Amazon Resource Name (ARN) of the job.

    + *

    The HyperParameterTrainingJobDefinition object that specifies the + * definition of the training jobs that this tuning job launches.

    */ - JobArn: string | undefined; + TrainingJobDefinition?: HyperParameterTrainingJobDefinition; /** - *

    The Amazon Resource Name (ARN) of the Amazon Web Services - * Identity and Access Management (IAM) role you provided when you initiated the job.

    + *

    A list of the HyperParameterTrainingJobDefinition objects launched + * for this tuning job.

    */ - RoleArn: string | undefined; + TrainingJobDefinitions?: HyperParameterTrainingJobDefinition[]; /** - *

    The status of the job.

    + *

    The status of the tuning job: InProgress, Completed, Failed, Stopping, or + * Stopped.

    */ - Status: RecommendationJobStatus | string | undefined; + HyperParameterTuningJobStatus: HyperParameterTuningJobStatus | string | undefined; /** - *

    A timestamp that shows when the job was created.

    + *

    The date and time that the tuning job started.

    */ CreationTime: Date | undefined; /** - *

    A timestamp that shows when the job completed.

    + *

    The date and time that the tuning job ended.

    */ - CompletionTime?: Date; + HyperParameterTuningEndTime?: Date; /** - *

    A timestamp that shows when the job was last modified.

    + *

    The date and time that the status of the tuning job was modified.

    */ - LastModifiedTime: Date | undefined; + LastModifiedTime?: Date; /** - *

    If the job fails, provides information why the job failed.

    + *

    The TrainingJobStatusCounters object that specifies the number of + * training jobs, categorized by status, that this tuning job launched.

    */ - FailureReason?: string; + TrainingJobStatusCounters: TrainingJobStatusCounters | undefined; /** - *

    Returns information about the versioned model package Amazon Resource Name (ARN), - * the traffic pattern, and endpoint configurations you provided when you initiated the job.

    + *

    The ObjectiveStatusCounters object that specifies the number of + * training jobs, categorized by the status of their final objective metric, that this + * tuning job launched.

    */ - InputConfig: RecommendationJobInputConfig | undefined; + ObjectiveStatusCounters: ObjectiveStatusCounters | undefined; /** - *

    The stopping conditions that you provided when you initiated the job.

    + *

    A TrainingJobSummary object that describes the training job that + * completed with the best current HyperParameterTuningJobObjective.

    */ - StoppingConditions?: RecommendationJobStoppingConditions; + BestTrainingJob?: HyperParameterTrainingJobSummary; /** - *

    The recommendations made by Inference Recommender.

    + *

    If the hyperparameter tuning job is an warm start tuning job with a + * WarmStartType of IDENTICAL_DATA_AND_ALGORITHM, this is the + * TrainingJobSummary for the training job with the best objective + * metric value of all training jobs launched by this tuning job and all parent jobs + * specified for the warm start tuning job.

    */ - InferenceRecommendations?: InferenceRecommendation[]; + OverallBestTrainingJob?: HyperParameterTrainingJobSummary; + + /** + *

    The configuration for starting the hyperparameter parameter tuning job using one or + * more previous tuning jobs as a starting point. The results of previous tuning jobs are + * used to inform which combinations of hyperparameters to search over in the new tuning + * job.

    + */ + WarmStartConfig?: HyperParameterTuningJobWarmStartConfig; + + /** + *

    If the tuning job failed, the reason it failed.

    + */ + FailureReason?: string; } -export namespace DescribeInferenceRecommendationsJobResponse { +export namespace DescribeHyperParameterTuningJobResponse { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeInferenceRecommendationsJobResponse): any => ({ + export const filterSensitiveLog = (obj: DescribeHyperParameterTuningJobResponse): any => ({ ...obj, }); } -export interface DescribeLabelingJobRequest { +export interface DescribeImageRequest { /** - *

    The name of the labeling job to return information for.

    + *

    The name of the image to describe.

    */ - LabelingJobName: string | undefined; + ImageName: string | undefined; } -export namespace DescribeLabelingJobRequest { +export namespace DescribeImageRequest { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeLabelingJobRequest): any => ({ + export const filterSensitiveLog = (obj: DescribeImageRequest): any => ({ ...obj, }); } -/** - *

    Provides a breakdown of the number of objects labeled.

    - */ -export interface LabelCounters { +export enum ImageStatus { + CREATED = "CREATED", + CREATE_FAILED = "CREATE_FAILED", + CREATING = "CREATING", + DELETE_FAILED = "DELETE_FAILED", + DELETING = "DELETING", + UPDATE_FAILED = "UPDATE_FAILED", + UPDATING = "UPDATING", +} + +export interface DescribeImageResponse { /** - *

    The total number of objects labeled.

    + *

    When the image was created.

    */ - TotalLabeled?: number; + CreationTime?: Date; /** - *

    The total number of objects labeled by a human worker.

    + *

    The description of the image.

    */ - HumanLabeled?: number; + Description?: string; /** - *

    The total number of objects labeled by automated data labeling.

    + *

    The name of the image as displayed.

    */ - MachineLabeled?: number; + DisplayName?: string; /** - *

    The total number of objects that could not be labeled due to an error.

    + *

    When a create, update, or delete operation fails, the reason for the failure.

    */ - FailedNonRetryableError?: number; + FailureReason?: string; /** - *

    The total number of objects not yet labeled.

    + *

    The Amazon Resource Name (ARN) of the image.

    */ - Unlabeled?: number; + ImageArn?: string; + + /** + *

    The name of the image.

    + */ + ImageName?: string; + + /** + *

    The status of the image.

    + */ + ImageStatus?: ImageStatus | string; + + /** + *

    When the image was last modified.

    + */ + LastModifiedTime?: Date; + + /** + *

    The Amazon Resource Name (ARN) of the IAM role that enables Amazon SageMaker to perform tasks on your behalf.

    + */ + RoleArn?: string; } -export namespace LabelCounters { +export namespace DescribeImageResponse { /** * @internal */ - export const filterSensitiveLog = (obj: LabelCounters): any => ({ + export const filterSensitiveLog = (obj: DescribeImageResponse): any => ({ ...obj, }); } -/** - *

    Specifies the location of the output produced by the labeling job.

    - */ -export interface LabelingJobOutput { +export interface DescribeImageVersionRequest { /** - *

    The Amazon S3 bucket location of the manifest file for labeled data.

    + *

    The name of the image.

    */ - OutputDatasetS3Uri: string | undefined; + ImageName: string | undefined; /** - *

    The Amazon Resource Name (ARN) for the most recent SageMaker model trained as part of - * automated data labeling.

    + *

    The version of the image. If not specified, the latest version is described.

    */ - FinalActiveLearningModelArn?: string; + Version?: number; } -export namespace LabelingJobOutput { +export namespace DescribeImageVersionRequest { /** * @internal */ - export const filterSensitiveLog = (obj: LabelingJobOutput): any => ({ + export const filterSensitiveLog = (obj: DescribeImageVersionRequest): any => ({ ...obj, }); } -export enum LabelingJobStatus { - COMPLETED = "Completed", - FAILED = "Failed", - INITIALIZING = "Initializing", - IN_PROGRESS = "InProgress", - STOPPED = "Stopped", - STOPPING = "Stopping", +export enum ImageVersionStatus { + CREATED = "CREATED", + CREATE_FAILED = "CREATE_FAILED", + CREATING = "CREATING", + DELETE_FAILED = "DELETE_FAILED", + DELETING = "DELETING", } -export interface DescribeLabelingJobResponse { - /** - *

    The processing status of the labeling job.

    - */ - LabelingJobStatus: LabelingJobStatus | string | undefined; - - /** - *

    Provides a breakdown of the number of data objects labeled by humans, the number of - * objects labeled by machine, the number of objects than couldn't be labeled, and the - * total number of objects labeled.

    - */ - LabelCounters: LabelCounters | undefined; - - /** - *

    If the job failed, the reason that it failed.

    - */ - FailureReason?: string; - - /** - *

    The date and time that the labeling job was created.

    - */ - CreationTime: Date | undefined; - +export interface DescribeImageVersionResponse { /** - *

    The date and time that the labeling job was last updated.

    + *

    The registry path of the container image on which this image version is based.

    */ - LastModifiedTime: Date | undefined; + BaseImage?: string; /** - *

    A unique identifier for work done as part of a labeling job.

    + *

    The registry path of the container image that contains this image version.

    */ - JobReferenceCode: string | undefined; + ContainerImage?: string; /** - *

    The name assigned to the labeling job when it was created.

    + *

    When the version was created.

    */ - LabelingJobName: string | undefined; + CreationTime?: Date; /** - *

    The Amazon Resource Name (ARN) of the labeling job.

    + *

    When a create or delete operation fails, the reason for the failure.

    */ - LabelingJobArn: string | undefined; + FailureReason?: string; /** - *

    The attribute used as the label in the output manifest file.

    + *

    The Amazon Resource Name (ARN) of the image the version is based on.

    */ - LabelAttributeName?: string; + ImageArn?: string; /** - *

    Input configuration information for the labeling job, such as the Amazon S3 location of the - * data objects and the location of the manifest file that describes the data - * objects.

    + *

    The ARN of the version.

    */ - InputConfig: LabelingJobInputConfig | undefined; + ImageVersionArn?: string; /** - *

    The location of the job's output data and the Amazon Web Services Key Management Service key ID for the key used to - * encrypt the output data, if any.

    + *

    The status of the version.

    */ - OutputConfig: LabelingJobOutputConfig | undefined; + ImageVersionStatus?: ImageVersionStatus | string; /** - *

    The Amazon Resource Name (ARN) that SageMaker assumes to perform tasks on your behalf - * during data labeling.

    + *

    When the version was last modified.

    */ - RoleArn: string | undefined; + LastModifiedTime?: Date; /** - *

    The S3 location of the JSON file that defines the categories used to label data - * objects. Please note the following label-category limits:

    - *
      - *
    • - *

      Semantic segmentation labeling jobs using automated labeling: 20 labels

      - *
    • - *
    • - *

      Box bounding labeling jobs (all): 10 labels

      - *
    • - *
    - *

    The file is a JSON structure in the following format:

    - *

    - * { - *

    - *

    - * "document-version": "2018-11-28" - *

    - *

    - * "labels": [ - *

    - *

    - * { - *

    - *

    - * "label": "label 1" - *

    - *

    - * }, - *

    - *

    - * { - *

    - *

    - * "label": "label 2" - *

    - *

    - * }, - *

    - *

    - * ... - *

    - *

    - * { - *

    - *

    - * "label": "label n" - *

    - *

    - * } - *

    - *

    - * ] - *

    - *

    - * } - *

    + *

    The version number.

    */ - LabelCategoryConfigS3Uri?: string; + Version?: number; +} +export namespace DescribeImageVersionResponse { /** - *

    A set of conditions for stopping a labeling job. If any of the conditions are met, the - * job is automatically stopped.

    + * @internal */ - StoppingConditions?: LabelingJobStoppingConditions; + export const filterSensitiveLog = (obj: DescribeImageVersionResponse): any => ({ + ...obj, + }); +} +export interface DescribeInferenceRecommendationsJobRequest { /** - *

    Configuration information for automated data labeling.

    + *

    The name of the job. The name must be unique within an + * Amazon Web Services Region in the Amazon Web Services account.

    */ - LabelingJobAlgorithmsConfig?: LabelingJobAlgorithmsConfig; + JobName: string | undefined; +} +export namespace DescribeInferenceRecommendationsJobRequest { /** - *

    Configuration information required for human workers to complete a labeling - * task.

    + * @internal */ - HumanTaskConfig: HumanTaskConfig | undefined; + export const filterSensitiveLog = (obj: DescribeInferenceRecommendationsJobRequest): any => ({ + ...obj, + }); +} +/** + *

    The endpoint configuration made by Inference Recommender during a recommendation job.

    + */ +export interface EndpointOutputConfiguration { /** - *

    An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in - * different ways, for example, by purpose, owner, or environment. For more information, - * see Tagging Amazon Web Services - * Resources.

    + *

    The name of the endpoint made during a recommendation job.

    */ - Tags?: Tag[]; + EndpointName: string | undefined; /** - *

    The location of the output produced by the labeling job.

    + *

    The name of the production variant (deployed model) made during a recommendation job.

    */ - LabelingJobOutput?: LabelingJobOutput; -} + VariantName: string | undefined; -export namespace DescribeLabelingJobResponse { /** - * @internal + *

    The instance type recommended by Amazon SageMaker Inference Recommender.

    */ - export const filterSensitiveLog = (obj: DescribeLabelingJobResponse): any => ({ - ...obj, - }); -} + InstanceType: ProductionVariantInstanceType | string | undefined; -export interface DescribeLineageGroupRequest { /** - *

    The name of the lineage group.

    + *

    The number of instances recommended to launch initially.

    */ - LineageGroupName: string | undefined; + InitialInstanceCount: number | undefined; } -export namespace DescribeLineageGroupRequest { +export namespace EndpointOutputConfiguration { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeLineageGroupRequest): any => ({ + export const filterSensitiveLog = (obj: EndpointOutputConfiguration): any => ({ ...obj, }); } -export interface DescribeLineageGroupResponse { +/** + *

    The metrics of recommendations.

    + */ +export interface RecommendationMetrics { /** - *

    The name of the lineage group.

    + *

    Defines the cost per hour for the instance.

    */ - LineageGroupName?: string; + CostPerHour: number | undefined; /** - *

    The Amazon Resource Name (ARN) of the lineage group.

    + *

    Defines the cost per inference for the instance .

    */ - LineageGroupArn?: string; + CostPerInference: number | undefined; /** - *

    The display name of the lineage group.

    + *

    The expected maximum number of requests per minute for the instance.

    */ - DisplayName?: string; + MaxInvocations: number | undefined; /** - *

    The description of the lineage group.

    + *

    The expected model latency at maximum invocation per minute for the instance.

    */ - Description?: string; + ModelLatency: number | undefined; +} +export namespace RecommendationMetrics { /** - *

    The creation time of lineage group.

    + * @internal */ - CreationTime?: Date; + export const filterSensitiveLog = (obj: RecommendationMetrics): any => ({ + ...obj, + }); +} +/** + *

    A list of environment parameters suggested by the Amazon SageMaker Inference Recommender.

    + */ +export interface EnvironmentParameter { /** - *

    Information about the user who created or modified an experiment, trial, trial - * component, lineage group, or project.

    + *

    The environment key suggested by the Amazon SageMaker Inference Recommender.

    */ - CreatedBy?: UserContext; + Key: string | undefined; /** - *

    The last modified time of the lineage group.

    + *

    The value type suggested by the Amazon SageMaker Inference Recommender.

    */ - LastModifiedTime?: Date; + ValueType: string | undefined; /** - *

    Information about the user who created or modified an experiment, trial, trial - * component, lineage group, or project.

    + *

    The value suggested by the Amazon SageMaker Inference Recommender.

    */ - LastModifiedBy?: UserContext; + Value: string | undefined; } -export namespace DescribeLineageGroupResponse { +export namespace EnvironmentParameter { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeLineageGroupResponse): any => ({ + export const filterSensitiveLog = (obj: EnvironmentParameter): any => ({ ...obj, }); } -export interface DescribeModelInput { +/** + *

    Defines the model configuration. Includes the specification name and environment parameters.

    + */ +export interface ModelConfiguration { /** - *

    The name of the model.

    + *

    The inference specification name in the model package version.

    */ - ModelName: string | undefined; + InferenceSpecificationName?: string; + + /** + *

    Defines the environment parameters that includes key, value types, and values.

    + */ + EnvironmentParameters?: EnvironmentParameter[]; } -export namespace DescribeModelInput { +export namespace ModelConfiguration { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeModelInput): any => ({ + export const filterSensitiveLog = (obj: ModelConfiguration): any => ({ ...obj, }); } -export interface DescribeModelOutput { +/** + *

    A list of recommendations made by Amazon SageMaker Inference Recommender.

    + */ +export interface InferenceRecommendation { /** - *

    Name of the SageMaker model.

    + *

    The metrics used to decide what recommendation to make.

    */ - ModelName: string | undefined; + Metrics: RecommendationMetrics | undefined; /** - *

    The location of the primary inference code, associated artifacts, and custom - * environment map that the inference code uses when it is deployed in production. - *

    + *

    Defines the endpoint configuration parameters.

    */ - PrimaryContainer?: ContainerDefinition; + EndpointConfiguration: EndpointOutputConfiguration | undefined; /** - *

    The containers in the inference pipeline.

    + *

    Defines the model configuration.

    */ - Containers?: ContainerDefinition[]; + ModelConfiguration: ModelConfiguration | undefined; +} +export namespace InferenceRecommendation { /** - *

    Specifies details of how containers in a multi-container endpoint are called.

    + * @internal */ - InferenceExecutionConfig?: InferenceExecutionConfig; - - /** - *

    The Amazon Resource Name (ARN) of the IAM role that you specified for the - * model.

    - */ - ExecutionRoleArn: string | undefined; - - /** - *

    A VpcConfig object that specifies the VPC that this model has access - * to. For more information, see Protect Endpoints by Using an Amazon Virtual - * Private Cloud - *

    - */ - VpcConfig?: VpcConfig; - - /** - *

    A timestamp that shows when the model was created.

    - */ - CreationTime: Date | undefined; - - /** - *

    The Amazon Resource Name (ARN) of the model.

    - */ - ModelArn: string | undefined; - - /** - *

    If True, no inbound or outbound network calls can be made to or from the - * model container.

    - */ - EnableNetworkIsolation?: boolean; -} - -export namespace DescribeModelOutput { - /** - * @internal - */ - export const filterSensitiveLog = (obj: DescribeModelOutput): any => ({ + export const filterSensitiveLog = (obj: InferenceRecommendation): any => ({ ...obj, }); } -export interface DescribeModelBiasJobDefinitionRequest { +export enum RecommendationJobStatus { + COMPLETED = "COMPLETED", + FAILED = "FAILED", + IN_PROGRESS = "IN_PROGRESS", + PENDING = "PENDING", + STOPPED = "STOPPED", + STOPPING = "STOPPING", +} + +export interface DescribeInferenceRecommendationsJobResponse { /** - *

    The name of the model bias job definition. The name must be unique within an Amazon Web Services Region - * in the Amazon Web Services account.

    + *

    The name of the job. The name must be unique within an + * Amazon Web Services Region in the Amazon Web Services account.

    */ - JobDefinitionName: string | undefined; -} + JobName: string | undefined; -export namespace DescribeModelBiasJobDefinitionRequest { /** - * @internal + *

    The job description that you provided when you initiated the job.

    */ - export const filterSensitiveLog = (obj: DescribeModelBiasJobDefinitionRequest): any => ({ - ...obj, - }); -} + JobDescription?: string; -export interface DescribeModelBiasJobDefinitionResponse { /** - *

    The Amazon Resource Name (ARN) of the model bias job.

    + *

    The job type that you provided when you initiated the job.

    */ - JobDefinitionArn: string | undefined; + JobType: RecommendationJobType | string | undefined; /** - *

    The name of the bias job definition. The name must be unique within an Amazon Web Services Region in the - * Amazon Web Services account.

    + *

    The Amazon Resource Name (ARN) of the job.

    */ - JobDefinitionName: string | undefined; + JobArn: string | undefined; /** - *

    The time at which the model bias job was created.

    + *

    The Amazon Resource Name (ARN) of the Amazon Web Services + * Identity and Access Management (IAM) role you provided when you initiated the job.

    */ - CreationTime: Date | undefined; + RoleArn: string | undefined; /** - *

    The baseline configuration for a model bias job.

    + *

    The status of the job.

    */ - ModelBiasBaselineConfig?: ModelBiasBaselineConfig; + Status: RecommendationJobStatus | string | undefined; /** - *

    Configures the model bias job to run a specified Docker container image.

    + *

    A timestamp that shows when the job was created.

    */ - ModelBiasAppSpecification: ModelBiasAppSpecification | undefined; + CreationTime: Date | undefined; /** - *

    Inputs for the model bias job.

    + *

    A timestamp that shows when the job completed.

    */ - ModelBiasJobInput: ModelBiasJobInput | undefined; + CompletionTime?: Date; /** - *

    The output configuration for monitoring jobs.

    + *

    A timestamp that shows when the job was last modified.

    */ - ModelBiasJobOutputConfig: MonitoringOutputConfig | undefined; + LastModifiedTime: Date | undefined; /** - *

    Identifies the resources to deploy for a monitoring job.

    + *

    If the job fails, provides information why the job failed.

    */ - JobResources: MonitoringResources | undefined; + FailureReason?: string; /** - *

    Networking options for a model bias job.

    + *

    Returns information about the versioned model package Amazon Resource Name (ARN), + * the traffic pattern, and endpoint configurations you provided when you initiated the job.

    */ - NetworkConfig?: MonitoringNetworkConfig; + InputConfig: RecommendationJobInputConfig | undefined; /** - *

    The Amazon Resource Name (ARN) of the Amazon Web Services Identity and Access Management (IAM) role that - * has read permission to the input data location and write permission to the output data - * location in Amazon S3.

    + *

    The stopping conditions that you provided when you initiated the job.

    */ - RoleArn: string | undefined; + StoppingConditions?: RecommendationJobStoppingConditions; /** - *

    A time limit for how long the monitoring job is allowed to run before stopping.

    + *

    The recommendations made by Inference Recommender.

    */ - StoppingCondition?: MonitoringStoppingCondition; + InferenceRecommendations?: InferenceRecommendation[]; } -export namespace DescribeModelBiasJobDefinitionResponse { +export namespace DescribeInferenceRecommendationsJobResponse { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeModelBiasJobDefinitionResponse): any => ({ + export const filterSensitiveLog = (obj: DescribeInferenceRecommendationsJobResponse): any => ({ ...obj, }); } -export interface DescribeModelExplainabilityJobDefinitionRequest { +export interface DescribeLabelingJobRequest { /** - *

    The name of the model explainability job definition. The name must be unique within an - * Amazon Web Services Region in the Amazon Web Services account.

    + *

    The name of the labeling job to return information for.

    */ - JobDefinitionName: string | undefined; + LabelingJobName: string | undefined; } -export namespace DescribeModelExplainabilityJobDefinitionRequest { +export namespace DescribeLabelingJobRequest { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeModelExplainabilityJobDefinitionRequest): any => ({ + export const filterSensitiveLog = (obj: DescribeLabelingJobRequest): any => ({ ...obj, }); } -export interface DescribeModelExplainabilityJobDefinitionResponse { - /** - *

    The Amazon Resource Name (ARN) of the model explainability job.

    - */ - JobDefinitionArn: string | undefined; - - /** - *

    The name of the explainability job definition. The name must be unique within an Amazon Web Services - * Region in the Amazon Web Services account.

    - */ - JobDefinitionName: string | undefined; - - /** - *

    The time at which the model explainability job was created.

    - */ - CreationTime: Date | undefined; - - /** - *

    The baseline configuration for a model explainability job.

    - */ - ModelExplainabilityBaselineConfig?: ModelExplainabilityBaselineConfig; - - /** - *

    Configures the model explainability job to run a specified Docker container - * image.

    - */ - ModelExplainabilityAppSpecification: ModelExplainabilityAppSpecification | undefined; - - /** - *

    Inputs for the model explainability job.

    - */ - ModelExplainabilityJobInput: ModelExplainabilityJobInput | undefined; - +/** + *

    Provides a breakdown of the number of objects labeled.

    + */ +export interface LabelCounters { /** - *

    The output configuration for monitoring jobs.

    + *

    The total number of objects labeled.

    */ - ModelExplainabilityJobOutputConfig: MonitoringOutputConfig | undefined; + TotalLabeled?: number; /** - *

    Identifies the resources to deploy for a monitoring job.

    + *

    The total number of objects labeled by a human worker.

    */ - JobResources: MonitoringResources | undefined; + HumanLabeled?: number; /** - *

    Networking options for a model explainability job.

    + *

    The total number of objects labeled by automated data labeling.

    */ - NetworkConfig?: MonitoringNetworkConfig; + MachineLabeled?: number; /** - *

    The Amazon Resource Name (ARN) of the Amazon Web Services Identity and Access Management (IAM) role that - * has read permission to the input data location and write permission to the output data - * location in Amazon S3.

    + *

    The total number of objects that could not be labeled due to an error.

    */ - RoleArn: string | undefined; + FailedNonRetryableError?: number; /** - *

    A time limit for how long the monitoring job is allowed to run before stopping.

    + *

    The total number of objects not yet labeled.

    */ - StoppingCondition?: MonitoringStoppingCondition; + Unlabeled?: number; } -export namespace DescribeModelExplainabilityJobDefinitionResponse { +export namespace LabelCounters { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeModelExplainabilityJobDefinitionResponse): any => ({ + export const filterSensitiveLog = (obj: LabelCounters): any => ({ ...obj, }); } -export interface DescribeModelPackageInput { +/** + *

    Specifies the location of the output produced by the labeling job.

    + */ +export interface LabelingJobOutput { /** - *

    The name or Amazon Resource Name (ARN) of the model package to describe.

    - *

    When you specify a name, the name must have 1 to 63 characters. Valid - * characters are a-z, A-Z, 0-9, and - (hyphen).

    + *

    The Amazon S3 bucket location of the manifest file for labeled data.

    */ - ModelPackageName: string | undefined; + OutputDatasetS3Uri: string | undefined; + + /** + *

    The Amazon Resource Name (ARN) for the most recent SageMaker model trained as part of + * automated data labeling.

    + */ + FinalActiveLearningModelArn?: string; } -export namespace DescribeModelPackageInput { +export namespace LabelingJobOutput { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeModelPackageInput): any => ({ + export const filterSensitiveLog = (obj: LabelingJobOutput): any => ({ ...obj, }); } -export enum DetailedModelPackageStatus { +export enum LabelingJobStatus { COMPLETED = "Completed", FAILED = "Failed", + INITIALIZING = "Initializing", IN_PROGRESS = "InProgress", - NOT_STARTED = "NotStarted", + STOPPED = "Stopped", + STOPPING = "Stopping", } -/** - *

    Represents the overall status of a model package.

    - */ -export interface ModelPackageStatusItem { +export interface DescribeLabelingJobResponse { /** - *

    The name of the model package for which the overall status is being reported.

    + *

    The processing status of the labeling job.

    */ - Name: string | undefined; + LabelingJobStatus: LabelingJobStatus | string | undefined; /** - *

    The current status.

    + *

    Provides a breakdown of the number of data objects labeled by humans, the number of + * objects labeled by machine, the number of objects than couldn't be labeled, and the + * total number of objects labeled.

    */ - Status: DetailedModelPackageStatus | string | undefined; + LabelCounters: LabelCounters | undefined; /** - *

    if the overall status is Failed, the reason for the failure.

    + *

    If the job failed, the reason that it failed.

    */ FailureReason?: string; -} -export namespace ModelPackageStatusItem { /** - * @internal + *

    The date and time that the labeling job was created.

    */ - export const filterSensitiveLog = (obj: ModelPackageStatusItem): any => ({ - ...obj, - }); -} + CreationTime: Date | undefined; -/** - *

    Specifies the validation and image scan statuses of the model package.

    - */ -export interface ModelPackageStatusDetails { /** - *

    The validation status of the model package.

    + *

    The date and time that the labeling job was last updated.

    */ - ValidationStatuses: ModelPackageStatusItem[] | undefined; + LastModifiedTime: Date | undefined; /** - *

    The status of the scan of the Docker image container for the model package.

    + *

    A unique identifier for work done as part of a labeling job.

    */ - ImageScanStatuses?: ModelPackageStatusItem[]; -} - -export namespace ModelPackageStatusDetails { - /** - * @internal - */ - export const filterSensitiveLog = (obj: ModelPackageStatusDetails): any => ({ - ...obj, - }); -} - -export interface DescribeModelPackageOutput { - /** - *

    The name of the model package being described.

    - */ - ModelPackageName: string | undefined; - - /** - *

    If the model is a versioned model, the name of the model group that the versioned - * model belongs to.

    - */ - ModelPackageGroupName?: string; + JobReferenceCode: string | undefined; /** - *

    The version of the model package.

    + *

    The name assigned to the labeling job when it was created.

    */ - ModelPackageVersion?: number; + LabelingJobName: string | undefined; /** - *

    The Amazon Resource Name (ARN) of the model package.

    + *

    The Amazon Resource Name (ARN) of the labeling job.

    */ - ModelPackageArn: string | undefined; + LabelingJobArn: string | undefined; /** - *

    A brief summary of the model package.

    + *

    The attribute used as the label in the output manifest file.

    */ - ModelPackageDescription?: string; + LabelAttributeName?: string; /** - *

    A timestamp specifying when the model package was created.

    + *

    Input configuration information for the labeling job, such as the Amazon S3 location of the + * data objects and the location of the manifest file that describes the data + * objects.

    */ - CreationTime: Date | undefined; + InputConfig: LabelingJobInputConfig | undefined; /** - *

    Details about inference jobs that can be run with models based on this model - * package.

    + *

    The location of the job's output data and the Amazon Web Services Key Management Service key ID for the key used to + * encrypt the output data, if any.

    */ - InferenceSpecification?: InferenceSpecification; + OutputConfig: LabelingJobOutputConfig | undefined; /** - *

    Details about the algorithm that was used to create the model package.

    + *

    The Amazon Resource Name (ARN) that SageMaker assumes to perform tasks on your behalf + * during data labeling.

    */ - SourceAlgorithmSpecification?: SourceAlgorithmSpecification; + RoleArn: string | undefined; /** - *

    Configurations for one or more transform jobs that SageMaker runs to test the model - * package.

    + *

    The S3 location of the JSON file that defines the categories used to label data + * objects. Please note the following label-category limits:

    + *
      + *
    • + *

      Semantic segmentation labeling jobs using automated labeling: 20 labels

      + *
    • + *
    • + *

      Bounding box labeling jobs (all): 10 labels

      + *
    • + *
    + *

    The file is a JSON structure in the following format:

    + *

    + * { + *

    + *

    + * "document-version": "2018-11-28" + *

    + *

    + * "labels": [ + *

    + *

    + * { + *

    + *

    + * "label": "label 1" + *

    + *

    + * }, + *

    + *

    + * { + *

    + *

    + * "label": "label 2" + *

    + *

    + * }, + *

    + *

    + * ... + *

    + *

    + * { + *

    + *

    + * "label": "label n" + *

    + *

    + * } + *

    + *

    + * ] + *

    + *

    + * } + *

    */ - ValidationSpecification?: ModelPackageValidationSpecification; + LabelCategoryConfigS3Uri?: string; /** - *

    The current status of the model package.

    + *

    A set of conditions for stopping a labeling job. If any of the conditions are met, the + * job is automatically stopped.

    */ - ModelPackageStatus: ModelPackageStatus | string | undefined; + StoppingConditions?: LabelingJobStoppingConditions; /** - *

    Details about the current status of the model package.

    + *

    Configuration information for automated data labeling.

    */ - ModelPackageStatusDetails: ModelPackageStatusDetails | undefined; + LabelingJobAlgorithmsConfig?: LabelingJobAlgorithmsConfig; /** - *

    Whether the model package is certified for listing on Amazon Web Services Marketplace.

    + *

    Configuration information required for human workers to complete a labeling + * task.

    */ - CertifyForMarketplace?: boolean; + HumanTaskConfig: HumanTaskConfig | undefined; /** - *

    The approval status of the model package.

    + *

    An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in + * different ways, for example, by purpose, owner, or environment. For more information, + * see Tagging Amazon Web Services + * Resources.

    */ - ModelApprovalStatus?: ModelApprovalStatus | string; + Tags?: Tag[]; /** - *

    Information about the user who created or modified an experiment, trial, trial - * component, lineage group, or project.

    + *

    The location of the output produced by the labeling job.

    */ - CreatedBy?: UserContext; + LabelingJobOutput?: LabelingJobOutput; +} +export namespace DescribeLabelingJobResponse { /** - *

    Metadata properties of the tracking entity, trial, or trial component.

    + * @internal */ - MetadataProperties?: MetadataProperties; + export const filterSensitiveLog = (obj: DescribeLabelingJobResponse): any => ({ + ...obj, + }); +} +export interface DescribeLineageGroupRequest { /** - *

    Metrics for the model.

    + *

    The name of the lineage group.

    */ - ModelMetrics?: ModelMetrics; + LineageGroupName: string | undefined; +} +export namespace DescribeLineageGroupRequest { /** - *

    The last time that the model package was modified.

    + * @internal */ - LastModifiedTime?: Date; + export const filterSensitiveLog = (obj: DescribeLineageGroupRequest): any => ({ + ...obj, + }); +} +export interface DescribeLineageGroupResponse { /** - *

    Information about the user who created or modified an experiment, trial, trial - * component, lineage group, or project.

    + *

    The name of the lineage group.

    */ - LastModifiedBy?: UserContext; + LineageGroupName?: string; /** - *

    A description provided for the model approval.

    + *

    The Amazon Resource Name (ARN) of the lineage group.

    */ - ApprovalDescription?: string; + LineageGroupArn?: string; /** - *

    The metadata properties associated with the model package versions.

    + *

    The display name of the lineage group.

    */ - CustomerMetadataProperties?: Record; + DisplayName?: string; /** - *

    Represents the drift check baselines that can be used when the model monitor is set using the model package. - * For more information, see the topic on Drift Detection against Previous Baselines in SageMaker Pipelines in the Amazon SageMaker Developer Guide. - *

    + *

    The description of the lineage group.

    */ - DriftCheckBaselines?: DriftCheckBaselines; + Description?: string; /** - *

    The machine learning domain of the model package you specified. Common machine - * learning domains include computer vision and natural language processing.

    + *

    The creation time of lineage group.

    */ - Domain?: string; + CreationTime?: Date; /** - *

    The machine learning task you specified that your model package accomplishes. - * Common machine learning tasks include object detection and image classification.

    + *

    Information about the user who created or modified an experiment, trial, trial + * component, lineage group, or project.

    */ - Task?: string; + CreatedBy?: UserContext; /** - *

    The Amazon Simple Storage Service (Amazon S3) path where the sample payload are stored. This path points to a single - * gzip compressed tar archive (.tar.gz suffix).

    + *

    The last modified time of the lineage group.

    */ - SamplePayloadUrl?: string; + LastModifiedTime?: Date; /** - *

    An array of additional Inference Specification objects. Each additional - * Inference Specification specifies artifacts based on this model package that can - * be used on inference endpoints. Generally used with SageMaker Neo to store the compiled artifacts.

    + *

    Information about the user who created or modified an experiment, trial, trial + * component, lineage group, or project.

    */ - AdditionalInferenceSpecifications?: AdditionalInferenceSpecificationDefinition[]; + LastModifiedBy?: UserContext; } -export namespace DescribeModelPackageOutput { +export namespace DescribeLineageGroupResponse { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeModelPackageOutput): any => ({ + export const filterSensitiveLog = (obj: DescribeLineageGroupResponse): any => ({ ...obj, }); } -export interface DescribeModelPackageGroupInput { +export interface DescribeModelInput { /** - *

    The name of gthe model group to describe.

    + *

    The name of the model.

    */ - ModelPackageGroupName: string | undefined; + ModelName: string | undefined; } -export namespace DescribeModelPackageGroupInput { +export namespace DescribeModelInput { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeModelPackageGroupInput): any => ({ + export const filterSensitiveLog = (obj: DescribeModelInput): any => ({ ...obj, }); } -export enum ModelPackageGroupStatus { - COMPLETED = "Completed", - DELETE_FAILED = "DeleteFailed", - DELETING = "Deleting", - FAILED = "Failed", - IN_PROGRESS = "InProgress", - PENDING = "Pending", -} - -export interface DescribeModelPackageGroupOutput { +export interface DescribeModelOutput { /** - *

    The name of the model group.

    + *

    Name of the SageMaker model.

    */ - ModelPackageGroupName: string | undefined; + ModelName: string | undefined; /** - *

    The Amazon Resource Name (ARN) of the model group.

    + *

    The location of the primary inference code, associated artifacts, and custom + * environment map that the inference code uses when it is deployed in production. + *

    */ - ModelPackageGroupArn: string | undefined; + PrimaryContainer?: ContainerDefinition; /** - *

    A description of the model group.

    + *

    The containers in the inference pipeline.

    */ - ModelPackageGroupDescription?: string; + Containers?: ContainerDefinition[]; /** - *

    The time that the model group was created.

    + *

    Specifies details of how containers in a multi-container endpoint are called.

    */ - CreationTime: Date | undefined; + InferenceExecutionConfig?: InferenceExecutionConfig; /** - *

    Information about the user who created or modified an experiment, trial, trial - * component, lineage group, or project.

    + *

    The Amazon Resource Name (ARN) of the IAM role that you specified for the + * model.

    */ - CreatedBy: UserContext | undefined; + ExecutionRoleArn: string | undefined; /** - *

    The status of the model group.

    - */ - ModelPackageGroupStatus: ModelPackageGroupStatus | string | undefined; + *

    A VpcConfig object that specifies the VPC that this model has access + * to. For more information, see Protect Endpoints by Using an Amazon Virtual + * Private Cloud + *

    + */ + VpcConfig?: VpcConfig; + + /** + *

    A timestamp that shows when the model was created.

    + */ + CreationTime: Date | undefined; + + /** + *

    The Amazon Resource Name (ARN) of the model.

    + */ + ModelArn: string | undefined; + + /** + *

    If True, no inbound or outbound network calls can be made to or from the + * model container.

    + */ + EnableNetworkIsolation?: boolean; } -export namespace DescribeModelPackageGroupOutput { +export namespace DescribeModelOutput { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeModelPackageGroupOutput): any => ({ + export const filterSensitiveLog = (obj: DescribeModelOutput): any => ({ ...obj, }); } -export interface DescribeModelQualityJobDefinitionRequest { +export interface DescribeModelBiasJobDefinitionRequest { /** - *

    The name of the model quality job. The name must be unique within an Amazon Web Services Region in the - * Amazon Web Services account.

    + *

    The name of the model bias job definition. The name must be unique within an Amazon Web Services Region + * in the Amazon Web Services account.

    */ JobDefinitionName: string | undefined; } -export namespace DescribeModelQualityJobDefinitionRequest { +export namespace DescribeModelBiasJobDefinitionRequest { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeModelQualityJobDefinitionRequest): any => ({ + export const filterSensitiveLog = (obj: DescribeModelBiasJobDefinitionRequest): any => ({ ...obj, }); } -export interface DescribeModelQualityJobDefinitionResponse { +export interface DescribeModelBiasJobDefinitionResponse { /** - *

    The Amazon Resource Name (ARN) of the model quality job.

    + *

    The Amazon Resource Name (ARN) of the model bias job.

    */ JobDefinitionArn: string | undefined; /** - *

    The name of the quality job definition. The name must be unique within an Amazon Web Services Region in - * the Amazon Web Services account.

    + *

    The name of the bias job definition. The name must be unique within an Amazon Web Services Region in the + * Amazon Web Services account.

    */ JobDefinitionName: string | undefined; /** - *

    The time at which the model quality job was created.

    + *

    The time at which the model bias job was created.

    */ CreationTime: Date | undefined; /** - *

    The baseline configuration for a model quality job.

    + *

    The baseline configuration for a model bias job.

    */ - ModelQualityBaselineConfig?: ModelQualityBaselineConfig; + ModelBiasBaselineConfig?: ModelBiasBaselineConfig; /** - *

    Configures the model quality job to run a specified Docker container image.

    + *

    Configures the model bias job to run a specified Docker container image.

    */ - ModelQualityAppSpecification: ModelQualityAppSpecification | undefined; + ModelBiasAppSpecification: ModelBiasAppSpecification | undefined; /** - *

    Inputs for the model quality job.

    + *

    Inputs for the model bias job.

    */ - ModelQualityJobInput: ModelQualityJobInput | undefined; + ModelBiasJobInput: ModelBiasJobInput | undefined; /** *

    The output configuration for monitoring jobs.

    */ - ModelQualityJobOutputConfig: MonitoringOutputConfig | undefined; + ModelBiasJobOutputConfig: MonitoringOutputConfig | undefined; /** *

    Identifies the resources to deploy for a monitoring job.

    @@ -1276,13 +1392,14 @@ export interface DescribeModelQualityJobDefinitionResponse { JobResources: MonitoringResources | undefined; /** - *

    Networking options for a model quality job.

    + *

    Networking options for a model bias job.

    */ NetworkConfig?: MonitoringNetworkConfig; /** - *

    The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to - * perform tasks on your behalf.

    + *

    The Amazon Resource Name (ARN) of the Amazon Web Services Identity and Access Management (IAM) role that + * has read permission to the input data location and write permission to the output data + * location in Amazon S3.

    */ RoleArn: string | undefined; @@ -1292,3045 +1409,2862 @@ export interface DescribeModelQualityJobDefinitionResponse { StoppingCondition?: MonitoringStoppingCondition; } -export namespace DescribeModelQualityJobDefinitionResponse { +export namespace DescribeModelBiasJobDefinitionResponse { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeModelQualityJobDefinitionResponse): any => ({ + export const filterSensitiveLog = (obj: DescribeModelBiasJobDefinitionResponse): any => ({ ...obj, }); } -export interface DescribeMonitoringScheduleRequest { +export interface DescribeModelExplainabilityJobDefinitionRequest { /** - *

    Name of a previously created monitoring schedule.

    + *

    The name of the model explainability job definition. The name must be unique within an + * Amazon Web Services Region in the Amazon Web Services account.

    */ - MonitoringScheduleName: string | undefined; + JobDefinitionName: string | undefined; } -export namespace DescribeMonitoringScheduleRequest { +export namespace DescribeModelExplainabilityJobDefinitionRequest { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeMonitoringScheduleRequest): any => ({ + export const filterSensitiveLog = (obj: DescribeModelExplainabilityJobDefinitionRequest): any => ({ ...obj, }); } -export enum ExecutionStatus { - COMPLETED = "Completed", - COMPLETED_WITH_VIOLATIONS = "CompletedWithViolations", - FAILED = "Failed", - IN_PROGRESS = "InProgress", - PENDING = "Pending", - STOPPED = "Stopped", - STOPPING = "Stopping", -} - -/** - *

    Summary of information about the last monitoring job to run.

    - */ -export interface MonitoringExecutionSummary { +export interface DescribeModelExplainabilityJobDefinitionResponse { /** - *

    The name of the monitoring schedule.

    + *

    The Amazon Resource Name (ARN) of the model explainability job.

    */ - MonitoringScheduleName: string | undefined; + JobDefinitionArn: string | undefined; /** - *

    The time the monitoring job was scheduled.

    + *

    The name of the explainability job definition. The name must be unique within an Amazon Web Services + * Region in the Amazon Web Services account.

    */ - ScheduledTime: Date | undefined; + JobDefinitionName: string | undefined; /** - *

    The time at which the monitoring job was created.

    + *

    The time at which the model explainability job was created.

    */ CreationTime: Date | undefined; /** - *

    A timestamp that indicates the last time the monitoring job was modified.

    + *

    The baseline configuration for a model explainability job.

    */ - LastModifiedTime: Date | undefined; + ModelExplainabilityBaselineConfig?: ModelExplainabilityBaselineConfig; /** - *

    The status of the monitoring job.

    + *

    Configures the model explainability job to run a specified Docker container + * image.

    */ - MonitoringExecutionStatus: ExecutionStatus | string | undefined; + ModelExplainabilityAppSpecification: ModelExplainabilityAppSpecification | undefined; /** - *

    The Amazon Resource Name (ARN) of the monitoring job.

    + *

    Inputs for the model explainability job.

    */ - ProcessingJobArn?: string; + ModelExplainabilityJobInput: ModelExplainabilityJobInput | undefined; /** - *

    The name of the endpoint used to run the monitoring job.

    + *

    The output configuration for monitoring jobs.

    */ - EndpointName?: string; + ModelExplainabilityJobOutputConfig: MonitoringOutputConfig | undefined; /** - *

    Contains the reason a monitoring job failed, if it failed.

    + *

    Identifies the resources to deploy for a monitoring job.

    */ - FailureReason?: string; + JobResources: MonitoringResources | undefined; /** - *

    The name of the monitoring job.

    + *

    Networking options for a model explainability job.

    */ - MonitoringJobDefinitionName?: string; + NetworkConfig?: MonitoringNetworkConfig; /** - *

    The type of the monitoring job.

    + *

    The Amazon Resource Name (ARN) of the Amazon Web Services Identity and Access Management (IAM) role that + * has read permission to the input data location and write permission to the output data + * location in Amazon S3.

    */ - MonitoringType?: MonitoringType | string; + RoleArn: string | undefined; + + /** + *

    A time limit for how long the monitoring job is allowed to run before stopping.

    + */ + StoppingCondition?: MonitoringStoppingCondition; } -export namespace MonitoringExecutionSummary { +export namespace DescribeModelExplainabilityJobDefinitionResponse { /** * @internal */ - export const filterSensitiveLog = (obj: MonitoringExecutionSummary): any => ({ + export const filterSensitiveLog = (obj: DescribeModelExplainabilityJobDefinitionResponse): any => ({ ...obj, }); } -export enum ScheduleStatus { - FAILED = "Failed", - PENDING = "Pending", - SCHEDULED = "Scheduled", - STOPPED = "Stopped", -} - -export interface DescribeMonitoringScheduleResponse { - /** - *

    The Amazon Resource Name (ARN) of the monitoring schedule.

    - */ - MonitoringScheduleArn: string | undefined; - +export interface DescribeModelPackageInput { /** - *

    Name of the monitoring schedule.

    + *

    The name or Amazon Resource Name (ARN) of the model package to describe.

    + *

    When you specify a name, the name must have 1 to 63 characters. Valid + * characters are a-z, A-Z, 0-9, and - (hyphen).

    */ - MonitoringScheduleName: string | undefined; + ModelPackageName: string | undefined; +} +export namespace DescribeModelPackageInput { /** - *

    The status of an monitoring job.

    + * @internal */ - MonitoringScheduleStatus: ScheduleStatus | string | undefined; + export const filterSensitiveLog = (obj: DescribeModelPackageInput): any => ({ + ...obj, + }); +} - /** - *

    The type of the monitoring job that this schedule runs. This is one of the following - * values.

    - *
      - *
    • - *

      - * DATA_QUALITY - The schedule is for a data quality monitoring - * job.

      - *
    • - *
    • - *

      - * MODEL_QUALITY - The schedule is for a model quality monitoring - * job.

      - *
    • - *
    • - *

      - * MODEL_BIAS - The schedule is for a bias monitoring job.

      - *
    • - *
    • - *

      - * MODEL_EXPLAINABILITY - The schedule is for an explainability - * monitoring job.

      - *
    • - *
    - */ - MonitoringType?: MonitoringType | string; +export enum DetailedModelPackageStatus { + COMPLETED = "Completed", + FAILED = "Failed", + IN_PROGRESS = "InProgress", + NOT_STARTED = "NotStarted", +} +/** + *

    Represents the overall status of a model package.

    + */ +export interface ModelPackageStatusItem { /** - *

    A string, up to one KB in size, that contains the reason a monitoring job failed, if it - * failed.

    + *

    The name of the model package for which the overall status is being reported.

    */ - FailureReason?: string; + Name: string | undefined; /** - *

    The time at which the monitoring job was created.

    + *

    The current status.

    */ - CreationTime: Date | undefined; + Status: DetailedModelPackageStatus | string | undefined; /** - *

    The time at which the monitoring job was last modified.

    + *

    If the overall status is Failed, the reason for the failure.

    */ - LastModifiedTime: Date | undefined; + FailureReason?: string; +} +export namespace ModelPackageStatusItem { /** - *

    The configuration object that specifies the monitoring schedule and defines the - * monitoring job.

    + * @internal */ - MonitoringScheduleConfig: MonitoringScheduleConfig | undefined; + export const filterSensitiveLog = (obj: ModelPackageStatusItem): any => ({ + ...obj, + }); +} +/** + *

    Specifies the validation and image scan statuses of the model package.

    + */ +export interface ModelPackageStatusDetails { /** - *

    The name of the endpoint for the monitoring job.

    + *

    The validation status of the model package.

    */ - EndpointName?: string; + ValidationStatuses: ModelPackageStatusItem[] | undefined; /** - *

    Describes metadata on the last execution to run, if there was one.

    + *

    The status of the scan of the Docker image container for the model package.

    */ - LastMonitoringExecutionSummary?: MonitoringExecutionSummary; + ImageScanStatuses?: ModelPackageStatusItem[]; } -export namespace DescribeMonitoringScheduleResponse { +export namespace ModelPackageStatusDetails { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeMonitoringScheduleResponse): any => ({ + export const filterSensitiveLog = (obj: ModelPackageStatusDetails): any => ({ ...obj, }); } -export interface DescribeNotebookInstanceInput { +export interface DescribeModelPackageOutput { /** - *

    The name of the notebook instance that you want information about.

    + *

    The name of the model package being described.

    */ - NotebookInstanceName: string | undefined; -} + ModelPackageName: string | undefined; -export namespace DescribeNotebookInstanceInput { /** - * @internal + *

    If the model is a versioned model, the name of the model group that the versioned + * model belongs to.

    */ - export const filterSensitiveLog = (obj: DescribeNotebookInstanceInput): any => ({ - ...obj, - }); -} + ModelPackageGroupName?: string; -export enum NotebookInstanceStatus { - Deleting = "Deleting", - Failed = "Failed", - InService = "InService", - Pending = "Pending", - Stopped = "Stopped", - Stopping = "Stopping", - Updating = "Updating", -} + /** + *

    The version of the model package.

    + */ + ModelPackageVersion?: number; -export interface DescribeNotebookInstanceOutput { /** - *

    The Amazon Resource Name (ARN) of the notebook instance.

    + *

    The Amazon Resource Name (ARN) of the model package.

    */ - NotebookInstanceArn?: string; + ModelPackageArn: string | undefined; /** - *

    The name of the SageMaker notebook instance.

    + *

    A brief summary of the model package.

    */ - NotebookInstanceName?: string; + ModelPackageDescription?: string; /** - *

    The status of the notebook instance.

    + *

    A timestamp specifying when the model package was created.

    */ - NotebookInstanceStatus?: NotebookInstanceStatus | string; + CreationTime: Date | undefined; /** - *

    If status is Failed, the reason it failed.

    + *

    Details about inference jobs that can be run with models based on this model + * package.

    */ - FailureReason?: string; + InferenceSpecification?: InferenceSpecification; /** - *

    The URL that you use to connect to the Jupyter notebook that is running in your - * notebook instance.

    + *

    Details about the algorithm that was used to create the model package.

    */ - Url?: string; + SourceAlgorithmSpecification?: SourceAlgorithmSpecification; /** - *

    The type of ML compute instance running on the notebook instance.

    + *

    Configurations for one or more transform jobs that SageMaker runs to test the model + * package.

    */ - InstanceType?: _InstanceType | string; + ValidationSpecification?: ModelPackageValidationSpecification; /** - *

    The ID of the VPC subnet.

    + *

    The current status of the model package.

    */ - SubnetId?: string; + ModelPackageStatus: ModelPackageStatus | string | undefined; /** - *

    The IDs of the VPC security groups.

    + *

    Details about the current status of the model package.

    */ - SecurityGroups?: string[]; + ModelPackageStatusDetails: ModelPackageStatusDetails | undefined; /** - *

    The Amazon Resource Name (ARN) of the IAM role associated with the instance. - *

    + *

    Whether the model package is certified for listing on Amazon Web Services Marketplace.

    */ - RoleArn?: string; + CertifyForMarketplace?: boolean; /** - *

    The Amazon Web Services KMS key ID SageMaker uses to encrypt data when storing it on the ML storage - * volume attached to the instance.

    + *

    The approval status of the model package.

    */ - KmsKeyId?: string; + ModelApprovalStatus?: ModelApprovalStatus | string; /** - *

    The network interface IDs that SageMaker created at the time of creating the instance. - *

    + *

    Information about the user who created or modified an experiment, trial, trial + * component, lineage group, or project.

    */ - NetworkInterfaceId?: string; + CreatedBy?: UserContext; /** - *

    A timestamp. Use this parameter to retrieve the time when the notebook instance was - * last modified.

    + *

    Metadata properties of the tracking entity, trial, or trial component.

    */ - LastModifiedTime?: Date; + MetadataProperties?: MetadataProperties; /** - *

    A timestamp. Use this parameter to return the time when the notebook instance was - * created

    + *

    Metrics for the model.

    */ - CreationTime?: Date; + ModelMetrics?: ModelMetrics; /** - *

    Returns the name of a notebook instance lifecycle configuration.

    - *

    For information about notebook instance lifestyle configurations, see Step - * 2.1: (Optional) Customize a Notebook Instance - *

    + *

    The last time that the model package was modified.

    */ - NotebookInstanceLifecycleConfigName?: string; + LastModifiedTime?: Date; /** - *

    Describes whether SageMaker provides internet access to the notebook instance. If this - * value is set to Disabled, the notebook instance does not have - * internet access, and cannot connect to SageMaker training and endpoint services.

    - *

    For more information, see Notebook Instances Are Internet-Enabled by Default.

    + *

    Information about the user who created or modified an experiment, trial, trial + * component, lineage group, or project.

    */ - DirectInternetAccess?: DirectInternetAccess | string; + LastModifiedBy?: UserContext; /** - *

    The size, in GB, of the ML storage volume attached to the notebook instance.

    + *

    A description provided for the model approval.

    */ - VolumeSizeInGB?: number; + ApprovalDescription?: string; /** - *

    A list of the Elastic Inference (EI) instance types associated with this notebook - * instance. Currently only one EI instance type can be associated with a notebook - * instance. For more information, see Using Elastic Inference in Amazon - * SageMaker.

    + *

    The metadata properties associated with the model package versions.

    */ - AcceleratorTypes?: (NotebookInstanceAcceleratorType | string)[]; + CustomerMetadataProperties?: Record; /** - *

    The Git repository associated with the notebook instance as its default code - * repository. This can be either the name of a Git repository stored as a resource in your - * account, or the URL of a Git repository in Amazon Web Services CodeCommit or in any - * other Git repository. When you open a notebook instance, it opens in the directory that - * contains this repository. For more information, see Associating Git Repositories with SageMaker - * Notebook Instances.

    + *

    Represents the drift check baselines that can be used when the model monitor is set using the model package. + * For more information, see the topic on Drift Detection against Previous Baselines in SageMaker Pipelines in the Amazon SageMaker Developer Guide. + *

    */ - DefaultCodeRepository?: string; + DriftCheckBaselines?: DriftCheckBaselines; /** - *

    An array of up to three Git repositories associated with the notebook instance. These - * can be either the names of Git repositories stored as resources in your account, or the - * URL of Git repositories in Amazon Web Services CodeCommit or in any - * other Git repository. These repositories are cloned at the same level as the default - * repository of your notebook instance. For more information, see Associating Git - * Repositories with SageMaker Notebook Instances.

    + *

    The machine learning domain of the model package you specified. Common machine + * learning domains include computer vision and natural language processing.

    */ - AdditionalCodeRepositories?: string[]; + Domain?: string; /** - *

    Whether root access is enabled or disabled for users of the notebook instance.

    - * - *

    Lifecycle configurations need root access to be able to set up a notebook - * instance. Because of this, lifecycle configurations associated with a notebook - * instance always run with root access even if you disable root access for - * users.

    - *
    + *

    The machine learning task you specified that your model package accomplishes. + * Common machine learning tasks include object detection and image classification.

    */ - RootAccess?: RootAccess | string; + Task?: string; /** - *

    The platform identifier of the notebook instance runtime environment.

    + *

    The Amazon Simple Storage Service (Amazon S3) path where the sample payload is stored. This path points to a single + * gzip compressed tar archive (.tar.gz suffix).

    */ - PlatformIdentifier?: string; + SamplePayloadUrl?: string; /** - *

    Information on the IMDS configuration of the notebook instance

    + *

    An array of additional Inference Specification objects. Each additional + * Inference Specification specifies artifacts based on this model package that can + * be used on inference endpoints. Generally used with SageMaker Neo to store the compiled artifacts.

    */ - InstanceMetadataServiceConfiguration?: InstanceMetadataServiceConfiguration; + AdditionalInferenceSpecifications?: AdditionalInferenceSpecificationDefinition[]; } -export namespace DescribeNotebookInstanceOutput { +export namespace DescribeModelPackageOutput { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeNotebookInstanceOutput): any => ({ + export const filterSensitiveLog = (obj: DescribeModelPackageOutput): any => ({ ...obj, }); } -export interface DescribeNotebookInstanceLifecycleConfigInput { +export interface DescribeModelPackageGroupInput { /** - *

    The name of the lifecycle configuration to describe.

    + *

    The name of the model group to describe.

    */ - NotebookInstanceLifecycleConfigName: string | undefined; + ModelPackageGroupName: string | undefined; } -export namespace DescribeNotebookInstanceLifecycleConfigInput { +export namespace DescribeModelPackageGroupInput { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeNotebookInstanceLifecycleConfigInput): any => ({ + export const filterSensitiveLog = (obj: DescribeModelPackageGroupInput): any => ({ ...obj, }); } -export interface DescribeNotebookInstanceLifecycleConfigOutput { +export enum ModelPackageGroupStatus { + COMPLETED = "Completed", + DELETE_FAILED = "DeleteFailed", + DELETING = "Deleting", + FAILED = "Failed", + IN_PROGRESS = "InProgress", + PENDING = "Pending", +} + +export interface DescribeModelPackageGroupOutput { /** - *

    The Amazon Resource Name (ARN) of the lifecycle configuration.

    + *

    The name of the model group.

    */ - NotebookInstanceLifecycleConfigArn?: string; + ModelPackageGroupName: string | undefined; /** - *

    The name of the lifecycle configuration.

    + *

    The Amazon Resource Name (ARN) of the model group.

    */ - NotebookInstanceLifecycleConfigName?: string; + ModelPackageGroupArn: string | undefined; /** - *

    The shell script that runs only once, when you create a notebook instance.

    + *

    A description of the model group.

    */ - OnCreate?: NotebookInstanceLifecycleHook[]; + ModelPackageGroupDescription?: string; /** - *

    The shell script that runs every time you start a notebook instance, including when - * you create the notebook instance.

    + *

    The time that the model group was created.

    */ - OnStart?: NotebookInstanceLifecycleHook[]; + CreationTime: Date | undefined; /** - *

    A timestamp that tells when the lifecycle configuration was last modified.

    + *

    Information about the user who created or modified an experiment, trial, trial + * component, lineage group, or project.

    */ - LastModifiedTime?: Date; + CreatedBy: UserContext | undefined; /** - *

    A timestamp that tells when the lifecycle configuration was created.

    + *

    The status of the model group.

    */ - CreationTime?: Date; + ModelPackageGroupStatus: ModelPackageGroupStatus | string | undefined; } -export namespace DescribeNotebookInstanceLifecycleConfigOutput { +export namespace DescribeModelPackageGroupOutput { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeNotebookInstanceLifecycleConfigOutput): any => ({ + export const filterSensitiveLog = (obj: DescribeModelPackageGroupOutput): any => ({ ...obj, }); } -export interface DescribePipelineRequest { +export interface DescribeModelQualityJobDefinitionRequest { /** - *

    The name of the pipeline to describe.

    + *

    The name of the model quality job. The name must be unique within an Amazon Web Services Region in the + * Amazon Web Services account.

    */ - PipelineName: string | undefined; + JobDefinitionName: string | undefined; } -export namespace DescribePipelineRequest { +export namespace DescribeModelQualityJobDefinitionRequest { /** * @internal */ - export const filterSensitiveLog = (obj: DescribePipelineRequest): any => ({ + export const filterSensitiveLog = (obj: DescribeModelQualityJobDefinitionRequest): any => ({ ...obj, }); } -export enum PipelineStatus { - ACTIVE = "Active", -} - -export interface DescribePipelineResponse { +export interface DescribeModelQualityJobDefinitionResponse { /** - *

    The Amazon Resource Name (ARN) of the pipeline.

    + *

    The Amazon Resource Name (ARN) of the model quality job.

    */ - PipelineArn?: string; + JobDefinitionArn: string | undefined; /** - *

    The name of the pipeline.

    + *

    The name of the quality job definition. The name must be unique within an Amazon Web Services Region in + * the Amazon Web Services account.

    */ - PipelineName?: string; + JobDefinitionName: string | undefined; /** - *

    The display name of the pipeline.

    + *

    The time at which the model quality job was created.

    */ - PipelineDisplayName?: string; + CreationTime: Date | undefined; /** - *

    The JSON pipeline definition.

    + *

    The baseline configuration for a model quality job.

    */ - PipelineDefinition?: string; + ModelQualityBaselineConfig?: ModelQualityBaselineConfig; /** - *

    The description of the pipeline.

    + *

    Configures the model quality job to run a specified Docker container image.

    */ - PipelineDescription?: string; + ModelQualityAppSpecification: ModelQualityAppSpecification | undefined; /** - *

    The Amazon Resource Name (ARN) that the pipeline uses to execute.

    + *

    Inputs for the model quality job.

    */ - RoleArn?: string; + ModelQualityJobInput: ModelQualityJobInput | undefined; /** - *

    The status of the pipeline execution.

    + *

    The output configuration for monitoring jobs.

    */ - PipelineStatus?: PipelineStatus | string; + ModelQualityJobOutputConfig: MonitoringOutputConfig | undefined; /** - *

    The time when the pipeline was created.

    + *

    Identifies the resources to deploy for a monitoring job.

    */ - CreationTime?: Date; + JobResources: MonitoringResources | undefined; /** - *

    The time when the pipeline was last modified.

    + *

    Networking options for a model quality job.

    */ - LastModifiedTime?: Date; + NetworkConfig?: MonitoringNetworkConfig; /** - *

    The time when the pipeline was last run.

    + *

    The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to + * perform tasks on your behalf.

    */ - LastRunTime?: Date; + RoleArn: string | undefined; /** - *

    Information about the user who created or modified an experiment, trial, trial - * component, lineage group, or project.

    + *

    A time limit for how long the monitoring job is allowed to run before stopping.

    */ - CreatedBy?: UserContext; + StoppingCondition?: MonitoringStoppingCondition; +} +export namespace DescribeModelQualityJobDefinitionResponse { /** - *

    Information about the user who created or modified an experiment, trial, trial - * component, lineage group, or project.

    + * @internal */ - LastModifiedBy?: UserContext; + export const filterSensitiveLog = (obj: DescribeModelQualityJobDefinitionResponse): any => ({ + ...obj, + }); +} +export interface DescribeMonitoringScheduleRequest { /** - *

    Lists the parallelism configuration applied to the pipeline.

    + *

    Name of a previously created monitoring schedule.

    */ - ParallelismConfiguration?: ParallelismConfiguration; + MonitoringScheduleName: string | undefined; } -export namespace DescribePipelineResponse { +export namespace DescribeMonitoringScheduleRequest { /** * @internal */ - export const filterSensitiveLog = (obj: DescribePipelineResponse): any => ({ + export const filterSensitiveLog = (obj: DescribeMonitoringScheduleRequest): any => ({ ...obj, }); } -export interface DescribePipelineDefinitionForExecutionRequest { +export enum ExecutionStatus { + COMPLETED = "Completed", + COMPLETED_WITH_VIOLATIONS = "CompletedWithViolations", + FAILED = "Failed", + IN_PROGRESS = "InProgress", + PENDING = "Pending", + STOPPED = "Stopped", + STOPPING = "Stopping", +} + +/** + *

    Summary of information about the last monitoring job to run.

    + */ +export interface MonitoringExecutionSummary { /** - *

    The Amazon Resource Name (ARN) of the pipeline execution.

    + *

    The name of the monitoring schedule.

    */ - PipelineExecutionArn: string | undefined; -} + MonitoringScheduleName: string | undefined; -export namespace DescribePipelineDefinitionForExecutionRequest { /** - * @internal + *

    The time the monitoring job was scheduled.

    */ - export const filterSensitiveLog = (obj: DescribePipelineDefinitionForExecutionRequest): any => ({ - ...obj, - }); -} + ScheduledTime: Date | undefined; -export interface DescribePipelineDefinitionForExecutionResponse { /** - *

    The JSON pipeline definition.

    + *

    The time at which the monitoring job was created.

    */ - PipelineDefinition?: string; + CreationTime: Date | undefined; /** - *

    The time when the pipeline was created.

    + *

    A timestamp that indicates the last time the monitoring job was modified.

    */ - CreationTime?: Date; -} + LastModifiedTime: Date | undefined; -export namespace DescribePipelineDefinitionForExecutionResponse { /** - * @internal + *

    The status of the monitoring job.

    */ - export const filterSensitiveLog = (obj: DescribePipelineDefinitionForExecutionResponse): any => ({ - ...obj, - }); -} + MonitoringExecutionStatus: ExecutionStatus | string | undefined; -export interface DescribePipelineExecutionRequest { /** - *

    The Amazon Resource Name (ARN) of the pipeline execution.

    + *

    The Amazon Resource Name (ARN) of the monitoring job.

    */ - PipelineExecutionArn: string | undefined; -} + ProcessingJobArn?: string; -export namespace DescribePipelineExecutionRequest { /** - * @internal + *

    The name of the endpoint used to run the monitoring job.

    */ - export const filterSensitiveLog = (obj: DescribePipelineExecutionRequest): any => ({ - ...obj, - }); -} + EndpointName?: string; -export enum PipelineExecutionStatus { - EXECUTING = "Executing", - FAILED = "Failed", - STOPPED = "Stopped", - STOPPING = "Stopping", - SUCCEEDED = "Succeeded", -} + /** + *

    Contains the reason a monitoring job failed, if it failed.

    + */ + FailureReason?: string; -/** - *

    Specifies the names of the experiment and trial created by a pipeline.

    - */ -export interface PipelineExperimentConfig { /** - *

    The name of the experiment.

    + *

    The name of the monitoring job.

    */ - ExperimentName?: string; + MonitoringJobDefinitionName?: string; /** - *

    The name of the trial.

    + *

    The type of the monitoring job.

    */ - TrialName?: string; + MonitoringType?: MonitoringType | string; } -export namespace PipelineExperimentConfig { +export namespace MonitoringExecutionSummary { /** * @internal */ - export const filterSensitiveLog = (obj: PipelineExperimentConfig): any => ({ + export const filterSensitiveLog = (obj: MonitoringExecutionSummary): any => ({ ...obj, }); } -export interface DescribePipelineExecutionResponse { - /** - *

    The Amazon Resource Name (ARN) of the pipeline.

    - */ - PipelineArn?: string; - - /** - *

    The Amazon Resource Name (ARN) of the pipeline execution.

    - */ - PipelineExecutionArn?: string; +export enum ScheduleStatus { + FAILED = "Failed", + PENDING = "Pending", + SCHEDULED = "Scheduled", + STOPPED = "Stopped", +} +export interface DescribeMonitoringScheduleResponse { /** - *

    The display name of the pipeline execution.

    + *

    The Amazon Resource Name (ARN) of the monitoring schedule.

    */ - PipelineExecutionDisplayName?: string; + MonitoringScheduleArn: string | undefined; /** - *

    The status of the pipeline execution.

    + *

    Name of the monitoring schedule.

    */ - PipelineExecutionStatus?: PipelineExecutionStatus | string; + MonitoringScheduleName: string | undefined; /** - *

    The description of the pipeline execution.

    + *

    The status of a monitoring job.

    */ - PipelineExecutionDescription?: string; + MonitoringScheduleStatus: ScheduleStatus | string | undefined; /** - *

    Specifies the names of the experiment and trial created by a pipeline.

    + *

    The type of the monitoring job that this schedule runs. This is one of the following + * values.

    + *
      + *
    • + *

      + * DATA_QUALITY - The schedule is for a data quality monitoring + * job.

      + *
    • + *
    • + *

      + * MODEL_QUALITY - The schedule is for a model quality monitoring + * job.

      + *
    • + *
    • + *

      + * MODEL_BIAS - The schedule is for a bias monitoring job.

      + *
    • + *
    • + *

      + * MODEL_EXPLAINABILITY - The schedule is for an explainability + * monitoring job.

      + *
    • + *
    */ - PipelineExperimentConfig?: PipelineExperimentConfig; + MonitoringType?: MonitoringType | string; /** - *

    If the execution failed, a message describing why.

    + *

    A string, up to one KB in size, that contains the reason a monitoring job failed, if it + * failed.

    */ FailureReason?: string; /** - *

    The time when the pipeline execution was created.

    + *

    The time at which the monitoring job was created.

    */ - CreationTime?: Date; + CreationTime: Date | undefined; /** - *

    The time when the pipeline execution was modified last.

    + *

    The time at which the monitoring job was last modified.

    */ - LastModifiedTime?: Date; + LastModifiedTime: Date | undefined; /** - *

    Information about the user who created or modified an experiment, trial, trial - * component, lineage group, or project.

    + *

    The configuration object that specifies the monitoring schedule and defines the + * monitoring job.

    */ - CreatedBy?: UserContext; + MonitoringScheduleConfig: MonitoringScheduleConfig | undefined; /** - *

    Information about the user who created or modified an experiment, trial, trial - * component, lineage group, or project.

    + *

    The name of the endpoint for the monitoring job.

    */ - LastModifiedBy?: UserContext; + EndpointName?: string; /** - *

    The parallelism configuration applied to the pipeline.

    + *

    Describes metadata on the last execution to run, if there was one.

    */ - ParallelismConfiguration?: ParallelismConfiguration; + LastMonitoringExecutionSummary?: MonitoringExecutionSummary; } -export namespace DescribePipelineExecutionResponse { +export namespace DescribeMonitoringScheduleResponse { /** * @internal */ - export const filterSensitiveLog = (obj: DescribePipelineExecutionResponse): any => ({ + export const filterSensitiveLog = (obj: DescribeMonitoringScheduleResponse): any => ({ ...obj, }); } -export interface DescribeProcessingJobRequest { +export interface DescribeNotebookInstanceInput { /** - *

    The name of the processing job. The name must be unique within an Amazon Web Services Region in the - * Amazon Web Services account.

    + *

    The name of the notebook instance that you want information about.

    */ - ProcessingJobName: string | undefined; + NotebookInstanceName: string | undefined; } -export namespace DescribeProcessingJobRequest { +export namespace DescribeNotebookInstanceInput { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeProcessingJobRequest): any => ({ + export const filterSensitiveLog = (obj: DescribeNotebookInstanceInput): any => ({ ...obj, }); } -export enum ProcessingJobStatus { - COMPLETED = "Completed", - FAILED = "Failed", - IN_PROGRESS = "InProgress", - STOPPED = "Stopped", - STOPPING = "Stopping", +export enum NotebookInstanceStatus { + Deleting = "Deleting", + Failed = "Failed", + InService = "InService", + Pending = "Pending", + Stopped = "Stopped", + Stopping = "Stopping", + Updating = "Updating", } -export interface DescribeProcessingJobResponse { +export interface DescribeNotebookInstanceOutput { /** - *

    The inputs for a processing job.

    + *

    The Amazon Resource Name (ARN) of the notebook instance.

    */ - ProcessingInputs?: ProcessingInput[]; + NotebookInstanceArn?: string; /** - *

    Output configuration for the processing job.

    + *

    The name of the SageMaker notebook instance.

    */ - ProcessingOutputConfig?: ProcessingOutputConfig; + NotebookInstanceName?: string; /** - *

    The name of the processing job. The name must be unique within an Amazon Web Services Region in the - * Amazon Web Services account.

    + *

    The status of the notebook instance.

    */ - ProcessingJobName: string | undefined; + NotebookInstanceStatus?: NotebookInstanceStatus | string; /** - *

    Identifies the resources, ML compute instances, and ML storage volumes to deploy for a - * processing job. In distributed training, you specify more than one instance.

    + *

    If status is Failed, the reason it failed.

    */ - ProcessingResources: ProcessingResources | undefined; + FailureReason?: string; /** - *

    The time limit for how long the processing job is allowed to run.

    + *

    The URL that you use to connect to the Jupyter notebook that is running in your + * notebook instance.

    */ - StoppingCondition?: ProcessingStoppingCondition; + Url?: string; /** - *

    Configures the processing job to run a specified container image.

    + *

    The type of ML compute instance running on the notebook instance.

    */ - AppSpecification: AppSpecification | undefined; + InstanceType?: _InstanceType | string; /** - *

    The environment variables set in the Docker container.

    + *

    The ID of the VPC subnet.

    */ - Environment?: Record; + SubnetId?: string; /** - *

    Networking options for a processing job.

    + *

    The IDs of the VPC security groups.

    */ - NetworkConfig?: NetworkConfig; + SecurityGroups?: string[]; /** - *

    The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on - * your behalf.

    + *

    The Amazon Resource Name (ARN) of the IAM role associated with the instance. + *

    */ RoleArn?: string; /** - *

    The configuration information used to create an experiment.

    + *

    The Amazon Web Services KMS key ID SageMaker uses to encrypt data when storing it on the ML storage + * volume attached to the instance.

    */ - ExperimentConfig?: ExperimentConfig; + KmsKeyId?: string; /** - *

    The Amazon Resource Name (ARN) of the processing job.

    + *

    The network interface IDs that SageMaker created at the time of creating the instance. + *

    */ - ProcessingJobArn: string | undefined; + NetworkInterfaceId?: string; /** - *

    Provides the status of a processing job.

    + *

    A timestamp. Use this parameter to retrieve the time when the notebook instance was + * last modified.

    */ - ProcessingJobStatus: ProcessingJobStatus | string | undefined; + LastModifiedTime?: Date; /** - *

    An optional string, up to one KB in size, that contains metadata from the processing - * container when the processing job exits.

    + *

    A timestamp. Use this parameter to return the time when the notebook instance was + * created

    */ - ExitMessage?: string; + CreationTime?: Date; /** - *

    A string, up to one KB in size, that contains the reason a processing job failed, if - * it failed.

    + *

    Returns the name of a notebook instance lifecycle configuration.

    + *

    For information about notebook instance lifestyle configurations, see Step + * 2.1: (Optional) Customize a Notebook Instance + *

    */ - FailureReason?: string; + NotebookInstanceLifecycleConfigName?: string; /** - *

    The time at which the processing job completed.

    + *

    Describes whether SageMaker provides internet access to the notebook instance. If this + * value is set to Disabled, the notebook instance does not have + * internet access, and cannot connect to SageMaker training and endpoint services.

    + *

    For more information, see Notebook Instances Are Internet-Enabled by Default.

    */ - ProcessingEndTime?: Date; + DirectInternetAccess?: DirectInternetAccess | string; /** - *

    The time at which the processing job started.

    + *

    The size, in GB, of the ML storage volume attached to the notebook instance.

    */ - ProcessingStartTime?: Date; + VolumeSizeInGB?: number; /** - *

    The time at which the processing job was last modified.

    + *

    A list of the Elastic Inference (EI) instance types associated with this notebook + * instance. Currently only one EI instance type can be associated with a notebook + * instance. For more information, see Using Elastic Inference in Amazon + * SageMaker.

    */ - LastModifiedTime?: Date; + AcceleratorTypes?: (NotebookInstanceAcceleratorType | string)[]; /** - *

    The time at which the processing job was created.

    + *

    The Git repository associated with the notebook instance as its default code + * repository. This can be either the name of a Git repository stored as a resource in your + * account, or the URL of a Git repository in Amazon Web Services CodeCommit or in any + * other Git repository. When you open a notebook instance, it opens in the directory that + * contains this repository. For more information, see Associating Git Repositories with SageMaker + * Notebook Instances.

    */ - CreationTime: Date | undefined; + DefaultCodeRepository?: string; /** - *

    The ARN of a monitoring schedule for an endpoint associated with this processing - * job.

    + *

    An array of up to three Git repositories associated with the notebook instance. These + * can be either the names of Git repositories stored as resources in your account, or the + * URL of Git repositories in Amazon Web Services CodeCommit or in any + * other Git repository. These repositories are cloned at the same level as the default + * repository of your notebook instance. For more information, see Associating Git + * Repositories with SageMaker Notebook Instances.

    */ - MonitoringScheduleArn?: string; + AdditionalCodeRepositories?: string[]; /** - *

    The ARN of an AutoML job associated with this processing job.

    + *

    Whether root access is enabled or disabled for users of the notebook instance.

    + * + *

    Lifecycle configurations need root access to be able to set up a notebook + * instance. Because of this, lifecycle configurations associated with a notebook + * instance always run with root access even if you disable root access for + * users.

    + *
    */ - AutoMLJobArn?: string; + RootAccess?: RootAccess | string; /** - *

    The ARN of a training job associated with this processing job.

    + *

    The platform identifier of the notebook instance runtime environment.

    */ - TrainingJobArn?: string; + PlatformIdentifier?: string; + + /** + *

    Information on the IMDS configuration of the notebook instance

    + */ + InstanceMetadataServiceConfiguration?: InstanceMetadataServiceConfiguration; } -export namespace DescribeProcessingJobResponse { +export namespace DescribeNotebookInstanceOutput { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeProcessingJobResponse): any => ({ + export const filterSensitiveLog = (obj: DescribeNotebookInstanceOutput): any => ({ ...obj, }); } -export interface DescribeProjectInput { +export interface DescribeNotebookInstanceLifecycleConfigInput { /** - *

    The name of the project to describe.

    + *

    The name of the lifecycle configuration to describe.

    */ - ProjectName: string | undefined; + NotebookInstanceLifecycleConfigName: string | undefined; } -export namespace DescribeProjectInput { +export namespace DescribeNotebookInstanceLifecycleConfigInput { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeProjectInput): any => ({ + export const filterSensitiveLog = (obj: DescribeNotebookInstanceLifecycleConfigInput): any => ({ ...obj, }); } -export enum ProjectStatus { - CREATE_COMPLETED = "CreateCompleted", - CREATE_FAILED = "CreateFailed", - CREATE_IN_PROGRESS = "CreateInProgress", - DELETE_COMPLETED = "DeleteCompleted", - DELETE_FAILED = "DeleteFailed", - DELETE_IN_PROGRESS = "DeleteInProgress", - PENDING = "Pending", - UPDATE_COMPLETED = "UpdateCompleted", - UPDATE_FAILED = "UpdateFailed", - UPDATE_IN_PROGRESS = "UpdateInProgress", -} +export interface DescribeNotebookInstanceLifecycleConfigOutput { + /** + *

    The Amazon Resource Name (ARN) of the lifecycle configuration.

    + */ + NotebookInstanceLifecycleConfigArn?: string; -/** - *

    Details of a provisioned service catalog product. For information about service catalog, - * see What is Amazon Web Services Service - * Catalog.

    - */ -export interface ServiceCatalogProvisionedProductDetails { /** - *

    The ID of the provisioned product.

    + *

    The name of the lifecycle configuration.

    */ - ProvisionedProductId?: string; + NotebookInstanceLifecycleConfigName?: string; /** - *

    The current status of the product.

    - *
      - *
    • - *

      - * AVAILABLE - Stable state, ready to perform any operation. The most recent operation succeeded and completed.

      - *
    • - *
    • - *

      - * UNDER_CHANGE - Transitive state. Operations performed might not have valid results. Wait for an AVAILABLE status before performing operations.

      - *
    • - *
    • - *

      - * TAINTED - Stable state, ready to perform any operation. The stack has completed the requested operation but is not exactly what was requested. For example, a request to update to a new version failed and the stack rolled back to the current version.

      - *
    • - *
    • - *

      - * ERROR - An unexpected error occurred. The provisioned product exists but the stack is not running. For example, CloudFormation received a parameter value that was not valid and could not launch the stack.

      - *
    • - *
    • - *

      - * PLAN_IN_PROGRESS - Transitive state. The plan operations were performed to provision a new product, but resources have not yet been created. After reviewing the list of resources to be created, execute the plan. Wait for an AVAILABLE status before performing operations.

      - *
    • - *
    + *

    The shell script that runs only once, when you create a notebook instance.

    */ - ProvisionedProductStatusMessage?: string; + OnCreate?: NotebookInstanceLifecycleHook[]; + + /** + *

    The shell script that runs every time you start a notebook instance, including when + * you create the notebook instance.

    + */ + OnStart?: NotebookInstanceLifecycleHook[]; + + /** + *

    A timestamp that tells when the lifecycle configuration was last modified.

    + */ + LastModifiedTime?: Date; + + /** + *

    A timestamp that tells when the lifecycle configuration was created.

    + */ + CreationTime?: Date; } -export namespace ServiceCatalogProvisionedProductDetails { +export namespace DescribeNotebookInstanceLifecycleConfigOutput { /** * @internal */ - export const filterSensitiveLog = (obj: ServiceCatalogProvisionedProductDetails): any => ({ + export const filterSensitiveLog = (obj: DescribeNotebookInstanceLifecycleConfigOutput): any => ({ ...obj, }); } -export interface DescribeProjectOutput { +export interface DescribePipelineRequest { /** - *

    The Amazon Resource Name (ARN) of the project.

    + *

    The name of the pipeline to describe.

    */ - ProjectArn: string | undefined; + PipelineName: string | undefined; +} +export namespace DescribePipelineRequest { /** - *

    The name of the project.

    + * @internal */ - ProjectName: string | undefined; + export const filterSensitiveLog = (obj: DescribePipelineRequest): any => ({ + ...obj, + }); +} + +export enum PipelineStatus { + ACTIVE = "Active", +} +export interface DescribePipelineResponse { /** - *

    The ID of the project.

    + *

    The Amazon Resource Name (ARN) of the pipeline.

    */ - ProjectId: string | undefined; + PipelineArn?: string; /** - *

    The description of the project.

    + *

    The name of the pipeline.

    */ - ProjectDescription?: string; + PipelineName?: string; /** - *

    Information used to provision a service catalog product. For information, see What is Amazon Web Services Service - * Catalog.

    + *

    The display name of the pipeline.

    */ - ServiceCatalogProvisioningDetails: ServiceCatalogProvisioningDetails | undefined; + PipelineDisplayName?: string; /** - *

    Information about a provisioned service catalog product.

    + *

    The JSON pipeline definition.

    */ - ServiceCatalogProvisionedProductDetails?: ServiceCatalogProvisionedProductDetails; + PipelineDefinition?: string; /** - *

    The status of the project.

    + *

    The description of the pipeline.

    */ - ProjectStatus: ProjectStatus | string | undefined; + PipelineDescription?: string; /** - *

    Information about the user who created or modified an experiment, trial, trial - * component, lineage group, or project.

    + *

    The Amazon Resource Name (ARN) that the pipeline uses to execute.

    */ - CreatedBy?: UserContext; + RoleArn?: string; /** - *

    The time when the project was created.

    + *

    The status of the pipeline execution.

    */ - CreationTime: Date | undefined; + PipelineStatus?: PipelineStatus | string; /** - *

    The timestamp when project was last modified.

    + *

    The time when the pipeline was created.

    */ - LastModifiedTime?: Date; + CreationTime?: Date; /** - *

    Information about the user who created or modified an experiment, trial, trial - * component, lineage group, or project.

    + *

    The time when the pipeline was last modified.

    */ - LastModifiedBy?: UserContext; -} + LastModifiedTime?: Date; -export namespace DescribeProjectOutput { /** - * @internal + *

    The time when the pipeline was last run.

    */ - export const filterSensitiveLog = (obj: DescribeProjectOutput): any => ({ - ...obj, - }); -} + LastRunTime?: Date; -export interface DescribeStudioLifecycleConfigRequest { /** - *

    The name of the Studio Lifecycle Configuration to describe.

    + *

    Information about the user who created or modified an experiment, trial, trial + * component, lineage group, or project.

    */ - StudioLifecycleConfigName: string | undefined; + CreatedBy?: UserContext; + + /** + *

    Information about the user who created or modified an experiment, trial, trial + * component, lineage group, or project.

    + */ + LastModifiedBy?: UserContext; + + /** + *

    Lists the parallelism configuration applied to the pipeline.

    + */ + ParallelismConfiguration?: ParallelismConfiguration; } -export namespace DescribeStudioLifecycleConfigRequest { +export namespace DescribePipelineResponse { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeStudioLifecycleConfigRequest): any => ({ + export const filterSensitiveLog = (obj: DescribePipelineResponse): any => ({ ...obj, }); } -export interface DescribeStudioLifecycleConfigResponse { +export interface DescribePipelineDefinitionForExecutionRequest { /** - *

    The ARN of the Lifecycle Configuration to describe.

    + *

    The Amazon Resource Name (ARN) of the pipeline execution.

    */ - StudioLifecycleConfigArn?: string; + PipelineExecutionArn: string | undefined; +} +export namespace DescribePipelineDefinitionForExecutionRequest { /** - *

    The name of the Studio Lifecycle Configuration that is described.

    + * @internal */ - StudioLifecycleConfigName?: string; + export const filterSensitiveLog = (obj: DescribePipelineDefinitionForExecutionRequest): any => ({ + ...obj, + }); +} +export interface DescribePipelineDefinitionForExecutionResponse { /** - *

    The creation time of the Studio Lifecycle Configuration.

    + *

    The JSON pipeline definition.

    */ - CreationTime?: Date; + PipelineDefinition?: string; /** - *

    This value is equivalent to CreationTime because Studio Lifecycle Configurations are immutable.

    + *

    The time when the pipeline was created.

    */ - LastModifiedTime?: Date; + CreationTime?: Date; +} +export namespace DescribePipelineDefinitionForExecutionResponse { /** - *

    The content of your Studio Lifecycle Configuration script.

    + * @internal */ - StudioLifecycleConfigContent?: string; + export const filterSensitiveLog = (obj: DescribePipelineDefinitionForExecutionResponse): any => ({ + ...obj, + }); +} +export interface DescribePipelineExecutionRequest { /** - *

    The App type that the Lifecycle Configuration is attached to.

    + *

    The Amazon Resource Name (ARN) of the pipeline execution.

    */ - StudioLifecycleConfigAppType?: StudioLifecycleConfigAppType | string; + PipelineExecutionArn: string | undefined; } -export namespace DescribeStudioLifecycleConfigResponse { +export namespace DescribePipelineExecutionRequest { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeStudioLifecycleConfigResponse): any => ({ + export const filterSensitiveLog = (obj: DescribePipelineExecutionRequest): any => ({ ...obj, }); } -export interface DescribeSubscribedWorkteamRequest { +export enum PipelineExecutionStatus { + EXECUTING = "Executing", + FAILED = "Failed", + STOPPED = "Stopped", + STOPPING = "Stopping", + SUCCEEDED = "Succeeded", +} + +/** + *

    Specifies the names of the experiment and trial created by a pipeline.

    + */ +export interface PipelineExperimentConfig { /** - *

    The Amazon Resource Name (ARN) of the subscribed work team to describe.

    + *

    The name of the experiment.

    */ - WorkteamArn: string | undefined; + ExperimentName?: string; + + /** + *

    The name of the trial.

    + */ + TrialName?: string; } -export namespace DescribeSubscribedWorkteamRequest { +export namespace PipelineExperimentConfig { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeSubscribedWorkteamRequest): any => ({ + export const filterSensitiveLog = (obj: PipelineExperimentConfig): any => ({ ...obj, }); } -/** - *

    Describes a work team of a vendor that does the a labelling job.

    - */ -export interface SubscribedWorkteam { +export interface DescribePipelineExecutionResponse { /** - *

    The Amazon Resource Name (ARN) of the vendor that you have subscribed.

    + *

    The Amazon Resource Name (ARN) of the pipeline.

    */ - WorkteamArn: string | undefined; + PipelineArn?: string; /** - *

    The title of the service provided by the vendor in the Amazon Marketplace.

    + *

    The Amazon Resource Name (ARN) of the pipeline execution.

    */ - MarketplaceTitle?: string; + PipelineExecutionArn?: string; /** - *

    The name of the vendor in the Amazon Marketplace.

    + *

    The display name of the pipeline execution.

    */ - SellerName?: string; + PipelineExecutionDisplayName?: string; /** - *

    The description of the vendor from the Amazon Marketplace.

    + *

    The status of the pipeline execution.

    */ - MarketplaceDescription?: string; + PipelineExecutionStatus?: PipelineExecutionStatus | string; /** - *

    Marketplace product listing ID.

    + *

    The description of the pipeline execution.

    */ - ListingId?: string; -} + PipelineExecutionDescription?: string; -export namespace SubscribedWorkteam { /** - * @internal + *

    Specifies the names of the experiment and trial created by a pipeline.

    */ - export const filterSensitiveLog = (obj: SubscribedWorkteam): any => ({ - ...obj, - }); -} + PipelineExperimentConfig?: PipelineExperimentConfig; -export interface DescribeSubscribedWorkteamResponse { /** - *

    A Workteam instance that contains information about the work team.

    + *

    If the execution failed, a message describing why.

    */ - SubscribedWorkteam: SubscribedWorkteam | undefined; + FailureReason?: string; + + /** + *

    The time when the pipeline execution was created.

    + */ + CreationTime?: Date; + + /** + *

    The time when the pipeline execution was modified last.

    + */ + LastModifiedTime?: Date; + + /** + *

    Information about the user who created or modified an experiment, trial, trial + * component, lineage group, or project.

    + */ + CreatedBy?: UserContext; + + /** + *

    Information about the user who created or modified an experiment, trial, trial + * component, lineage group, or project.

    + */ + LastModifiedBy?: UserContext; + + /** + *

    The parallelism configuration applied to the pipeline.

    + */ + ParallelismConfiguration?: ParallelismConfiguration; } -export namespace DescribeSubscribedWorkteamResponse { +export namespace DescribePipelineExecutionResponse { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeSubscribedWorkteamResponse): any => ({ + export const filterSensitiveLog = (obj: DescribePipelineExecutionResponse): any => ({ ...obj, }); } -export interface DescribeTrainingJobRequest { +export interface DescribeProcessingJobRequest { /** - *

    The name of the training job.

    + *

    The name of the processing job. The name must be unique within an Amazon Web Services Region in the + * Amazon Web Services account.

    */ - TrainingJobName: string | undefined; + ProcessingJobName: string | undefined; } -export namespace DescribeTrainingJobRequest { +export namespace DescribeProcessingJobRequest { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeTrainingJobRequest): any => ({ + export const filterSensitiveLog = (obj: DescribeProcessingJobRequest): any => ({ ...obj, }); } -/** - *

    The name, value, and date and time of a metric that was emitted to Amazon CloudWatch.

    - */ -export interface MetricData { +export enum ProcessingJobStatus { + COMPLETED = "Completed", + FAILED = "Failed", + IN_PROGRESS = "InProgress", + STOPPED = "Stopped", + STOPPING = "Stopping", +} + +export interface DescribeProcessingJobResponse { /** - *

    The name of the metric.

    + *

    The inputs for a processing job.

    */ - MetricName?: string; + ProcessingInputs?: ProcessingInput[]; /** - *

    The value of the metric.

    + *

    Output configuration for the processing job.

    */ - Value?: number; + ProcessingOutputConfig?: ProcessingOutputConfig; /** - *

    The date and time that the algorithm emitted the metric.

    + *

    The name of the processing job. The name must be unique within an Amazon Web Services Region in the + * Amazon Web Services account.

    */ - Timestamp?: Date; -} + ProcessingJobName: string | undefined; -export namespace MetricData { /** - * @internal + *

    Identifies the resources, ML compute instances, and ML storage volumes to deploy for a + * processing job. In distributed training, you specify more than one instance.

    */ - export const filterSensitiveLog = (obj: MetricData): any => ({ - ...obj, - }); -} + ProcessingResources: ProcessingResources | undefined; -/** - *

    Information about the status of the rule evaluation.

    - */ -export interface ProfilerRuleEvaluationStatus { /** - *

    The name of the rule configuration.

    + *

    The time limit for how long the processing job is allowed to run.

    */ - RuleConfigurationName?: string; + StoppingCondition?: ProcessingStoppingCondition; /** - *

    The Amazon Resource Name (ARN) of the rule evaluation job.

    + *

    Configures the processing job to run a specified container image.

    */ - RuleEvaluationJobArn?: string; + AppSpecification: AppSpecification | undefined; /** - *

    Status of the rule evaluation.

    + *

    The environment variables set in the Docker container.

    */ - RuleEvaluationStatus?: RuleEvaluationStatus | string; + Environment?: Record; /** - *

    Details from the rule evaluation.

    + *

    Networking options for a processing job.

    */ - StatusDetails?: string; + NetworkConfig?: NetworkConfig; /** - *

    Timestamp when the rule evaluation status was last modified.

    + *

    The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on + * your behalf.

    */ - LastModifiedTime?: Date; -} + RoleArn?: string; -export namespace ProfilerRuleEvaluationStatus { /** - * @internal + *

    The configuration information used to create an experiment.

    */ - export const filterSensitiveLog = (obj: ProfilerRuleEvaluationStatus): any => ({ - ...obj, - }); -} + ExperimentConfig?: ExperimentConfig; -export enum ProfilingStatus { - DISABLED = "Disabled", - ENABLED = "Enabled", -} - -export enum SecondaryStatus { - COMPLETED = "Completed", - DOWNLOADING = "Downloading", - DOWNLOADING_TRAINING_IMAGE = "DownloadingTrainingImage", - FAILED = "Failed", - INTERRUPTED = "Interrupted", - LAUNCHING_ML_INSTANCES = "LaunchingMLInstances", - MAX_RUNTIME_EXCEEDED = "MaxRuntimeExceeded", - MAX_WAIT_TIME_EXCEEDED = "MaxWaitTimeExceeded", - PREPARING_TRAINING_STACK = "PreparingTrainingStack", - RESTARTING = "Restarting", - STARTING = "Starting", - STOPPED = "Stopped", - STOPPING = "Stopping", - TRAINING = "Training", - UPDATING = "Updating", - UPLOADING = "Uploading", -} - -/** - *

    An array element of DescribeTrainingJobResponse$SecondaryStatusTransitions. It provides - * additional details about a status that the training job has transitioned through. A - * training job can be in one of several states, for example, starting, downloading, - * training, or uploading. Within each state, there are a number of intermediate states. - * For example, within the starting state, SageMaker could be starting the training job or - * launching the ML instances. These transitional states are referred to as the job's - * secondary - * status. - *

    - *

    - */ -export interface SecondaryStatusTransition { - /** - *

    Contains a secondary status information from a training - * job.

    - *

    Status might be one of the following secondary statuses:

    - *
    - *
    InProgress
    - *
    - *
      - *
    • - *

      - * Starting - * - Starting the training job.

      - *
    • - *
    • - *

      - * Downloading - An optional stage for algorithms that - * support File training input mode. It indicates that - * data is being downloaded to the ML storage volumes.

      - *
    • - *
    • - *

      - * Training - Training is in progress.

      - *
    • - *
    • - *

      - * Uploading - Training is complete and the model - * artifacts are being uploaded to the S3 location.

      - *
    • - *
    - *
    - *
    Completed
    - *
    - *
      - *
    • - *

      - * Completed - The training job has completed.

      - *
    • - *
    - *
    - *
    Failed
    - *
    - *
      - *
    • - *

      - * Failed - The training job has failed. The reason for - * the failure is returned in the FailureReason field of - * DescribeTrainingJobResponse.

      - *
    • - *
    - *
    - *
    Stopped
    - *
    - *
      - *
    • - *

      - * MaxRuntimeExceeded - The job stopped because it - * exceeded the maximum allowed runtime.

      - *
    • - *
    • - *

      - * Stopped - The training job has stopped.

      - *
    • - *
    - *
    - *
    Stopping
    - *
    - *
      - *
    • - *

      - * Stopping - Stopping the training job.

      - *
    • - *
    - *
    - *
    - *

    We no longer support the following secondary statuses:

    - *
      - *
    • - *

      - * LaunchingMLInstances - *

      - *
    • - *
    • - *

      - * PreparingTrainingStack - *

      - *
    • - *
    • - *

      - * DownloadingTrainingImage - *

      - *
    • - *
    - */ - Status: SecondaryStatus | string | undefined; - - /** - *

    A timestamp that shows when the training job transitioned to the current secondary - * status state.

    - */ - StartTime: Date | undefined; - - /** - *

    A timestamp that shows when the training job transitioned out of this secondary status - * state into another secondary status state or when the training job has ended.

    - */ - EndTime?: Date; - - /** - *

    A detailed description of the progress within a secondary status. - *

    - *

    SageMaker provides secondary statuses and status messages that apply to each of - * them:

    - *
    - *
    Starting
    - *
    - *
      - *
    • - *

      Starting the training job.

      - *
    • - *
    • - *

      Launching requested ML - * instances.

      - *
    • - *
    • - *

      Insufficient - * capacity error from EC2 while launching instances, - * retrying!

      - *
    • - *
    • - *

      Launched - * instance was unhealthy, replacing it!

      - *
    • - *
    • - *

      Preparing the instances for training.

      - *
    • - *
    - *
    - *
    Training
    - *
    - *
      - *
    • - *

      Downloading the training image.

      - *
    • - *
    • - *

      Training - * image download completed. Training in - * progress.

      - *
    • - *
    - *
    - *
    - * - *

    Status messages are subject to change. Therefore, we recommend not including them - * in code that programmatically initiates actions. For examples, don't use status - * messages in if statements.

    - *
    - *

    To have an overview of your training job's progress, view - * TrainingJobStatus and SecondaryStatus in DescribeTrainingJob, and StatusMessage together. For - * example, at the start of a training job, you might see the following:

    - *
      - *
    • - *

      - * TrainingJobStatus - InProgress

      - *
    • - *
    • - *

      - * SecondaryStatus - Training

      - *
    • - *
    • - *

      - * StatusMessage - Downloading the training image

      - *
    • - *
    - */ - StatusMessage?: string; -} - -export namespace SecondaryStatusTransition { - /** - * @internal - */ - export const filterSensitiveLog = (obj: SecondaryStatusTransition): any => ({ - ...obj, - }); -} - -export interface DescribeTrainingJobResponse { - /** - *

    Name of the model training job.

    - */ - TrainingJobName: string | undefined; - - /** - *

    The Amazon Resource Name (ARN) of the training job.

    - */ - TrainingJobArn: string | undefined; - - /** - *

    The Amazon Resource Name (ARN) of the associated hyperparameter tuning job if the - * training job was launched by a hyperparameter tuning job.

    - */ - TuningJobArn?: string; - - /** - *

    The Amazon Resource Name (ARN) of the SageMaker Ground Truth labeling job that created the - * transform or training job.

    - */ - LabelingJobArn?: string; - - /** - *

    The Amazon Resource Name (ARN) of an AutoML job.

    - */ - AutoMLJobArn?: string; - - /** - *

    Information about the Amazon S3 location that is configured for storing model artifacts. - *

    - */ - ModelArtifacts: ModelArtifacts | undefined; - - /** - *

    The status of the training job.

    - *

    SageMaker provides the following training job statuses:

    - *
      - *
    • - *

      - * InProgress - The training is in progress.

      - *
    • - *
    • - *

      - * Completed - The training job has completed.

      - *
    • - *
    • - *

      - * Failed - The training job has failed. To see the reason for the - * failure, see the FailureReason field in the response to a - * DescribeTrainingJobResponse call.

      - *
    • - *
    • - *

      - * Stopping - The training job is stopping.

      - *
    • - *
    • - *

      - * Stopped - The training job has stopped.

      - *
    • - *
    - *

    For more detailed information, see SecondaryStatus.

    - */ - TrainingJobStatus: TrainingJobStatus | string | undefined; - - /** - *

    Provides detailed information about the state of the training job. For detailed - * information on the secondary status of the training job, see StatusMessage - * under SecondaryStatusTransition.

    - *

    SageMaker provides primary statuses and secondary statuses that apply to each of - * them:

    - *
    - *
    InProgress
    - *
    - *
      - *
    • - *

      - * Starting - * - Starting the training job.

      - *
    • - *
    • - *

      - * Downloading - An optional stage for algorithms that - * support File training input mode. It indicates that - * data is being downloaded to the ML storage volumes.

      - *
    • - *
    • - *

      - * Training - Training is in progress.

      - *
    • - *
    • - *

      - * Interrupted - The job stopped because the managed - * spot training instances were interrupted.

      - *
    • - *
    • - *

      - * Uploading - Training is complete and the model - * artifacts are being uploaded to the S3 location.

      - *
    • - *
    - *
    - *
    Completed
    - *
    - *
      - *
    • - *

      - * Completed - The training job has completed.

      - *
    • - *
    - *
    - *
    Failed
    - *
    - *
      - *
    • - *

      - * Failed - The training job has failed. The reason for - * the failure is returned in the FailureReason field of - * DescribeTrainingJobResponse.

      - *
    • - *
    - *
    - *
    Stopped
    - *
    - *
      - *
    • - *

      - * MaxRuntimeExceeded - The job stopped because it - * exceeded the maximum allowed runtime.

      - *
    • - *
    • - *

      - * MaxWaitTimeExceeded - The job stopped because it - * exceeded the maximum allowed wait time.

      - *
    • - *
    • - *

      - * Stopped - The training job has stopped.

      - *
    • - *
    - *
    - *
    Stopping
    - *
    - *
      - *
    • - *

      - * Stopping - Stopping the training job.

      - *
    • - *
    - *
    - *
    - * - * - *

    Valid values for SecondaryStatus are subject to change.

    - *
    - *

    We no longer support the following secondary statuses:

    - *
      - *
    • - *

      - * LaunchingMLInstances - *

      - *
    • - *
    • - *

      - * PreparingTraining - *

      - *
    • - *
    • - *

      - * DownloadingTrainingImage - *

      - *
    • - *
    - */ - SecondaryStatus: SecondaryStatus | string | undefined; - - /** - *

    If the training job failed, the reason it failed.

    - */ - FailureReason?: string; - - /** - *

    Algorithm-specific parameters.

    - */ - HyperParameters?: Record; - - /** - *

    Information about the algorithm used for training, and algorithm metadata. - *

    - */ - AlgorithmSpecification: AlgorithmSpecification | undefined; - - /** - *

    The Amazon Web Services Identity and Access Management (IAM) role configured for the training job.

    - */ - RoleArn?: string; - - /** - *

    An array of Channel objects that describes each data input channel. - *

    - */ - InputDataConfig?: Channel[]; - - /** - *

    The S3 path where model artifacts that you configured when creating the job are - * stored. SageMaker creates subfolders for model artifacts.

    - */ - OutputDataConfig?: OutputDataConfig; - - /** - *

    Resources, including ML compute instances and ML storage volumes, that are - * configured for model training.

    - */ - ResourceConfig: ResourceConfig | undefined; + /** + *

    The Amazon Resource Name (ARN) of the processing job.

    + */ + ProcessingJobArn: string | undefined; /** - *

    A VpcConfig object that specifies the VPC that this training job has - * access to. For more information, see Protect Training Jobs by Using an Amazon - * Virtual Private Cloud.

    + *

    Provides the status of a processing job.

    */ - VpcConfig?: VpcConfig; + ProcessingJobStatus: ProcessingJobStatus | string | undefined; /** - *

    Specifies a limit to how long a model training job can run. It also specifies how long - * a managed Spot training job has to complete. When the job reaches the time limit, SageMaker - * ends the training job. Use this API to cap model training costs.

    - *

    To stop a job, SageMaker sends the algorithm the SIGTERM signal, which delays - * job termination for 120 seconds. Algorithms can use this 120-second window to save the - * model artifacts, so the results of training are not lost.

    + *

    An optional string, up to one KB in size, that contains metadata from the processing + * container when the processing job exits.

    */ - StoppingCondition: StoppingCondition | undefined; + ExitMessage?: string; /** - *

    A timestamp that indicates when the training job was created.

    + *

    A string, up to one KB in size, that contains the reason a processing job failed, if + * it failed.

    */ - CreationTime: Date | undefined; + FailureReason?: string; /** - *

    Indicates the time when the training job starts on training instances. You are - * billed for the time interval between this time and the value of - * TrainingEndTime. The start time in CloudWatch Logs might be later than this time. - * The difference is due to the time it takes to download the training data and to the size - * of the training container.

    + *

    The time at which the processing job completed.

    */ - TrainingStartTime?: Date; + ProcessingEndTime?: Date; /** - *

    Indicates the time when the training job ends on training instances. You are billed - * for the time interval between the value of TrainingStartTime and this time. - * For successful jobs and stopped jobs, this is the time after model artifacts are - * uploaded. For failed jobs, this is the time when SageMaker detects a job failure.

    + *

    The time at which the processing job started.

    */ - TrainingEndTime?: Date; + ProcessingStartTime?: Date; /** - *

    A timestamp that indicates when the status of the training job was last - * modified.

    + *

    The time at which the processing job was last modified.

    */ LastModifiedTime?: Date; /** - *

    A history of all of the secondary statuses that the training job has transitioned - * through.

    + *

    The time at which the processing job was created.

    */ - SecondaryStatusTransitions?: SecondaryStatusTransition[]; + CreationTime: Date | undefined; /** - *

    A collection of MetricData objects that specify the names, values, and - * dates and times that the training algorithm emitted to Amazon CloudWatch.

    + *

    The ARN of a monitoring schedule for an endpoint associated with this processing + * job.

    */ - FinalMetricDataList?: MetricData[]; + MonitoringScheduleArn?: string; /** - *

    If you want to allow inbound or outbound network calls, except for calls between peers - * within a training cluster for distributed training, choose True. If you - * enable network isolation for training jobs that are configured to use a VPC, SageMaker - * downloads and uploads customer data and model artifacts through the specified VPC, but - * the training container does not have network access.

    + *

    The ARN of an AutoML job associated with this processing job.

    */ - EnableNetworkIsolation?: boolean; + AutoMLJobArn?: string; /** - *

    To encrypt all communications between ML compute instances in distributed training, - * choose True. Encryption provides greater security for distributed training, - * but training might take longer. How long it takes depends on the amount of communication - * between compute instances, especially if you use a deep learning algorithms in - * distributed training.

    + *

    The ARN of a training job associated with this processing job.

    */ - EnableInterContainerTrafficEncryption?: boolean; + TrainingJobArn?: string; +} +export namespace DescribeProcessingJobResponse { /** - *

    A Boolean indicating whether managed spot training is enabled (True) or - * not (False).

    + * @internal */ - EnableManagedSpotTraining?: boolean; + export const filterSensitiveLog = (obj: DescribeProcessingJobResponse): any => ({ + ...obj, + }); +} +export interface DescribeProjectInput { /** - *

    Contains information about the output location for managed spot training checkpoint - * data.

    + *

    The name of the project to describe.

    */ - CheckpointConfig?: CheckpointConfig; + ProjectName: string | undefined; +} +export namespace DescribeProjectInput { /** - *

    The training time in seconds.

    + * @internal */ - TrainingTimeInSeconds?: number; + export const filterSensitiveLog = (obj: DescribeProjectInput): any => ({ + ...obj, + }); +} - /** - *

    The billable time in seconds. Billable time refers to the absolute wall-clock - * time.

    - *

    Multiply BillableTimeInSeconds by the number of instances - * (InstanceCount) in your training cluster to get the total compute time - * SageMaker bills you if you run distributed training. The formula is as follows: - * BillableTimeInSeconds * InstanceCount .

    - *

    You can calculate the savings from using managed spot training using the formula - * (1 - BillableTimeInSeconds / TrainingTimeInSeconds) * 100. For example, - * if BillableTimeInSeconds is 100 and TrainingTimeInSeconds is - * 500, the savings is 80%.

    - */ - BillableTimeInSeconds?: number; +export enum ProjectStatus { + CREATE_COMPLETED = "CreateCompleted", + CREATE_FAILED = "CreateFailed", + CREATE_IN_PROGRESS = "CreateInProgress", + DELETE_COMPLETED = "DeleteCompleted", + DELETE_FAILED = "DeleteFailed", + DELETE_IN_PROGRESS = "DeleteInProgress", + PENDING = "Pending", + UPDATE_COMPLETED = "UpdateCompleted", + UPDATE_FAILED = "UpdateFailed", + UPDATE_IN_PROGRESS = "UpdateInProgress", +} +/** + *

    Details of a provisioned service catalog product. For information about service catalog, + * see What is Amazon Web Services Service + * Catalog.

    + */ +export interface ServiceCatalogProvisionedProductDetails { /** - *

    Configuration information for the Debugger hook parameters, metric and tensor collections, and - * storage paths. To learn more about - * how to configure the DebugHookConfig parameter, - * see Use the SageMaker and Debugger Configuration API Operations to Create, Update, and Debug Your Training Job.

    + *

    The ID of the provisioned product.

    */ - DebugHookConfig?: DebugHookConfig; + ProvisionedProductId?: string; /** - *

    Associates a SageMaker job as a trial component with an experiment and trial. Specified when - * you call the following APIs:

    - *
      + *

      The current status of the product.

      + *
        *
      • - *

        - * CreateProcessingJob - *

        + *

        + * AVAILABLE - Stable state, ready to perform any operation. The most recent operation succeeded and completed.

        *
      • *
      • - *

        - * CreateTrainingJob - *

        + *

        + * UNDER_CHANGE - Transitive state. Operations performed might not have valid results. Wait for an AVAILABLE status before performing operations.

        *
      • *
      • - *

        - * CreateTransformJob - *

        + *

        + * TAINTED - Stable state, ready to perform any operation. The stack has completed the requested operation but is not exactly what was requested. For example, a request to update to a new version failed and the stack rolled back to the current version.

        + *
      • + *
      • + *

        + * ERROR - An unexpected error occurred. The provisioned product exists but the stack is not running. For example, CloudFormation received a parameter value that was not valid and could not launch the stack.

        + *
      • + *
      • + *

        + * PLAN_IN_PROGRESS - Transitive state. The plan operations were performed to provision a new product, but resources have not yet been created. After reviewing the list of resources to be created, execute the plan. Wait for an AVAILABLE status before performing operations.

        *
      • *
      */ - ExperimentConfig?: ExperimentConfig; - - /** - *

      Configuration information for Debugger rules for debugging output tensors.

      - */ - DebugRuleConfigurations?: DebugRuleConfiguration[]; - - /** - *

      Configuration of storage locations for the Debugger TensorBoard output data.

      - */ - TensorBoardOutputConfig?: TensorBoardOutputConfig; - - /** - *

      Evaluation status of Debugger rules for debugging on a training job.

      - */ - DebugRuleEvaluationStatuses?: DebugRuleEvaluationStatus[]; - - /** - *

      Configuration information for Debugger system monitoring, framework profiling, and - * storage paths.

      - */ - ProfilerConfig?: ProfilerConfig; - - /** - *

      Configuration information for Debugger rules for profiling system and framework - * metrics.

      - */ - ProfilerRuleConfigurations?: ProfilerRuleConfiguration[]; - - /** - *

      Evaluation status of Debugger rules for profiling on a training job.

      - */ - ProfilerRuleEvaluationStatuses?: ProfilerRuleEvaluationStatus[]; - - /** - *

      Profiling status of a training job.

      - */ - ProfilingStatus?: ProfilingStatus | string; - - /** - *

      The number of times to retry the job when the job fails due to an - * InternalServerError.

      - */ - RetryStrategy?: RetryStrategy; - - /** - *

      The environment variables to set in the Docker container.

      - */ - Environment?: Record; -} - -export namespace DescribeTrainingJobResponse { - /** - * @internal - */ - export const filterSensitiveLog = (obj: DescribeTrainingJobResponse): any => ({ - ...obj, - }); -} - -export interface DescribeTransformJobRequest { - /** - *

      The name of the transform job that you want to view details of.

      - */ - TransformJobName: string | undefined; + ProvisionedProductStatusMessage?: string; } -export namespace DescribeTransformJobRequest { +export namespace ServiceCatalogProvisionedProductDetails { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeTransformJobRequest): any => ({ + export const filterSensitiveLog = (obj: ServiceCatalogProvisionedProductDetails): any => ({ ...obj, }); } -export enum TransformJobStatus { - COMPLETED = "Completed", - FAILED = "Failed", - IN_PROGRESS = "InProgress", - STOPPED = "Stopped", - STOPPING = "Stopping", -} - -export interface DescribeTransformJobResponse { - /** - *

      The name of the transform job.

      - */ - TransformJobName: string | undefined; - - /** - *

      The Amazon Resource Name (ARN) of the transform job.

      - */ - TransformJobArn: string | undefined; - +export interface DescribeProjectOutput { /** - *

      The - * status of the transform job. If the transform job failed, the reason - * is returned in the FailureReason field.

      + *

      The Amazon Resource Name (ARN) of the project.

      */ - TransformJobStatus: TransformJobStatus | string | undefined; + ProjectArn: string | undefined; /** - *

      If the transform job failed, FailureReason describes - * why - * it failed. A transform job creates a log file, which includes error - * messages, and stores it - * as - * an Amazon S3 object. For more information, see Log Amazon SageMaker Events with - * Amazon CloudWatch.

      + *

      The name of the project.

      */ - FailureReason?: string; + ProjectName: string | undefined; /** - *

      The name of the model used in the transform job.

      + *

      The ID of the project.

      */ - ModelName: string | undefined; + ProjectId: string | undefined; /** - *

      The - * maximum number - * of - * parallel requests on each instance node - * that can be launched in a transform job. The default value is 1.

      + *

      The description of the project.

      */ - MaxConcurrentTransforms?: number; + ProjectDescription?: string; /** - *

      The timeout and maximum number of retries for processing a transform job - * invocation.

      + *

      Information used to provision a service catalog product. For information, see What is Amazon Web Services Service + * Catalog.

      */ - ModelClientConfig?: ModelClientConfig; + ServiceCatalogProvisioningDetails: ServiceCatalogProvisioningDetails | undefined; /** - *

      The - * maximum - * payload size, in MB, used in the - * transform job.

      + *

      Information about a provisioned service catalog product.

      */ - MaxPayloadInMB?: number; + ServiceCatalogProvisionedProductDetails?: ServiceCatalogProvisionedProductDetails; /** - *

      Specifies the number of records to include in a mini-batch for an HTTP inference - * request. - * A record - * is a single unit of input data that inference - * can be made on. For example, a single line in a CSV file is a record.

      - *

      To enable the batch strategy, you must set SplitType - * to - * Line, RecordIO, or - * TFRecord.

      + *

      The status of the project.

      */ - BatchStrategy?: BatchStrategy | string; + ProjectStatus: ProjectStatus | string | undefined; /** - *

      The - * environment variables to set in the Docker container. We support up to 16 key and values - * entries in the map.

      + *

      Information about the user who created or modified an experiment, trial, trial + * component, lineage group, or project.

      */ - Environment?: Record; + CreatedBy?: UserContext; /** - *

      Describes the dataset to be transformed and the Amazon S3 location where it is - * stored.

      + *

      The time when the project was created.

      */ - TransformInput: TransformInput | undefined; + CreationTime: Date | undefined; /** - *

      Identifies the Amazon S3 location where you want Amazon SageMaker to save the results from the - * transform job.

      + *

      The timestamp when project was last modified.

      */ - TransformOutput?: TransformOutput; + LastModifiedTime?: Date; /** - *

      Describes - * the resources, including ML instance types and ML instance count, to - * use for the transform job.

      + *

      Information about the user who created or modified an experiment, trial, trial + * component, lineage group, or project.

      */ - TransformResources: TransformResources | undefined; + LastModifiedBy?: UserContext; +} +export namespace DescribeProjectOutput { /** - *

      A timestamp that shows when the transform Job was created.

      + * @internal */ - CreationTime: Date | undefined; + export const filterSensitiveLog = (obj: DescribeProjectOutput): any => ({ + ...obj, + }); +} +export interface DescribeStudioLifecycleConfigRequest { /** - *

      Indicates when the transform job starts - * on - * ML instances. You are billed for the time interval between this time - * and the value of TransformEndTime.

      + *

      The name of the Studio Lifecycle Configuration to describe.

      */ - TransformStartTime?: Date; + StudioLifecycleConfigName: string | undefined; +} +export namespace DescribeStudioLifecycleConfigRequest { /** - *

      Indicates when the transform job has been - * - * completed, or has stopped or failed. You are billed for the time - * interval between this time and the value of TransformStartTime.

      + * @internal */ - TransformEndTime?: Date; + export const filterSensitiveLog = (obj: DescribeStudioLifecycleConfigRequest): any => ({ + ...obj, + }); +} +export interface DescribeStudioLifecycleConfigResponse { /** - *

      The Amazon Resource Name (ARN) of the Amazon SageMaker Ground Truth labeling job that created the - * transform or training job.

      + *

      The ARN of the Lifecycle Configuration to describe.

      */ - LabelingJobArn?: string; + StudioLifecycleConfigArn?: string; /** - *

      The Amazon Resource Name (ARN) of the AutoML transform job.

      + *

      The name of the Studio Lifecycle Configuration that is described.

      */ - AutoMLJobArn?: string; + StudioLifecycleConfigName?: string; /** - *

      The data structure used to specify the data to be used for inference in a batch - * transform job and to associate the data that is relevant to the prediction results in - * the output. The input filter provided allows you to exclude input data that is not - * needed for inference in a batch transform job. The output filter provided allows you to - * include input data relevant to interpreting the predictions in the output from the job. - * For more information, see Associate Prediction - * Results with their Corresponding Input Records.

      + *

      The creation time of the Studio Lifecycle Configuration.

      */ - DataProcessing?: DataProcessing; + CreationTime?: Date; /** - *

      Associates a SageMaker job as a trial component with an experiment and trial. Specified when - * you call the following APIs:

      - * + *

      This value is equivalent to CreationTime because Studio Lifecycle Configurations are immutable.

      */ - ExperimentConfig?: ExperimentConfig; -} + LastModifiedTime?: Date; -export namespace DescribeTransformJobResponse { /** - * @internal + *

      The content of your Studio Lifecycle Configuration script.

      */ - export const filterSensitiveLog = (obj: DescribeTransformJobResponse): any => ({ - ...obj, - }); -} + StudioLifecycleConfigContent?: string; -export interface DescribeTrialRequest { /** - *

      The name of the trial to describe.

      + *

      The App type that the Lifecycle Configuration is attached to.

      */ - TrialName: string | undefined; + StudioLifecycleConfigAppType?: StudioLifecycleConfigAppType | string; } -export namespace DescribeTrialRequest { +export namespace DescribeStudioLifecycleConfigResponse { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeTrialRequest): any => ({ + export const filterSensitiveLog = (obj: DescribeStudioLifecycleConfigResponse): any => ({ ...obj, }); } -/** - *

      The source of the trial.

      - */ -export interface TrialSource { - /** - *

      The Amazon Resource Name (ARN) of the source.

      - */ - SourceArn: string | undefined; - +export interface DescribeSubscribedWorkteamRequest { /** - *

      The source job type.

      + *

      The Amazon Resource Name (ARN) of the subscribed work team to describe.

      */ - SourceType?: string; + WorkteamArn: string | undefined; } -export namespace TrialSource { +export namespace DescribeSubscribedWorkteamRequest { /** * @internal */ - export const filterSensitiveLog = (obj: TrialSource): any => ({ + export const filterSensitiveLog = (obj: DescribeSubscribedWorkteamRequest): any => ({ ...obj, }); } -export interface DescribeTrialResponse { - /** - *

      The name of the trial.

      - */ - TrialName?: string; - +/** + *

      Describes a work team of a vendor that does the labeling job.

      + */ +export interface SubscribedWorkteam { /** - *

      The Amazon Resource Name (ARN) of the trial.

      + *

      The Amazon Resource Name (ARN) of the vendor that you have subscribed to.

      */ - TrialArn?: string; + WorkteamArn: string | undefined; /** - *

      The name of the trial as displayed. If DisplayName isn't specified, - * TrialName is displayed.

      + *

      The title of the service provided by the vendor in the Amazon Marketplace.

      */ - DisplayName?: string; + MarketplaceTitle?: string; /** - *

      The name of the experiment the trial is part of.

      + *

      The name of the vendor in the Amazon Marketplace.

      */ - ExperimentName?: string; + SellerName?: string; /** - *

      The Amazon Resource Name (ARN) of the source and, optionally, the job type.

      + *

      The description of the vendor from the Amazon Marketplace.

      */ - Source?: TrialSource; + MarketplaceDescription?: string; /** - *

      When the trial was created.

      + *

      Marketplace product listing ID.

      */ - CreationTime?: Date; + ListingId?: string; +} +export namespace SubscribedWorkteam { /** - *

      Who created the trial.

      + * @internal */ - CreatedBy?: UserContext; + export const filterSensitiveLog = (obj: SubscribedWorkteam): any => ({ + ...obj, + }); +} +export interface DescribeSubscribedWorkteamResponse { /** - *

      When the trial was last modified.

      + *

      A Workteam instance that contains information about the work team.

      */ - LastModifiedTime?: Date; + SubscribedWorkteam: SubscribedWorkteam | undefined; +} +export namespace DescribeSubscribedWorkteamResponse { /** - *

      Who last modified the trial.

      + * @internal */ - LastModifiedBy?: UserContext; + export const filterSensitiveLog = (obj: DescribeSubscribedWorkteamResponse): any => ({ + ...obj, + }); +} +export interface DescribeTrainingJobRequest { /** - *

      Metadata properties of the tracking entity, trial, or trial component.

      + *

      The name of the training job.

      */ - MetadataProperties?: MetadataProperties; + TrainingJobName: string | undefined; } -export namespace DescribeTrialResponse { +export namespace DescribeTrainingJobRequest { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeTrialResponse): any => ({ + export const filterSensitiveLog = (obj: DescribeTrainingJobRequest): any => ({ ...obj, }); } -export interface DescribeTrialComponentRequest { +/** + *

      The name, value, and date and time of a metric that was emitted to Amazon CloudWatch.

      + */ +export interface MetricData { + /** + *

      The name of the metric.

      + */ + MetricName?: string; + + /** + *

      The value of the metric.

      + */ + Value?: number; + /** - *

      The name of the trial component to describe.

      + *

      The date and time that the algorithm emitted the metric.

      */ - TrialComponentName: string | undefined; + Timestamp?: Date; } -export namespace DescribeTrialComponentRequest { +export namespace MetricData { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeTrialComponentRequest): any => ({ + export const filterSensitiveLog = (obj: MetricData): any => ({ ...obj, }); } /** - *

      A summary of the metrics of a trial component.

      + *

      Information about the status of the rule evaluation.

      */ -export interface TrialComponentMetricSummary { +export interface ProfilerRuleEvaluationStatus { /** - *

      The name of the metric.

      + *

      The name of the rule configuration.

      */ - MetricName?: string; + RuleConfigurationName?: string; /** - *

      The Amazon Resource Name (ARN) of the source.

      + *

      The Amazon Resource Name (ARN) of the rule evaluation job.

      */ - SourceArn?: string; + RuleEvaluationJobArn?: string; /** - *

      When the metric was last updated.

      + *

      Status of the rule evaluation.

      */ - TimeStamp?: Date; + RuleEvaluationStatus?: RuleEvaluationStatus | string; /** - *

      The maximum value of the metric.

      + *

      Details from the rule evaluation.

      */ - Max?: number; + StatusDetails?: string; /** - *

      The minimum value of the metric.

      + *

      Timestamp when the rule evaluation status was last modified.

      */ - Min?: number; + LastModifiedTime?: Date; +} +export namespace ProfilerRuleEvaluationStatus { /** - *

      The most recent value of the metric.

      + * @internal */ - Last?: number; + export const filterSensitiveLog = (obj: ProfilerRuleEvaluationStatus): any => ({ + ...obj, + }); +} + +export enum ProfilingStatus { + DISABLED = "Disabled", + ENABLED = "Enabled", +} + +export enum SecondaryStatus { + COMPLETED = "Completed", + DOWNLOADING = "Downloading", + DOWNLOADING_TRAINING_IMAGE = "DownloadingTrainingImage", + FAILED = "Failed", + INTERRUPTED = "Interrupted", + LAUNCHING_ML_INSTANCES = "LaunchingMLInstances", + MAX_RUNTIME_EXCEEDED = "MaxRuntimeExceeded", + MAX_WAIT_TIME_EXCEEDED = "MaxWaitTimeExceeded", + PREPARING_TRAINING_STACK = "PreparingTrainingStack", + RESTARTING = "Restarting", + STARTING = "Starting", + STOPPED = "Stopped", + STOPPING = "Stopping", + TRAINING = "Training", + UPDATING = "Updating", + UPLOADING = "Uploading", +} +/** + *

      An array element of DescribeTrainingJobResponse$SecondaryStatusTransitions. It provides + * additional details about a status that the training job has transitioned through. A + * training job can be in one of several states, for example, starting, downloading, + * training, or uploading. Within each state, there are a number of intermediate states. + * For example, within the starting state, SageMaker could be starting the training job or + * launching the ML instances. These transitional states are referred to as the job's + * secondary + * status. + *

      + *

      + */ +export interface SecondaryStatusTransition { /** - *

      The number of samples used to generate the metric.

      + *

      Contains a secondary status information from a training + * job.

      + *

      Status might be one of the following secondary statuses:

      + *
      + *
      InProgress
      + *
      + *
        + *
      • + *

        + * Starting + * - Starting the training job.

        + *
      • + *
      • + *

        + * Downloading - An optional stage for algorithms that + * support File training input mode. It indicates that + * data is being downloaded to the ML storage volumes.

        + *
      • + *
      • + *

        + * Training - Training is in progress.

        + *
      • + *
      • + *

        + * Uploading - Training is complete and the model + * artifacts are being uploaded to the S3 location.

        + *
      • + *
      + *
      + *
      Completed
      + *
      + *
        + *
      • + *

        + * Completed - The training job has completed.

        + *
      • + *
      + *
      + *
      Failed
      + *
      + *
        + *
      • + *

        + * Failed - The training job has failed. The reason for + * the failure is returned in the FailureReason field of + * DescribeTrainingJobResponse.

        + *
      • + *
      + *
      + *
      Stopped
      + *
      + *
        + *
      • + *

        + * MaxRuntimeExceeded - The job stopped because it + * exceeded the maximum allowed runtime.

        + *
      • + *
      • + *

        + * Stopped - The training job has stopped.

        + *
      • + *
      + *
      + *
      Stopping
      + *
      + *
        + *
      • + *

        + * Stopping - Stopping the training job.

        + *
      • + *
      + *
      + *
      + *

      We no longer support the following secondary statuses:

      + *
        + *
      • + *

        + * LaunchingMLInstances + *

        + *
      • + *
      • + *

        + * PreparingTrainingStack + *

        + *
      • + *
      • + *

        + * DownloadingTrainingImage + *

        + *
      • + *
      */ - Count?: number; + Status: SecondaryStatus | string | undefined; /** - *

      The average value of the metric.

      + *

      A timestamp that shows when the training job transitioned to the current secondary + * status state.

      */ - Avg?: number; + StartTime: Date | undefined; /** - *

      The standard deviation of the metric.

      + *

      A timestamp that shows when the training job transitioned out of this secondary status + * state into another secondary status state or when the training job has ended.

      */ - StdDev?: number; + EndTime?: Date; + + /** + *

      A detailed description of the progress within a secondary status. + *

      + *

      SageMaker provides secondary statuses and status messages that apply to each of + * them:

      + *
      + *
      Starting
      + *
      + *
        + *
      • + *

        Starting the training job.

        + *
      • + *
      • + *

        Launching requested ML + * instances.

        + *
      • + *
      • + *

        Insufficient + * capacity error from EC2 while launching instances, + * retrying!

        + *
      • + *
      • + *

        Launched + * instance was unhealthy, replacing it!

        + *
      • + *
      • + *

        Preparing the instances for training.

        + *
      • + *
      + *
      + *
      Training
      + *
      + *
        + *
      • + *

        Downloading the training image.

        + *
      • + *
      • + *

        Training + * image download completed. Training in + * progress.

        + *
      • + *
      + *
      + *
      + * + *

      Status messages are subject to change. Therefore, we recommend not including them + * in code that programmatically initiates actions. For examples, don't use status + * messages in if statements.

      + *
      + *

      To have an overview of your training job's progress, view + * TrainingJobStatus and SecondaryStatus in DescribeTrainingJob, and StatusMessage together. For + * example, at the start of a training job, you might see the following:

      + *
        + *
      • + *

        + * TrainingJobStatus - InProgress

        + *
      • + *
      • + *

        + * SecondaryStatus - Training

        + *
      • + *
      • + *

        + * StatusMessage - Downloading the training image

        + *
      • + *
      + */ + StatusMessage?: string; } -export namespace TrialComponentMetricSummary { +export namespace SecondaryStatusTransition { /** * @internal */ - export const filterSensitiveLog = (obj: TrialComponentMetricSummary): any => ({ + export const filterSensitiveLog = (obj: SecondaryStatusTransition): any => ({ ...obj, }); } -/** - *

      The Amazon Resource Name (ARN) and job type of the source of a trial component.

      - */ -export interface TrialComponentSource { +export interface DescribeTrainingJobResponse { /** - *

      The source ARN.

      + *

      Name of the model training job.

      */ - SourceArn: string | undefined; + TrainingJobName: string | undefined; /** - *

      The source job type.

      + *

      The Amazon Resource Name (ARN) of the training job.

      */ - SourceType?: string; -} + TrainingJobArn: string | undefined; -export namespace TrialComponentSource { /** - * @internal + *

      The Amazon Resource Name (ARN) of the associated hyperparameter tuning job if the + * training job was launched by a hyperparameter tuning job.

      */ - export const filterSensitiveLog = (obj: TrialComponentSource): any => ({ - ...obj, - }); -} + TuningJobArn?: string; -export interface DescribeTrialComponentResponse { /** - *

      The name of the trial component.

      + *

      The Amazon Resource Name (ARN) of the SageMaker Ground Truth labeling job that created the + * transform or training job.

      */ - TrialComponentName?: string; + LabelingJobArn?: string; /** - *

      The Amazon Resource Name (ARN) of the trial component.

      + *

      The Amazon Resource Name (ARN) of an AutoML job.

      */ - TrialComponentArn?: string; + AutoMLJobArn?: string; /** - *

      The name of the component as displayed. If DisplayName isn't specified, - * TrialComponentName is displayed.

      + *

      Information about the Amazon S3 location that is configured for storing model artifacts. + *

      */ - DisplayName?: string; + ModelArtifacts: ModelArtifacts | undefined; /** - *

      The Amazon Resource Name (ARN) of the source and, optionally, the job type.

      + *

      The status of the training job.

      + *

      SageMaker provides the following training job statuses:

      + *
        + *
      • + *

        + * InProgress - The training is in progress.

        + *
      • + *
      • + *

        + * Completed - The training job has completed.

        + *
      • + *
      • + *

        + * Failed - The training job has failed. To see the reason for the + * failure, see the FailureReason field in the response to a + * DescribeTrainingJobResponse call.

        + *
      • + *
      • + *

        + * Stopping - The training job is stopping.

        + *
      • + *
      • + *

        + * Stopped - The training job has stopped.

        + *
      • + *
      + *

      For more detailed information, see SecondaryStatus.

      */ - Source?: TrialComponentSource; + TrainingJobStatus: TrainingJobStatus | string | undefined; /** - *

      The status of the component. States include:

      - *
        + *

        Provides detailed information about the state of the training job. For detailed + * information on the secondary status of the training job, see StatusMessage + * under SecondaryStatusTransition.

        + *

        SageMaker provides primary statuses and secondary statuses that apply to each of + * them:

        + *
        + *
        InProgress
        + *
        + *
          + *
        • + *

          + * Starting + * - Starting the training job.

          + *
        • + *
        • + *

          + * Downloading - An optional stage for algorithms that + * support File training input mode. It indicates that + * data is being downloaded to the ML storage volumes.

          + *
        • + *
        • + *

          + * Training - Training is in progress.

          + *
        • + *
        • + *

          + * Interrupted - The job stopped because the managed + * spot training instances were interrupted.

          + *
        • + *
        • + *

          + * Uploading - Training is complete and the model + * artifacts are being uploaded to the S3 location.

          + *
        • + *
        + *
        + *
        Completed
        + *
        + *
          + *
        • + *

          + * Completed - The training job has completed.

          + *
        • + *
        + *
        + *
        Failed
        + *
        + *
          + *
        • + *

          + * Failed - The training job has failed. The reason for + * the failure is returned in the FailureReason field of + * DescribeTrainingJobResponse.

          + *
        • + *
        + *
        + *
        Stopped
        + *
        + *
          + *
        • + *

          + * MaxRuntimeExceeded - The job stopped because it + * exceeded the maximum allowed runtime.

          + *
        • + *
        • + *

          + * MaxWaitTimeExceeded - The job stopped because it + * exceeded the maximum allowed wait time.

          + *
        • + *
        • + *

          + * Stopped - The training job has stopped.

          + *
        • + *
        + *
        + *
        Stopping
        + *
        + *
          + *
        • + *

          + * Stopping - Stopping the training job.

          + *
        • + *
        + *
        + *
        + * + * + *

        Valid values for SecondaryStatus are subject to change.

        + *
        + *

        We no longer support the following secondary statuses:

        + *
          *
        • - *

          InProgress

          + *

          + * LaunchingMLInstances + *

          *
        • *
        • - *

          Completed

          + *

          + * PreparingTrainingStack + *

          *
        • *
        • - *

          Failed

          + *

          + * DownloadingTrainingImage + *

          *
        • *
        */ - Status?: TrialComponentStatus; + SecondaryStatus: SecondaryStatus | string | undefined; /** - *

        When the component started.

        + *

        If the training job failed, the reason it failed.

        */ - StartTime?: Date; + FailureReason?: string; /** - *

        When the component ended.

        + *

        Algorithm-specific parameters.

        */ - EndTime?: Date; + HyperParameters?: Record; /** - *

        When the component was created.

        + *

        Information about the algorithm used for training, and algorithm metadata. + *

        */ - CreationTime?: Date; + AlgorithmSpecification: AlgorithmSpecification | undefined; /** - *

        Who created the trial component.

        + *

        The Amazon Web Services Identity and Access Management (IAM) role configured for the training job.

        */ - CreatedBy?: UserContext; + RoleArn?: string; /** - *

        When the component was last modified.

        + *

        An array of Channel objects that describes each data input channel. + *

        */ - LastModifiedTime?: Date; + InputDataConfig?: Channel[]; /** - *

        Who last modified the component.

        + *

        The S3 path where model artifacts that you configured when creating the job are + * stored. SageMaker creates subfolders for model artifacts.

        */ - LastModifiedBy?: UserContext; + OutputDataConfig?: OutputDataConfig; /** - *

        The hyperparameters of the component.

        + *

        Resources, including ML compute instances and ML storage volumes, that are + * configured for model training.

        */ - Parameters?: Record; + ResourceConfig: ResourceConfig | undefined; /** - *

        The input artifacts of the component.

        + *

        A VpcConfig object that specifies the VPC that this training job has + * access to. For more information, see Protect Training Jobs by Using an Amazon + * Virtual Private Cloud.

        */ - InputArtifacts?: Record; + VpcConfig?: VpcConfig; /** - *

        The output artifacts of the component.

        + *

        Specifies a limit to how long a model training job can run. It also specifies how long + * a managed Spot training job has to complete. When the job reaches the time limit, SageMaker + * ends the training job. Use this API to cap model training costs.

        + *

        To stop a job, SageMaker sends the algorithm the SIGTERM signal, which delays + * job termination for 120 seconds. Algorithms can use this 120-second window to save the + * model artifacts, so the results of training are not lost.

        + */ + StoppingCondition: StoppingCondition | undefined; + + /** + *

        A timestamp that indicates when the training job was created.

        + */ + CreationTime: Date | undefined; + + /** + *

        Indicates the time when the training job starts on training instances. You are + * billed for the time interval between this time and the value of + * TrainingEndTime. The start time in CloudWatch Logs might be later than this time. + * The difference is due to the time it takes to download the training data and to the size + * of the training container.

        + */ + TrainingStartTime?: Date; + + /** + *

        Indicates the time when the training job ends on training instances. You are billed + * for the time interval between the value of TrainingStartTime and this time. + * For successful jobs and stopped jobs, this is the time after model artifacts are + * uploaded. For failed jobs, this is the time when SageMaker detects a job failure.

        + */ + TrainingEndTime?: Date; + + /** + *

        A timestamp that indicates when the status of the training job was last + * modified.

        + */ + LastModifiedTime?: Date; + + /** + *

        A history of all of the secondary statuses that the training job has transitioned + * through.

        */ - OutputArtifacts?: Record; + SecondaryStatusTransitions?: SecondaryStatusTransition[]; /** - *

        Metadata properties of the tracking entity, trial, or trial component.

        + *

        A collection of MetricData objects that specify the names, values, and + * dates and times that the training algorithm emitted to Amazon CloudWatch.

        */ - MetadataProperties?: MetadataProperties; + FinalMetricDataList?: MetricData[]; /** - *

        The metrics for the component.

        + *

        If you want to allow inbound or outbound network calls, except for calls between peers + * within a training cluster for distributed training, choose True. If you + * enable network isolation for training jobs that are configured to use a VPC, SageMaker + * downloads and uploads customer data and model artifacts through the specified VPC, but + * the training container does not have network access.

        */ - Metrics?: TrialComponentMetricSummary[]; + EnableNetworkIsolation?: boolean; /** - *

        The Amazon Resource Name (ARN) of the lineage group.

        + *

        To encrypt all communications between ML compute instances in distributed training, + * choose True. Encryption provides greater security for distributed training, + * but training might take longer. How long it takes depends on the amount of communication + * between compute instances, especially if you use a deep learning algorithms in + * distributed training.

        */ - LineageGroupArn?: string; -} + EnableInterContainerTrafficEncryption?: boolean; -export namespace DescribeTrialComponentResponse { /** - * @internal + *

        A Boolean indicating whether managed spot training is enabled (True) or + * not (False).

        */ - export const filterSensitiveLog = (obj: DescribeTrialComponentResponse): any => ({ - ...obj, - ...(obj.Parameters && { - Parameters: Object.entries(obj.Parameters).reduce( - (acc: any, [key, value]: [string, TrialComponentParameterValue]) => ({ - ...acc, - [key]: TrialComponentParameterValue.filterSensitiveLog(value), - }), - {} - ), - }), - }); -} + EnableManagedSpotTraining?: boolean; -export interface DescribeUserProfileRequest { /** - *

        The domain ID.

        + *

        Contains information about the output location for managed spot training checkpoint + * data.

        */ - DomainId: string | undefined; + CheckpointConfig?: CheckpointConfig; /** - *

        The user profile name. This value is not case sensitive.

        + *

        The training time in seconds.

        */ - UserProfileName: string | undefined; -} + TrainingTimeInSeconds?: number; -export namespace DescribeUserProfileRequest { /** - * @internal + *

        The billable time in seconds. Billable time refers to the absolute wall-clock + * time.

        + *

        Multiply BillableTimeInSeconds by the number of instances + * (InstanceCount) in your training cluster to get the total compute time + * SageMaker bills you if you run distributed training. The formula is as follows: + * BillableTimeInSeconds * InstanceCount .

        + *

        You can calculate the savings from using managed spot training using the formula + * (1 - BillableTimeInSeconds / TrainingTimeInSeconds) * 100. For example, + * if BillableTimeInSeconds is 100 and TrainingTimeInSeconds is + * 500, the savings is 80%.

        */ - export const filterSensitiveLog = (obj: DescribeUserProfileRequest): any => ({ - ...obj, - }); -} - -export enum UserProfileStatus { - Delete_Failed = "Delete_Failed", - Deleting = "Deleting", - Failed = "Failed", - InService = "InService", - Pending = "Pending", - Update_Failed = "Update_Failed", - Updating = "Updating", -} + BillableTimeInSeconds?: number; -export interface DescribeUserProfileResponse { /** - *

        The ID of the domain that contains the profile.

        + *

        Configuration information for the Debugger hook parameters, metric and tensor collections, and + * storage paths. To learn more about + * how to configure the DebugHookConfig parameter, + * see Use the SageMaker and Debugger Configuration API Operations to Create, Update, and Debug Your Training Job.

        */ - DomainId?: string; + DebugHookConfig?: DebugHookConfig; /** - *

        The user profile Amazon Resource Name (ARN).

        + *

        Associates a SageMaker job as a trial component with an experiment and trial. Specified when + * you call the following APIs:

        + * */ - UserProfileArn?: string; + ExperimentConfig?: ExperimentConfig; /** - *

        The user profile name.

        + *

        Configuration information for Debugger rules for debugging output tensors.

        */ - UserProfileName?: string; + DebugRuleConfigurations?: DebugRuleConfiguration[]; /** - *

        The ID of the user's profile in the Amazon Elastic File System (EFS) volume.

        + *

        Configuration of storage locations for the Debugger TensorBoard output data.

        */ - HomeEfsFileSystemUid?: string; + TensorBoardOutputConfig?: TensorBoardOutputConfig; /** - *

        The status.

        + *

        Evaluation status of Debugger rules for debugging on a training job.

        */ - Status?: UserProfileStatus | string; + DebugRuleEvaluationStatuses?: DebugRuleEvaluationStatus[]; /** - *

        The last modified time.

        + *

        Configuration information for Debugger system monitoring, framework profiling, and + * storage paths.

        */ - LastModifiedTime?: Date; + ProfilerConfig?: ProfilerConfig; /** - *

        The creation time.

        + *

        Configuration information for Debugger rules for profiling system and framework + * metrics.

        */ - CreationTime?: Date; + ProfilerRuleConfigurations?: ProfilerRuleConfiguration[]; /** - *

        The failure reason.

        + *

        Evaluation status of Debugger rules for profiling on a training job.

        */ - FailureReason?: string; + ProfilerRuleEvaluationStatuses?: ProfilerRuleEvaluationStatus[]; /** - *

        The SSO user identifier.

        + *

        Profiling status of a training job.

        */ - SingleSignOnUserIdentifier?: string; + ProfilingStatus?: ProfilingStatus | string; /** - *

        The SSO user value.

        + *

        The number of times to retry the job when the job fails due to an + * InternalServerError.

        */ - SingleSignOnUserValue?: string; + RetryStrategy?: RetryStrategy; /** - *

        A collection of settings.

        + *

        The environment variables to set in the Docker container.

        */ - UserSettings?: UserSettings; + Environment?: Record; } -export namespace DescribeUserProfileResponse { +export namespace DescribeTrainingJobResponse { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeUserProfileResponse): any => ({ + export const filterSensitiveLog = (obj: DescribeTrainingJobResponse): any => ({ ...obj, }); } -export interface DescribeWorkforceRequest { +export interface DescribeTransformJobRequest { /** - *

        The name of the private workforce whose access you want to restrict. - * WorkforceName is automatically set to default when a - * workforce is created and cannot be modified.

        + *

        The name of the transform job that you want to view details of.

        */ - WorkforceName: string | undefined; + TransformJobName: string | undefined; } -export namespace DescribeWorkforceRequest { +export namespace DescribeTransformJobRequest { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeWorkforceRequest): any => ({ + export const filterSensitiveLog = (obj: DescribeTransformJobRequest): any => ({ ...obj, }); } -/** - *

        Your OIDC IdP workforce configuration.

        - */ -export interface OidcConfigForResponse { - /** - *

        The OIDC IdP client ID used to configure your private workforce.

        - */ - ClientId?: string; - - /** - *

        The OIDC IdP issuer used to configure your private workforce.

        - */ - Issuer?: string; - - /** - *

        The OIDC IdP authorization endpoint used to configure your private workforce.

        - */ - AuthorizationEndpoint?: string; - - /** - *

        The OIDC IdP token endpoint used to configure your private workforce.

        - */ - TokenEndpoint?: string; +export enum TransformJobStatus { + COMPLETED = "Completed", + FAILED = "Failed", + IN_PROGRESS = "InProgress", + STOPPED = "Stopped", + STOPPING = "Stopping", +} +export interface DescribeTransformJobResponse { /** - *

        The OIDC IdP user information endpoint used to configure your private workforce.

        + *

        The name of the transform job.

        */ - UserInfoEndpoint?: string; + TransformJobName: string | undefined; /** - *

        The OIDC IdP logout endpoint used to configure your private workforce.

        + *

        The Amazon Resource Name (ARN) of the transform job.

        */ - LogoutEndpoint?: string; + TransformJobArn: string | undefined; /** - *

        The OIDC IdP JSON Web Key Set (Jwks) URI used to configure your private workforce.

        + *

        The + * status of the transform job. If the transform job failed, the reason + * is returned in the FailureReason field.

        */ - JwksUri?: string; -} + TransformJobStatus: TransformJobStatus | string | undefined; -export namespace OidcConfigForResponse { /** - * @internal + *

        If the transform job failed, FailureReason describes + * why + * it failed. A transform job creates a log file, which includes error + * messages, and stores it + * as + * an Amazon S3 object. For more information, see Log Amazon SageMaker Events with + * Amazon CloudWatch.

        */ - export const filterSensitiveLog = (obj: OidcConfigForResponse): any => ({ - ...obj, - }); -} - -export enum WorkforceStatus { - ACTIVE = "Active", - DELETING = "Deleting", - FAILED = "Failed", - INITIALIZING = "Initializing", - UPDATING = "Updating", -} + FailureReason?: string; -/** - *

        A VpcConfig object that specifies the VPC that you want your workforce to connect to.

        - */ -export interface WorkforceVpcConfigResponse { /** - *

        The ID of the VPC that the workforce uses for communication.

        + *

        The name of the model used in the transform job.

        */ - VpcId: string | undefined; + ModelName: string | undefined; /** - *

        The VPC security group IDs, in the form sg-xxxxxxxx. The security groups must be for the same VPC as specified in the subnet.

        + *

        The + * maximum number + * of + * parallel requests on each instance node + * that can be launched in a transform job. The default value is 1.

        */ - SecurityGroupIds: string[] | undefined; + MaxConcurrentTransforms?: number; /** - *

        The ID of the subnets in the VPC that you want to connect.

        + *

        The timeout and maximum number of retries for processing a transform job + * invocation.

        */ - Subnets: string[] | undefined; + ModelClientConfig?: ModelClientConfig; /** - *

        The IDs for the VPC service endpoints of your VPC workforce when it is created and updated.

        + *

        The + * maximum + * payload size, in MB, used in the + * transform job.

        */ - VpcEndpointId?: string; -} + MaxPayloadInMB?: number; -export namespace WorkforceVpcConfigResponse { /** - * @internal + *

        Specifies the number of records to include in a mini-batch for an HTTP inference + * request. + * A record + * is a single unit of input data that inference + * can be made on. For example, a single line in a CSV file is a record.

        + *

        To enable the batch strategy, you must set SplitType + * to + * Line, RecordIO, or + * TFRecord.

        */ - export const filterSensitiveLog = (obj: WorkforceVpcConfigResponse): any => ({ - ...obj, - }); -} + BatchStrategy?: BatchStrategy | string; -/** - *

        A single private workforce, which is automatically created when you create your first - * private work team. You can create one private work force in each Amazon Web Services Region. By default, - * any workforce-related API operation used in a specific region will apply to the - * workforce created in that region. To learn how to create a private workforce, see Create a Private Workforce.

        - */ -export interface Workforce { /** - *

        The name of the private workforce.

        + *

        The + * environment variables to set in the Docker container. We support up to 16 key and values + * entries in the map.

        */ - WorkforceName: string | undefined; + Environment?: Record; /** - *

        The Amazon Resource Name (ARN) of the private workforce.

        + *

        Describes the dataset to be transformed and the Amazon S3 location where it is + * stored.

        */ - WorkforceArn: string | undefined; + TransformInput: TransformInput | undefined; /** - *

        The most recent date that was used to - * successfully add one or more IP address ranges (CIDRs) to a private workforce's - * allow list.

        + *

        Identifies the Amazon S3 location where you want Amazon SageMaker to save the results from the + * transform job.

        */ - LastUpdatedDate?: Date; + TransformOutput?: TransformOutput; /** - *

        A list of one to ten IP address ranges (CIDRs) to be added to the - * workforce allow list. By default, a workforce isn't restricted to specific IP addresses.

        + *

        Describes + * the resources, including ML instance types and ML instance count, to + * use for the transform job.

        */ - SourceIpConfig?: SourceIpConfig; + TransformResources: TransformResources | undefined; /** - *

        The subdomain for your OIDC Identity Provider.

        + *

        A timestamp that shows when the transform Job was created.

        */ - SubDomain?: string; + CreationTime: Date | undefined; /** - *

        The configuration of an Amazon Cognito workforce. - * A single Cognito workforce is created using and corresponds to a single - * - * Amazon Cognito user pool.

        + *

        Indicates when the transform job starts + * on + * ML instances. You are billed for the time interval between this time + * and the value of TransformEndTime.

        */ - CognitoConfig?: CognitoConfig; + TransformStartTime?: Date; /** - *

        The configuration of an OIDC Identity Provider (IdP) private workforce.

        + *

        Indicates when the transform job has been + * + * completed, or has stopped or failed. You are billed for the time + * interval between this time and the value of TransformStartTime.

        */ - OidcConfig?: OidcConfigForResponse; + TransformEndTime?: Date; /** - *

        The date that the workforce is created.

        + *

        The Amazon Resource Name (ARN) of the Amazon SageMaker Ground Truth labeling job that created the + * transform or training job.

        */ - CreateDate?: Date; + LabelingJobArn?: string; /** - *

        The configuration of a VPC workforce.

        + *

        The Amazon Resource Name (ARN) of the AutoML transform job.

        */ - WorkforceVpcConfig?: WorkforceVpcConfigResponse; + AutoMLJobArn?: string; /** - *

        The status of your workforce.

        + *

        The data structure used to specify the data to be used for inference in a batch + * transform job and to associate the data that is relevant to the prediction results in + * the output. The input filter provided allows you to exclude input data that is not + * needed for inference in a batch transform job. The output filter provided allows you to + * include input data relevant to interpreting the predictions in the output from the job. + * For more information, see Associate Prediction + * Results with their Corresponding Input Records.

        */ - Status?: WorkforceStatus | string; + DataProcessing?: DataProcessing; /** - *

        The reason your workforce failed.

        + *

        Associates a SageMaker job as a trial component with an experiment and trial. Specified when + * you call the following APIs:

        + * */ - FailureReason?: string; + ExperimentConfig?: ExperimentConfig; } -export namespace Workforce { +export namespace DescribeTransformJobResponse { /** * @internal */ - export const filterSensitiveLog = (obj: Workforce): any => ({ + export const filterSensitiveLog = (obj: DescribeTransformJobResponse): any => ({ ...obj, }); } -export interface DescribeWorkforceResponse { +export interface DescribeTrialRequest { /** - *

        A single private workforce, which is automatically created when you create your first - * private work team. You can create one private work force in each Amazon Web Services Region. By default, - * any workforce-related API operation used in a specific region will apply to the - * workforce created in that region. To learn how to create a private workforce, see Create a Private Workforce.

        + *

        The name of the trial to describe.

        */ - Workforce: Workforce | undefined; + TrialName: string | undefined; } -export namespace DescribeWorkforceResponse { +export namespace DescribeTrialRequest { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeWorkforceResponse): any => ({ + export const filterSensitiveLog = (obj: DescribeTrialRequest): any => ({ ...obj, }); } -export interface DescribeWorkteamRequest { +/** + *

        The source of the trial.

        + */ +export interface TrialSource { /** - *

        The name of the work team to return a description of.

        + *

        The Amazon Resource Name (ARN) of the source.

        */ - WorkteamName: string | undefined; + SourceArn: string | undefined; + + /** + *

        The source job type.

        + */ + SourceType?: string; } -export namespace DescribeWorkteamRequest { +export namespace TrialSource { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeWorkteamRequest): any => ({ + export const filterSensitiveLog = (obj: TrialSource): any => ({ ...obj, }); } -/** - *

        Provides details about a labeling work team.

        - */ -export interface Workteam { +export interface DescribeTrialResponse { /** - *

        The name of the work team.

        + *

        The name of the trial.

        */ - WorkteamName: string | undefined; + TrialName?: string; /** - *

        A list of MemberDefinition objects that contains objects that identify - * the workers that make up the work team.

        - *

        Workforces can be created using Amazon Cognito or your own OIDC Identity Provider (IdP). - * For private workforces created using Amazon Cognito use - * CognitoMemberDefinition. For workforces created using your own OIDC identity - * provider (IdP) use OidcMemberDefinition.

        + *

        The Amazon Resource Name (ARN) of the trial.

        */ - MemberDefinitions: MemberDefinition[] | undefined; + TrialArn?: string; /** - *

        The Amazon Resource Name (ARN) that identifies the work team.

        + *

        The name of the trial as displayed. If DisplayName isn't specified, + * TrialName is displayed.

        */ - WorkteamArn: string | undefined; + DisplayName?: string; /** - *

        The Amazon Resource Name (ARN) of the workforce.

        + *

        The name of the experiment the trial is part of.

        */ - WorkforceArn?: string; + ExperimentName?: string; /** - *

        The Amazon Marketplace identifier for a vendor's work team.

        + *

        The Amazon Resource Name (ARN) of the source and, optionally, the job type.

        */ - ProductListingIds?: string[]; + Source?: TrialSource; /** - *

        A description of the work team.

        + *

        When the trial was created.

        */ - Description: string | undefined; + CreationTime?: Date; /** - *

        The URI of the labeling job's user interface. Workers open this URI to start labeling - * your data objects.

        + *

        Who created the trial.

        */ - SubDomain?: string; + CreatedBy?: UserContext; /** - *

        The date and time that the work team was created (timestamp).

        + *

        When the trial was last modified.

        */ - CreateDate?: Date; + LastModifiedTime?: Date; /** - *

        The date and time that the work team was last updated (timestamp).

        + *

        Who last modified the trial.

        */ - LastUpdatedDate?: Date; + LastModifiedBy?: UserContext; /** - *

        Configures SNS notifications of available or expiring work items for work - * teams.

        + *

        Metadata properties of the tracking entity, trial, or trial component.

        */ - NotificationConfiguration?: NotificationConfiguration; + MetadataProperties?: MetadataProperties; } -export namespace Workteam { +export namespace DescribeTrialResponse { /** * @internal */ - export const filterSensitiveLog = (obj: Workteam): any => ({ + export const filterSensitiveLog = (obj: DescribeTrialResponse): any => ({ ...obj, }); } -export interface DescribeWorkteamResponse { +export interface DescribeTrialComponentRequest { /** - *

        A Workteam instance that contains information about the work team. - *

        + *

        The name of the trial component to describe.

        */ - Workteam: Workteam | undefined; + TrialComponentName: string | undefined; } -export namespace DescribeWorkteamResponse { +export namespace DescribeTrialComponentRequest { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeWorkteamResponse): any => ({ + export const filterSensitiveLog = (obj: DescribeTrialComponentRequest): any => ({ ...obj, }); } /** - *

        Specifies weight and capacity values for a production variant.

        + *

        A summary of the metrics of a trial component.

        */ -export interface DesiredWeightAndCapacity { - /** - *

        The name of the variant to update.

        - */ - VariantName: string | undefined; - - /** - *

        The variant's weight.

        - */ - DesiredWeight?: number; - - /** - *

        The variant's capacity.

        - */ - DesiredInstanceCount?: number; -} - -export namespace DesiredWeightAndCapacity { +export interface TrialComponentMetricSummary { /** - * @internal + *

        The name of the metric.

        */ - export const filterSensitiveLog = (obj: DesiredWeightAndCapacity): any => ({ - ...obj, - }); -} + MetricName?: string; -/** - *

        Information of a particular device.

        - */ -export interface Device { /** - *

        The name of the device.

        + *

        The Amazon Resource Name (ARN) of the source.

        */ - DeviceName: string | undefined; + SourceArn?: string; /** - *

        Description of the device.

        + *

        When the metric was last updated.

        */ - Description?: string; + TimeStamp?: Date; /** - *

        Amazon Web Services Internet of Things (IoT) object name.

        + *

        The maximum value of the metric.

        */ - IotThingName?: string; -} + Max?: number; -export namespace Device { /** - * @internal + *

        The minimum value of the metric.

        */ - export const filterSensitiveLog = (obj: Device): any => ({ - ...obj, - }); -} + Min?: number; -/** - *

        Summary of the device fleet.

        - */ -export interface DeviceFleetSummary { /** - *

        Amazon Resource Name (ARN) of the device fleet.

        + *

        The most recent value of the metric.

        */ - DeviceFleetArn: string | undefined; + Last?: number; /** - *

        Name of the device fleet.

        + *

        The number of samples used to generate the metric.

        */ - DeviceFleetName: string | undefined; + Count?: number; /** - *

        Timestamp of when the device fleet was created.

        + *

        The average value of the metric.

        */ - CreationTime?: Date; + Avg?: number; /** - *

        Timestamp of when the device fleet was last updated.

        + *

        The standard deviation of the metric.

        */ - LastModifiedTime?: Date; + StdDev?: number; } -export namespace DeviceFleetSummary { +export namespace TrialComponentMetricSummary { /** * @internal */ - export const filterSensitiveLog = (obj: DeviceFleetSummary): any => ({ + export const filterSensitiveLog = (obj: TrialComponentMetricSummary): any => ({ ...obj, }); } /** - *

        Status of devices.

        + *

        The Amazon Resource Name (ARN) and job type of the source of a trial component.

        */ -export interface DeviceStats { +export interface TrialComponentSource { /** - *

        The number of devices connected with a heartbeat.

        + *

        The source ARN.

        */ - ConnectedDeviceCount: number | undefined; + SourceArn: string | undefined; /** - *

        The number of registered devices.

        + *

        The source job type.

        */ - RegisteredDeviceCount: number | undefined; + SourceType?: string; } -export namespace DeviceStats { +export namespace TrialComponentSource { /** * @internal */ - export const filterSensitiveLog = (obj: DeviceStats): any => ({ + export const filterSensitiveLog = (obj: TrialComponentSource): any => ({ ...obj, }); } -/** - *

        Summary of model on edge device.

        - */ -export interface EdgeModelSummary { +export interface DescribeTrialComponentResponse { /** - *

        The name of the model.

        + *

        The name of the trial component.

        */ - ModelName: string | undefined; + TrialComponentName?: string; /** - *

        The version model.

        + *

        The Amazon Resource Name (ARN) of the trial component.

        */ - ModelVersion: string | undefined; -} + TrialComponentArn?: string; -export namespace EdgeModelSummary { /** - * @internal + *

        The name of the component as displayed. If DisplayName isn't specified, + * TrialComponentName is displayed.

        */ - export const filterSensitiveLog = (obj: EdgeModelSummary): any => ({ - ...obj, - }); -} + DisplayName?: string; -/** - *

        Summary of the device.

        - */ -export interface DeviceSummary { /** - *

        The unique identifier of the device.

        + *

        The Amazon Resource Name (ARN) of the source and, optionally, the job type.

        */ - DeviceName: string | undefined; + Source?: TrialComponentSource; /** - *

        Amazon Resource Name (ARN) of the device.

        + *

        The status of the component. States include:

        + *
          + *
        • + *

          InProgress

          + *
        • + *
        • + *

          Completed

          + *
        • + *
        • + *

          Failed

          + *
        • + *
        */ - DeviceArn: string | undefined; + Status?: TrialComponentStatus; /** - *

        A description of the device.

        + *

        When the component started.

        */ - Description?: string; + StartTime?: Date; /** - *

        The name of the fleet the device belongs to.

        + *

        When the component ended.

        */ - DeviceFleetName?: string; + EndTime?: Date; /** - *

        The Amazon Web Services Internet of Things (IoT) object thing name associated with the device..

        + *

        When the component was created.

        */ - IotThingName?: string; + CreationTime?: Date; /** - *

        The timestamp of the last registration or de-reregistration.

        + *

        Who created the trial component.

        */ - RegistrationTime?: Date; + CreatedBy?: UserContext; /** - *

        The last heartbeat received from the device.

        + *

        When the component was last modified.

        */ - LatestHeartbeat?: Date; + LastModifiedTime?: Date; /** - *

        Models on the device.

        + *

        Who last modified the component.

        */ - Models?: EdgeModelSummary[]; + LastModifiedBy?: UserContext; /** - *

        Edge Manager agent version.

        + *

        The hyperparameters of the component.

        */ - AgentVersion?: string; -} + Parameters?: Record; -export namespace DeviceSummary { /** - * @internal + *

        The input artifacts of the component.

        */ - export const filterSensitiveLog = (obj: DeviceSummary): any => ({ - ...obj, - }); -} - -export enum Direction { - ASCENDANTS = "Ascendants", - BOTH = "Both", - DESCENDANTS = "Descendants", -} - -export interface DisableSagemakerServicecatalogPortfolioInput {} + InputArtifacts?: Record; -export namespace DisableSagemakerServicecatalogPortfolioInput { /** - * @internal + *

        The output artifacts of the component.

        */ - export const filterSensitiveLog = (obj: DisableSagemakerServicecatalogPortfolioInput): any => ({ - ...obj, - }); -} - -export interface DisableSagemakerServicecatalogPortfolioOutput {} + OutputArtifacts?: Record; -export namespace DisableSagemakerServicecatalogPortfolioOutput { /** - * @internal + *

        Metadata properties of the tracking entity, trial, or trial component.

        */ - export const filterSensitiveLog = (obj: DisableSagemakerServicecatalogPortfolioOutput): any => ({ - ...obj, - }); -} + MetadataProperties?: MetadataProperties; -export interface DisassociateTrialComponentRequest { /** - *

        The name of the component to disassociate from the trial.

        + *

        The metrics for the component.

        */ - TrialComponentName: string | undefined; + Metrics?: TrialComponentMetricSummary[]; /** - *

        The name of the trial to disassociate from.

        + *

        The Amazon Resource Name (ARN) of the lineage group.

        */ - TrialName: string | undefined; + LineageGroupArn?: string; } -export namespace DisassociateTrialComponentRequest { +export namespace DescribeTrialComponentResponse { /** * @internal */ - export const filterSensitiveLog = (obj: DisassociateTrialComponentRequest): any => ({ + export const filterSensitiveLog = (obj: DescribeTrialComponentResponse): any => ({ ...obj, + ...(obj.Parameters && { + Parameters: Object.entries(obj.Parameters).reduce( + (acc: any, [key, value]: [string, TrialComponentParameterValue]) => ({ + ...acc, + [key]: TrialComponentParameterValue.filterSensitiveLog(value), + }), + {} + ), + }), }); } -export interface DisassociateTrialComponentResponse { +export interface DescribeUserProfileRequest { /** - *

        The ARN of the trial component.

        + *

        The domain ID.

        */ - TrialComponentArn?: string; + DomainId: string | undefined; /** - *

        The Amazon Resource Name (ARN) of the trial.

        + *

        The user profile name. This value is not case sensitive.

        */ - TrialArn?: string; + UserProfileName: string | undefined; } -export namespace DisassociateTrialComponentResponse { +export namespace DescribeUserProfileRequest { /** * @internal */ - export const filterSensitiveLog = (obj: DisassociateTrialComponentResponse): any => ({ + export const filterSensitiveLog = (obj: DescribeUserProfileRequest): any => ({ ...obj, }); } -/** - *

        The domain's details.

        - */ -export interface DomainDetails { +export enum UserProfileStatus { + Delete_Failed = "Delete_Failed", + Deleting = "Deleting", + Failed = "Failed", + InService = "InService", + Pending = "Pending", + Update_Failed = "Update_Failed", + Updating = "Updating", +} + +export interface DescribeUserProfileResponse { /** - *

        The domain's Amazon Resource Name (ARN).

        + *

        The ID of the domain that contains the profile.

        */ - DomainArn?: string; + DomainId?: string; /** - *

        The domain ID.

        + *

        The user profile Amazon Resource Name (ARN).

        */ - DomainId?: string; + UserProfileArn?: string; /** - *

        The domain name.

        + *

        The user profile name.

        */ - DomainName?: string; + UserProfileName?: string; /** - *

        The status.

        + *

        The ID of the user's profile in the Amazon Elastic File System (EFS) volume.

        */ - Status?: DomainStatus | string; + HomeEfsFileSystemUid?: string; /** - *

        The creation time.

        + *

        The status.

        */ - CreationTime?: Date; + Status?: UserProfileStatus | string; /** *

        The last modified time.

        @@ -4338,5153 +4272,5221 @@ export interface DomainDetails { LastModifiedTime?: Date; /** - *

        The domain's URL.

        + *

        The creation time.

        */ - Url?: string; -} + CreationTime?: Date; -export namespace DomainDetails { /** - * @internal + *

        The failure reason.

        */ - export const filterSensitiveLog = (obj: DomainDetails): any => ({ - ...obj, - }); -} + FailureReason?: string; + + /** + *

        The SSO user identifier.

        + */ + SingleSignOnUserIdentifier?: string; -/** - *

        A collection of settings that update the current configuration for the RStudioServerPro Domain-level app.

        - */ -export interface RStudioServerProDomainSettingsForUpdate { /** - *

        The execution role for the RStudioServerPro Domain-level app.

        + *

        The SSO user value.

        */ - DomainExecutionRoleArn: string | undefined; + SingleSignOnUserValue?: string; /** - *

        Specifies the ARN's of a SageMaker image and SageMaker image version, and the instance type that - * the version runs on.

        + *

        A collection of settings.

        */ - DefaultResourceSpec?: ResourceSpec; + UserSettings?: UserSettings; } -export namespace RStudioServerProDomainSettingsForUpdate { +export namespace DescribeUserProfileResponse { /** * @internal */ - export const filterSensitiveLog = (obj: RStudioServerProDomainSettingsForUpdate): any => ({ + export const filterSensitiveLog = (obj: DescribeUserProfileResponse): any => ({ ...obj, }); } -/** - *

        A collection of Domain configuration settings to update.

        - */ -export interface DomainSettingsForUpdate { +export interface DescribeWorkforceRequest { /** - *

        A collection of RStudioServerPro Domain-level app settings to update.

        + *

        The name of the private workforce whose access you want to restrict. + * WorkforceName is automatically set to default when a + * workforce is created and cannot be modified.

        */ - RStudioServerProDomainSettingsForUpdate?: RStudioServerProDomainSettingsForUpdate; + WorkforceName: string | undefined; } -export namespace DomainSettingsForUpdate { +export namespace DescribeWorkforceRequest { /** * @internal */ - export const filterSensitiveLog = (obj: DomainSettingsForUpdate): any => ({ + export const filterSensitiveLog = (obj: DescribeWorkforceRequest): any => ({ ...obj, }); } /** - *

        A directed edge connecting two lineage entities.

        + *

        Your OIDC IdP workforce configuration.

        */ -export interface Edge { +export interface OidcConfigForResponse { /** - *

        The Amazon Resource Name (ARN) of the source lineage entity of the directed edge.

        + *

        The OIDC IdP client ID used to configure your private workforce.

        */ - SourceArn?: string; + ClientId?: string; /** - *

        The Amazon Resource Name (ARN) of the destination lineage entity of the directed edge.

        + *

        The OIDC IdP issuer used to configure your private workforce.

        */ - DestinationArn?: string; + Issuer?: string; /** - *

        The type of the Association(Edge) between the source and destination. For example ContributedTo, - * Produced, or DerivedFrom.

        + *

        The OIDC IdP authorization endpoint used to configure your private workforce.

        */ - AssociationType?: AssociationEdgeType | string; + AuthorizationEndpoint?: string; + + /** + *

        The OIDC IdP token endpoint used to configure your private workforce.

        + */ + TokenEndpoint?: string; + + /** + *

        The OIDC IdP user information endpoint used to configure your private workforce.

        + */ + UserInfoEndpoint?: string; + + /** + *

        The OIDC IdP logout endpoint used to configure your private workforce.

        + */ + LogoutEndpoint?: string; + + /** + *

        The OIDC IdP JSON Web Key Set (Jwks) URI used to configure your private workforce.

        + */ + JwksUri?: string; } -export namespace Edge { +export namespace OidcConfigForResponse { /** * @internal */ - export const filterSensitiveLog = (obj: Edge): any => ({ + export const filterSensitiveLog = (obj: OidcConfigForResponse): any => ({ ...obj, }); } +export enum WorkforceStatus { + ACTIVE = "Active", + DELETING = "Deleting", + FAILED = "Failed", + INITIALIZING = "Initializing", + UPDATING = "Updating", +} + /** - *

        Status of edge devices with this model.

        + *

        A VpcConfig object that specifies the VPC that you want your workforce to connect to.

        */ -export interface EdgeModelStat { - /** - *

        The name of the model.

        - */ - ModelName: string | undefined; - - /** - *

        The model version.

        - */ - ModelVersion: string | undefined; - +export interface WorkforceVpcConfigResponse { /** - *

        The number of devices that have this model version and do not have a heart beat.

        + *

        The ID of the VPC that the workforce uses for communication.

        */ - OfflineDeviceCount: number | undefined; + VpcId: string | undefined; /** - *

        The number of devices that have this model version and have a heart beat.

        + *

        The VPC security group IDs, in the form sg-xxxxxxxx. The security groups must be for the same VPC as specified in the subnet.

        */ - ConnectedDeviceCount: number | undefined; + SecurityGroupIds: string[] | undefined; /** - *

        The number of devices that have this model version, a heart beat, and are currently running.

        + *

        The ID of the subnets in the VPC that you want to connect.

        */ - ActiveDeviceCount: number | undefined; + Subnets: string[] | undefined; /** - *

        The number of devices with this model version and are producing sample data.

        + *

        The IDs for the VPC service endpoints of your VPC workforce when it is created and updated.

        */ - SamplingDeviceCount: number | undefined; + VpcEndpointId?: string; } -export namespace EdgeModelStat { +export namespace WorkforceVpcConfigResponse { /** * @internal */ - export const filterSensitiveLog = (obj: EdgeModelStat): any => ({ + export const filterSensitiveLog = (obj: WorkforceVpcConfigResponse): any => ({ ...obj, }); } /** - *

        Summary of edge packaging job.

        + *

        A single private workforce, which is automatically created when you create your first + * private work team. You can create one private work force in each Amazon Web Services Region. By default, + * any workforce-related API operation used in a specific region will apply to the + * workforce created in that region. To learn how to create a private workforce, see Create a Private Workforce.

        */ -export interface EdgePackagingJobSummary { +export interface Workforce { /** - *

        The Amazon Resource Name (ARN) of the edge packaging job.

        + *

        The name of the private workforce.

        */ - EdgePackagingJobArn: string | undefined; + WorkforceName: string | undefined; /** - *

        The name of the edge packaging job.

        + *

        The Amazon Resource Name (ARN) of the private workforce.

        */ - EdgePackagingJobName: string | undefined; + WorkforceArn: string | undefined; /** - *

        The status of the edge packaging job.

        + *

        The most recent date that was used to + * successfully add one or more IP address ranges (CIDRs) to a private workforce's + * allow list.

        */ - EdgePackagingJobStatus: EdgePackagingJobStatus | string | undefined; + LastUpdatedDate?: Date; /** - *

        The name of the SageMaker Neo compilation job.

        + *

        A list of one to ten IP address ranges (CIDRs) to be added to the + * workforce allow list. By default, a workforce isn't restricted to specific IP addresses.

        */ - CompilationJobName?: string; + SourceIpConfig?: SourceIpConfig; /** - *

        The name of the model.

        + *

        The subdomain for your OIDC Identity Provider.

        */ - ModelName?: string; + SubDomain?: string; /** - *

        The version of the model.

        + *

        The configuration of an Amazon Cognito workforce. + * A single Cognito workforce is created using and corresponds to a single + * + * Amazon Cognito user pool.

        */ - ModelVersion?: string; + CognitoConfig?: CognitoConfig; /** - *

        The timestamp of when the job was created.

        + *

        The configuration of an OIDC Identity Provider (IdP) private workforce.

        */ - CreationTime?: Date; + OidcConfig?: OidcConfigForResponse; /** - *

        The timestamp of when the edge packaging job was last updated.

        + *

        The date that the workforce is created.

        */ - LastModifiedTime?: Date; -} + CreateDate?: Date; -export namespace EdgePackagingJobSummary { /** - * @internal + *

        The configuration of a VPC workforce.

        */ - export const filterSensitiveLog = (obj: EdgePackagingJobSummary): any => ({ - ...obj, - }); -} + WorkforceVpcConfig?: WorkforceVpcConfigResponse; -/** - *

        The configurations and outcomes of an Amazon EMR step execution.

        - */ -export interface EMRStepMetadata { /** - *

        The identifier of the EMR cluster.

        + *

        The status of your workforce.

        */ - ClusterId?: string; + Status?: WorkforceStatus | string; /** - *

        The identifier of the EMR cluster step.

        + *

        The reason your workforce failed.

        */ - StepId?: string; + FailureReason?: string; +} +export namespace Workforce { /** - *

        The name of the EMR cluster step.

        + * @internal */ - StepName?: string; + export const filterSensitiveLog = (obj: Workforce): any => ({ + ...obj, + }); +} +export interface DescribeWorkforceResponse { /** - *

        The path to the log file where the cluster step's failure root cause - * is recorded.

        + *

        A single private workforce, which is automatically created when you create your first + * private work team. You can create one private work force in each Amazon Web Services Region. By default, + * any workforce-related API operation used in a specific region will apply to the + * workforce created in that region. To learn how to create a private workforce, see Create a Private Workforce.

        */ - LogFilePath?: string; + Workforce: Workforce | undefined; } -export namespace EMRStepMetadata { +export namespace DescribeWorkforceResponse { /** * @internal */ - export const filterSensitiveLog = (obj: EMRStepMetadata): any => ({ + export const filterSensitiveLog = (obj: DescribeWorkforceResponse): any => ({ ...obj, }); } -export interface EnableSagemakerServicecatalogPortfolioInput {} - -export namespace EnableSagemakerServicecatalogPortfolioInput { +export interface DescribeWorkteamRequest { /** - * @internal + *

        The name of the work team to return a description of.

        */ - export const filterSensitiveLog = (obj: EnableSagemakerServicecatalogPortfolioInput): any => ({ - ...obj, - }); + WorkteamName: string | undefined; } -export interface EnableSagemakerServicecatalogPortfolioOutput {} - -export namespace EnableSagemakerServicecatalogPortfolioOutput { +export namespace DescribeWorkteamRequest { /** * @internal */ - export const filterSensitiveLog = (obj: EnableSagemakerServicecatalogPortfolioOutput): any => ({ + export const filterSensitiveLog = (obj: DescribeWorkteamRequest): any => ({ ...obj, }); } /** - *

        A schedule for a model monitoring job. For information about model monitor, see - * Amazon SageMaker Model - * Monitor.

        + *

        Provides details about a labeling work team.

        */ -export interface MonitoringSchedule { - /** - *

        The Amazon Resource Name (ARN) of the monitoring schedule.

        - */ - MonitoringScheduleArn?: string; - +export interface Workteam { /** - *

        The name of the monitoring schedule.

        + *

        The name of the work team.

        */ - MonitoringScheduleName?: string; + WorkteamName: string | undefined; /** - *

        The status of the monitoring schedule. This can be one of the following values.

        - *
          - *
        • - *

          - * PENDING - The schedule is pending being created.

          - *
        • - *
        • - *

          - * FAILED - The schedule failed.

          - *
        • - *
        • - *

          - * SCHEDULED - The schedule was successfully created.

          - *
        • - *
        • - *

          - * STOPPED - The schedule was stopped.

          - *
        • - *
        + *

        A list of MemberDefinition objects that contains objects that identify + * the workers that make up the work team.

        + *

        Workforces can be created using Amazon Cognito or your own OIDC Identity Provider (IdP). + * For private workforces created using Amazon Cognito use + * CognitoMemberDefinition. For workforces created using your own OIDC identity + * provider (IdP) use OidcMemberDefinition.

        */ - MonitoringScheduleStatus?: ScheduleStatus | string; + MemberDefinitions: MemberDefinition[] | undefined; /** - *

        The type of the monitoring job definition to schedule.

        + *

        The Amazon Resource Name (ARN) that identifies the work team.

        */ - MonitoringType?: MonitoringType | string; + WorkteamArn: string | undefined; /** - *

        If the monitoring schedule failed, the reason it failed.

        + *

        The Amazon Resource Name (ARN) of the workforce.

        */ - FailureReason?: string; + WorkforceArn?: string; /** - *

        The time that the monitoring schedule was created.

        + *

        The Amazon Marketplace identifier for a vendor's work team.

        */ - CreationTime?: Date; + ProductListingIds?: string[]; /** - *

        The last time the monitoring schedule was changed.

        + *

        A description of the work team.

        */ - LastModifiedTime?: Date; + Description: string | undefined; /** - *

        Configures the monitoring schedule and defines the monitoring job.

        + *

        The URI of the labeling job's user interface. Workers open this URI to start labeling + * your data objects.

        */ - MonitoringScheduleConfig?: MonitoringScheduleConfig; + SubDomain?: string; /** - *

        The endpoint that hosts the model being monitored.

        + *

        The date and time that the work team was created (timestamp).

        */ - EndpointName?: string; + CreateDate?: Date; /** - *

        Summary of information about the last monitoring job to run.

        + *

        The date and time that the work team was last updated (timestamp).

        */ - LastMonitoringExecutionSummary?: MonitoringExecutionSummary; + LastUpdatedDate?: Date; /** - *

        A list of the tags associated with the monitoring schedlue. For more information, see Tagging Amazon Web Services - * resources in the Amazon Web Services General Reference Guide.

        + *

        Configures SNS notifications of available or expiring work items for work + * teams.

        */ - Tags?: Tag[]; + NotificationConfiguration?: NotificationConfiguration; } -export namespace MonitoringSchedule { +export namespace Workteam { /** * @internal */ - export const filterSensitiveLog = (obj: MonitoringSchedule): any => ({ + export const filterSensitiveLog = (obj: Workteam): any => ({ ...obj, }); } -/** - *

        A hosted endpoint for real-time inference.

        - */ -export interface Endpoint { - /** - *

        The name of the endpoint.

        - */ - EndpointName: string | undefined; - - /** - *

        The Amazon Resource Name (ARN) of the endpoint.

        - */ - EndpointArn: string | undefined; - +export interface DescribeWorkteamResponse { /** - *

        The endpoint configuration associated with the endpoint.

        + *

        A Workteam instance that contains information about the work team. + *

        */ - EndpointConfigName: string | undefined; + Workteam: Workteam | undefined; +} +export namespace DescribeWorkteamResponse { /** - *

        A list of the production variants hosted on the endpoint. Each production variant is a - * model.

        + * @internal */ - ProductionVariants?: ProductionVariantSummary[]; + export const filterSensitiveLog = (obj: DescribeWorkteamResponse): any => ({ + ...obj, + }); +} +/** + *

        Specifies weight and capacity values for a production variant.

        + */ +export interface DesiredWeightAndCapacity { /** - *

        The currently active data capture configuration used by your Endpoint.

        + *

        The name of the variant to update.

        */ - DataCaptureConfig?: DataCaptureConfigSummary; + VariantName: string | undefined; /** - *

        The status of the endpoint.

        + *

        The variant's weight.

        */ - EndpointStatus: EndpointStatus | string | undefined; + DesiredWeight?: number; /** - *

        If the endpoint failed, the reason it failed.

        + *

        The variant's capacity.

        */ - FailureReason?: string; + DesiredInstanceCount?: number; +} +export namespace DesiredWeightAndCapacity { /** - *

        The time that the endpoint was created.

        + * @internal */ - CreationTime: Date | undefined; + export const filterSensitiveLog = (obj: DesiredWeightAndCapacity): any => ({ + ...obj, + }); +} +/** + *

        Information of a particular device.

        + */ +export interface Device { /** - *

        The last time the endpoint was modified.

        + *

        The name of the device.

        */ - LastModifiedTime: Date | undefined; + DeviceName: string | undefined; /** - *

        A list of monitoring schedules for the endpoint. For information about model - * monitoring, see Amazon SageMaker Model Monitor.

        + *

        Description of the device.

        */ - MonitoringSchedules?: MonitoringSchedule[]; + Description?: string; /** - *

        A list of the tags associated with the endpoint. For more information, see Tagging Amazon Web Services - * resources in the Amazon Web Services General Reference Guide.

        + *

        Amazon Web Services Internet of Things (IoT) object name.

        */ - Tags?: Tag[]; + IotThingName?: string; } -export namespace Endpoint { +export namespace Device { /** * @internal */ - export const filterSensitiveLog = (obj: Endpoint): any => ({ + export const filterSensitiveLog = (obj: Device): any => ({ ...obj, }); } -export enum EndpointConfigSortKey { - CreationTime = "CreationTime", - Name = "Name", +export enum DeviceDeploymentStatus { + Deployed = "DEPLOYED", + Failed = "FAILED", + InProgress = "INPROGRESS", + ReadyToDeploy = "READYTODEPLOY", + Stopped = "STOPPED", + Stopping = "STOPPING", } /** - *

        Provides summary information for an endpoint configuration.

        + *

        Contains information summarizing device details and deployment status.

        */ -export interface EndpointConfigSummary { +export interface DeviceDeploymentSummary { /** - *

        The name of the endpoint configuration.

        + *

        The ARN of the edge deployment plan.

        */ - EndpointConfigName: string | undefined; + EdgeDeploymentPlanArn: string | undefined; /** - *

        The Amazon Resource Name (ARN) of the endpoint configuration.

        + *

        The name of the edge deployment plan.

        */ - EndpointConfigArn: string | undefined; + EdgeDeploymentPlanName: string | undefined; /** - *

        A timestamp that shows when the endpoint configuration was created.

        + *

        The name of the stage in the edge deployment plan.

        */ - CreationTime: Date | undefined; -} + StageName: string | undefined; -export namespace EndpointConfigSummary { /** - * @internal + *

        The name of the deployed stage.

        */ - export const filterSensitiveLog = (obj: EndpointConfigSummary): any => ({ - ...obj, - }); -} + DeployedStageName?: string; -export enum EndpointSortKey { - CreationTime = "CreationTime", - Name = "Name", - Status = "Status", -} + /** + *

        The name of the fleet to which the device belongs to.

        + */ + DeviceFleetName?: string; -/** - *

        Provides summary information for an endpoint.

        - */ -export interface EndpointSummary { /** - *

        The name of the endpoint.

        + *

        The name of the device.

        */ - EndpointName: string | undefined; + DeviceName: string | undefined; /** - *

        The Amazon Resource Name (ARN) of the endpoint.

        + *

        The ARN of the device.

        */ - EndpointArn: string | undefined; + DeviceArn: string | undefined; /** - *

        A timestamp that shows when the endpoint was created.

        + *

        The deployment status of the device.

        */ - CreationTime: Date | undefined; + DeviceDeploymentStatus?: DeviceDeploymentStatus | string; /** - *

        A timestamp that shows when the endpoint was last modified.

        + *

        The detailed error message for the deployoment status result.

        */ - LastModifiedTime: Date | undefined; + DeviceDeploymentStatusMessage?: string; /** - *

        The status of the endpoint.

        - *
          - *
        • - *

          - * OutOfService: Endpoint is not available to take incoming - * requests.

          - *
        • - *
        • - *

          - * Creating: CreateEndpoint is executing.

          - *
        • - *
        • - *

          - * Updating: UpdateEndpoint or UpdateEndpointWeightsAndCapacities is executing.

          - *
        • - *
        • - *

          - * SystemUpdating: Endpoint is undergoing maintenance and cannot be - * updated or deleted or re-scaled until it has completed. This maintenance - * operation does not change any customer-specified values such as VPC config, KMS - * encryption, model, instance type, or instance count.

          - *
        • - *
        • - *

          - * RollingBack: Endpoint fails to scale up or down or change its - * variant weight and is in the process of rolling back to its previous - * configuration. Once the rollback completes, endpoint returns to an - * InService status. This transitional status only applies to an - * endpoint that has autoscaling enabled and is undergoing variant weight or - * capacity changes as part of an UpdateEndpointWeightsAndCapacities call or when the UpdateEndpointWeightsAndCapacities operation is called - * explicitly.

          - *
        • - *
        • - *

          - * InService: Endpoint is available to process incoming - * requests.

          - *
        • - *
        • - *

          - * Deleting: DeleteEndpoint is executing.

          - *
        • - *
        • - *

          - * Failed: Endpoint could not be created, updated, or re-scaled. Use - * DescribeEndpointOutput$FailureReason for information about - * the failure. DeleteEndpoint is the only operation that can be - * performed on a failed endpoint.

          - *
        • - *
        - *

        To get a list of endpoints with a specified status, use the ListEndpointsInput$StatusEquals filter.

        + *

        The description of the device.

        */ - EndpointStatus: EndpointStatus | string | undefined; + Description?: string; + + /** + *

        The time when the deployment on the device started.

        + */ + DeploymentStartTime?: Date; } -export namespace EndpointSummary { +export namespace DeviceDeploymentSummary { /** * @internal */ - export const filterSensitiveLog = (obj: EndpointSummary): any => ({ + export const filterSensitiveLog = (obj: DeviceDeploymentSummary): any => ({ ...obj, }); } /** - *

        The properties of an experiment as returned by the Search API.

        + *

        Summary of the device fleet.

        */ -export interface Experiment { +export interface DeviceFleetSummary { /** - *

        The name of the experiment.

        + *

        Amazon Resource Name (ARN) of the device fleet.

        */ - ExperimentName?: string; + DeviceFleetArn: string | undefined; /** - *

        The Amazon Resource Name (ARN) of the experiment.

        + *

        Name of the device fleet.

        */ - ExperimentArn?: string; + DeviceFleetName: string | undefined; /** - *

        The name of the experiment as displayed. If DisplayName isn't specified, - * ExperimentName is displayed.

        + *

        Timestamp of when the device fleet was created.

        */ - DisplayName?: string; + CreationTime?: Date; /** - *

        The source of the experiment.

        + *

        Timestamp of when the device fleet was last updated.

        */ - Source?: ExperimentSource; + LastModifiedTime?: Date; +} +export namespace DeviceFleetSummary { /** - *

        The description of the experiment.

        + * @internal */ - Description?: string; + export const filterSensitiveLog = (obj: DeviceFleetSummary): any => ({ + ...obj, + }); +} +/** + *

        Status of devices.

        + */ +export interface DeviceStats { /** - *

        When the experiment was created.

        + *

        The number of devices connected with a heartbeat.

        */ - CreationTime?: Date; + ConnectedDeviceCount: number | undefined; /** - *

        Who created the experiment.

        + *

        The number of registered devices.

        */ - CreatedBy?: UserContext; + RegisteredDeviceCount: number | undefined; +} +export namespace DeviceStats { /** - *

        When the experiment was last modified.

        + * @internal */ - LastModifiedTime?: Date; + export const filterSensitiveLog = (obj: DeviceStats): any => ({ + ...obj, + }); +} +/** + *

        Summary of model on edge device.

        + */ +export interface EdgeModelSummary { /** - *

        Information about the user who created or modified an experiment, trial, trial - * component, lineage group, or project.

        + *

        The name of the model.

        */ - LastModifiedBy?: UserContext; + ModelName: string | undefined; /** - *

        The list of tags that are associated with the experiment. You can use Search API to search on the tags.

        + *

        The version model.

        */ - Tags?: Tag[]; + ModelVersion: string | undefined; } -export namespace Experiment { +export namespace EdgeModelSummary { /** * @internal */ - export const filterSensitiveLog = (obj: Experiment): any => ({ + export const filterSensitiveLog = (obj: EdgeModelSummary): any => ({ ...obj, }); } /** - *

        A summary of the properties of an experiment. To get the complete set of properties, call - * the DescribeExperiment API and provide the - * ExperimentName.

        + *

        Summary of the device.

        */ -export interface ExperimentSummary { +export interface DeviceSummary { /** - *

        The Amazon Resource Name (ARN) of the experiment.

        + *

        The unique identifier of the device.

        */ - ExperimentArn?: string; + DeviceName: string | undefined; /** - *

        The name of the experiment.

        + *

        Amazon Resource Name (ARN) of the device.

        */ - ExperimentName?: string; + DeviceArn: string | undefined; /** - *

        The name of the experiment as displayed. If DisplayName isn't specified, - * ExperimentName is displayed.

        + *

        A description of the device.

        */ - DisplayName?: string; + Description?: string; /** - *

        The source of the experiment.

        + *

        The name of the fleet the device belongs to.

        */ - ExperimentSource?: ExperimentSource; + DeviceFleetName?: string; /** - *

        When the experiment was created.

        + *

        The Amazon Web Services Internet of Things (IoT) object thing name associated with the device..

        */ - CreationTime?: Date; + IotThingName?: string; /** - *

        When the experiment was last modified.

        + *

        The timestamp of the last registration or de-reregistration.

        */ - LastModifiedTime?: Date; + RegistrationTime?: Date; + + /** + *

        The last heartbeat received from the device.

        + */ + LatestHeartbeat?: Date; + + /** + *

        Models on the device.

        + */ + Models?: EdgeModelSummary[]; + + /** + *

        Edge Manager agent version.

        + */ + AgentVersion?: string; } -export namespace ExperimentSummary { +export namespace DeviceSummary { /** * @internal */ - export const filterSensitiveLog = (obj: ExperimentSummary): any => ({ + export const filterSensitiveLog = (obj: DeviceSummary): any => ({ ...obj, }); } -/** - *

        The container for the metadata for Fail step.

        - */ -export interface FailStepMetadata { +export enum Direction { + ASCENDANTS = "Ascendants", + BOTH = "Both", + DESCENDANTS = "Descendants", +} + +export interface DisableSagemakerServicecatalogPortfolioInput {} + +export namespace DisableSagemakerServicecatalogPortfolioInput { /** - *

        A message that you define and then is processed and rendered by - * the Fail step when the error occurs.

        + * @internal */ - ErrorMessage?: string; + export const filterSensitiveLog = (obj: DisableSagemakerServicecatalogPortfolioInput): any => ({ + ...obj, + }); } -export namespace FailStepMetadata { +export interface DisableSagemakerServicecatalogPortfolioOutput {} + +export namespace DisableSagemakerServicecatalogPortfolioOutput { /** * @internal */ - export const filterSensitiveLog = (obj: FailStepMetadata): any => ({ + export const filterSensitiveLog = (obj: DisableSagemakerServicecatalogPortfolioOutput): any => ({ ...obj, }); } -/** - *

        Amazon SageMaker Feature Store stores features in a collection called Feature Group. - * A Feature Group can be visualized as a table which has rows, - * with a unique identifier for each row where each column in the table is a feature. - * In principle, a Feature Group is composed of features and values per features.

        - */ -export interface FeatureGroup { +export interface DisassociateTrialComponentRequest { /** - *

        The Amazon Resource Name (ARN) of a FeatureGroup.

        + *

        The name of the component to disassociate from the trial.

        */ - FeatureGroupArn?: string; + TrialComponentName: string | undefined; /** - *

        The name of the FeatureGroup.

        + *

        The name of the trial to disassociate from.

        */ - FeatureGroupName?: string; + TrialName: string | undefined; +} +export namespace DisassociateTrialComponentRequest { /** - *

        The name of the Feature whose value uniquely identifies a - * Record defined in the FeatureGroup - * FeatureDefinitions.

        + * @internal */ - RecordIdentifierFeatureName?: string; + export const filterSensitiveLog = (obj: DisassociateTrialComponentRequest): any => ({ + ...obj, + }); +} +export interface DisassociateTrialComponentResponse { /** - *

        The name of the feature that stores the EventTime of a Record in a - * FeatureGroup.

        - *

        A EventTime is point in time when a new event - * occurs that corresponds to the creation or update of a Record in - * FeatureGroup. All Records in the FeatureGroup - * must have a corresponding EventTime.

        + *

        The ARN of the trial component.

        */ - EventTimeFeatureName?: string; + TrialComponentArn?: string; /** - *

        A list of Features. Each Feature must include a - * FeatureName and a FeatureType.

        - *

        Valid FeatureTypes are Integral, Fractional and - * String.

        - *

        - * FeatureNames cannot be any of the following: is_deleted, - * write_time, api_invocation_time.

        - *

        You can create up to 2,500 FeatureDefinitions per - * FeatureGroup.

        + *

        The Amazon Resource Name (ARN) of the trial.

        */ - FeatureDefinitions?: FeatureDefinition[]; + TrialArn?: string; +} +export namespace DisassociateTrialComponentResponse { /** - *

        The time a FeatureGroup was created.

        + * @internal */ - CreationTime?: Date; + export const filterSensitiveLog = (obj: DisassociateTrialComponentResponse): any => ({ + ...obj, + }); +} +/** + *

        The domain's details.

        + */ +export interface DomainDetails { /** - *

        A timestamp indicating the last time you updated the feature group.

        + *

        The domain's Amazon Resource Name (ARN).

        */ - LastModifiedTime?: Date; + DomainArn?: string; /** - *

        Use this to specify the Amazon Web Services Key Management Service (KMS) Key ID, or - * KMSKeyId, for at rest data encryption. You can turn - * OnlineStore on or off by specifying the EnableOnlineStore flag - * at General Assembly; the default value is False.

        + *

        The domain ID.

        */ - OnlineStoreConfig?: OnlineStoreConfig; + DomainId?: string; /** - *

        The configuration of an OfflineStore.

        - *

        Provide an OfflineStoreConfig in a request to - * CreateFeatureGroup to create an OfflineStore.

        - *

        To encrypt an OfflineStore using at rest data encryption, specify Amazon Web Services Key - * Management Service (KMS) key ID, or KMSKeyId, in - * S3StorageConfig.

        + *

        The domain name.

        */ - OfflineStoreConfig?: OfflineStoreConfig; + DomainName?: string; /** - *

        The Amazon Resource Name (ARN) of the IAM execution role used to create the feature - * group.

        + *

        The status.

        */ - RoleArn?: string; + Status?: DomainStatus | string; /** - *

        A FeatureGroup status.

        + *

        The creation time.

        */ - FeatureGroupStatus?: FeatureGroupStatus | string; + CreationTime?: Date; /** - *

        The status of OfflineStore.

        + *

        The last modified time.

        */ - OfflineStoreStatus?: OfflineStoreStatus; + LastModifiedTime?: Date; /** - *

        A value that indicates whether the feature group was updated successfully.

        + *

        The domain's URL.

        */ - LastUpdateStatus?: LastUpdateStatus; + Url?: string; +} +export namespace DomainDetails { /** - *

        The reason that the FeatureGroup failed to - * be replicated in the OfflineStore. This is - * failure may be due to a failure to create a FeatureGroup in - * or delete a FeatureGroup from the OfflineStore.

        + * @internal */ - FailureReason?: string; + export const filterSensitiveLog = (obj: DomainDetails): any => ({ + ...obj, + }); +} +/** + *

        A collection of settings that update the current configuration for the RStudioServerPro Domain-level app.

        + */ +export interface RStudioServerProDomainSettingsForUpdate { /** - *

        A free form description of a FeatureGroup.

        + *

        The execution role for the RStudioServerPro Domain-level app.

        */ - Description?: string; + DomainExecutionRoleArn: string | undefined; /** - *

        Tags used to define a FeatureGroup.

        + *

        Specifies the ARN's of a SageMaker image and SageMaker image version, and the instance type that + * the version runs on.

        */ - Tags?: Tag[]; + DefaultResourceSpec?: ResourceSpec; } -export namespace FeatureGroup { +export namespace RStudioServerProDomainSettingsForUpdate { /** * @internal */ - export const filterSensitiveLog = (obj: FeatureGroup): any => ({ + export const filterSensitiveLog = (obj: RStudioServerProDomainSettingsForUpdate): any => ({ ...obj, }); } -export enum FeatureGroupSortBy { - CREATION_TIME = "CreationTime", - FEATURE_GROUP_STATUS = "FeatureGroupStatus", - NAME = "Name", - OFFLINE_STORE_STATUS = "OfflineStoreStatus", -} - -export enum FeatureGroupSortOrder { - ASCENDING = "Ascending", - DESCENDING = "Descending", -} - /** - *

        The name, Arn, CreationTime, FeatureGroup values, - * LastUpdatedTime and EnableOnlineStorage status of a - * FeatureGroup.

        + *

        A collection of Domain configuration settings to update.

        */ -export interface FeatureGroupSummary { +export interface DomainSettingsForUpdate { /** - *

        The name of FeatureGroup.

        + *

        A collection of RStudioServerPro Domain-level app settings to update.

        */ - FeatureGroupName: string | undefined; + RStudioServerProDomainSettingsForUpdate?: RStudioServerProDomainSettingsForUpdate; +} +export namespace DomainSettingsForUpdate { /** - *

        Unique identifier for the FeatureGroup.

        + * @internal */ - FeatureGroupArn: string | undefined; + export const filterSensitiveLog = (obj: DomainSettingsForUpdate): any => ({ + ...obj, + }); +} +/** + *

        A directed edge connecting two lineage entities.

        + */ +export interface Edge { /** - *

        A timestamp indicating the time of creation time of the FeatureGroup.

        + *

        The Amazon Resource Name (ARN) of the source lineage entity of the directed edge.

        */ - CreationTime: Date | undefined; + SourceArn?: string; /** - *

        The status of a FeatureGroup. The status can be any of the following: - * Creating, Created, CreateFail, - * Deleting or DetailFail.

        + *

        The Amazon Resource Name (ARN) of the destination lineage entity of the directed edge.

        */ - FeatureGroupStatus?: FeatureGroupStatus | string; + DestinationArn?: string; /** - *

        Notifies you if replicating data into the OfflineStore has failed. Returns - * either: Active or Blocked.

        + *

        The type of the Association(Edge) between the source and destination. For example ContributedTo, + * Produced, or DerivedFrom.

        */ - OfflineStoreStatus?: OfflineStoreStatus; + AssociationType?: AssociationEdgeType | string; } -export namespace FeatureGroupSummary { +export namespace Edge { /** * @internal */ - export const filterSensitiveLog = (obj: FeatureGroupSummary): any => ({ + export const filterSensitiveLog = (obj: Edge): any => ({ ...obj, }); } /** - *

        The metadata for a feature. It can either be metadata that you specify, or metadata that is updated automatically.

        + *

        Contains information summarizing an edge deployment plan.

        */ -export interface FeatureMetadata { +export interface EdgeDeploymentPlanSummary { /** - *

        The Amazon Resource Number (ARN) of the feature group.

        + *

        The ARN of the edge deployment plan.

        */ - FeatureGroupArn?: string; + EdgeDeploymentPlanArn: string | undefined; /** - *

        The name of the feature group containing the feature.

        + *

        The name of the edge deployment plan.

        */ - FeatureGroupName?: string; + EdgeDeploymentPlanName: string | undefined; /** - *

        The name of feature.

        + *

        The name of the device fleet used for the deployment.

        */ - FeatureName?: string; + DeviceFleetName: string | undefined; /** - *

        The data type of the feature.

        + *

        The number of edge devices with the successful deployment.

        */ - FeatureType?: FeatureType | string; + EdgeDeploymentSuccess: number | undefined; /** - *

        A timestamp indicating when the feature was created.

        + *

        The number of edge devices yet to pick up the deployment, or in progress.

        */ - CreationTime?: Date; + EdgeDeploymentPending: number | undefined; /** - *

        A timestamp indicating when the feature was last modified.

        + *

        The number of edge devices that failed the deployment.

        */ - LastModifiedTime?: Date; + EdgeDeploymentFailed: number | undefined; /** - *

        An optional description that you specify to better describe the feature.

        + *

        The time when the edge deployment plan was created.

        */ - Description?: string; + CreationTime?: Date; /** - *

        Optional key-value pairs that you specify to better describe the feature.

        + *

        The time when the edge deployment plan was last updated.

        */ - Parameters?: FeatureParameter[]; + LastModifiedTime?: Date; } -export namespace FeatureMetadata { +export namespace EdgeDeploymentPlanSummary { /** * @internal */ - export const filterSensitiveLog = (obj: FeatureMetadata): any => ({ + export const filterSensitiveLog = (obj: EdgeDeploymentPlanSummary): any => ({ ...obj, }); } -export enum Operator { - CONTAINS = "Contains", - EQUALS = "Equals", - EXISTS = "Exists", - GREATER_THAN = "GreaterThan", - GREATER_THAN_OR_EQUAL_TO = "GreaterThanOrEqualTo", - IN = "In", - LESS_THAN = "LessThan", - LESS_THAN_OR_EQUAL_TO = "LessThanOrEqualTo", - NOT_EQUALS = "NotEquals", - NOT_EXISTS = "NotExists", -} - -/** - *

        A conditional statement for a search expression that includes a resource property, a - * Boolean operator, and a value. Resources that match the statement are returned in the - * results from the Search API.

        - * - *

        If you specify a Value, but not an Operator, Amazon SageMaker uses the - * equals operator.

        - *

        In search, there are several property types:

        - *
        - *
        Metrics
        - *
        - *

        To define a metric filter, enter a value using the form - * "Metrics.", where is - * a metric name. For example, the following filter searches for training jobs - * with an "accuracy" metric greater than - * "0.9":

        - *

        - * { - *

        - *

        - * "Name": "Metrics.accuracy", - *

        - *

        - * "Operator": "GreaterThan", - *

        - *

        - * "Value": "0.9" - *

        - *

        - * } - *

        - *
        - *
        HyperParameters
        - *
        - *

        To define a hyperparameter filter, enter a value with the form - * "HyperParameters.". Decimal hyperparameter - * values are treated as a decimal in a comparison if the specified - * Value is also a decimal value. If the specified - * Value is an integer, the decimal hyperparameter values are - * treated as integers. For example, the following filter is satisfied by - * training jobs with a "learning_rate" hyperparameter that is - * less than "0.5":

        - *

        - * { - *

        - *

        - * "Name": "HyperParameters.learning_rate", - *

        - *

        - * "Operator": "LessThan", - *

        - *

        - * "Value": "0.5" - *

        - *

        - * } - *

        - *
        - *
        Tags
        - *
        - *

        To define a tag filter, enter a value with the form - * Tags..

        - *
        - *
        - */ -export interface Filter { - /** - *

        A resource property name. For example, TrainingJobName. For - * valid property names, see SearchRecord. - * You must specify a valid property for the resource.

        - */ - Name: string | undefined; - - /** - *

        A Boolean binary operator that is used to evaluate the filter. The operator field - * contains one of the following values:

        - *
        - *
        Equals
        - *
        - *

        The value of Name equals Value.

        - *
        - *
        NotEquals
        - *
        - *

        The value of Name doesn't equal Value.

        - *
        - *
        Exists
        - *
        - *

        The Name property exists.

        - *
        - *
        NotExists
        - *
        - *

        The Name property does not exist.

        - *
        - *
        GreaterThan
        - *
        - *

        The value of Name is greater than Value. - * Not supported for text properties.

        - *
        - *
        GreaterThanOrEqualTo
        - *
        - *

        The value of Name is greater than or equal to Value. - * Not supported for text properties.

        - *
        - *
        LessThan
        - *
        - *

        The value of Name is less than Value. - * Not supported for text properties.

        - *
        - *
        LessThanOrEqualTo
        - *
        - *

        The value of Name is less than or equal to Value. - * Not supported for text properties.

        - *
        - *
        In
        - *
        - *

        The value of Name is one of the comma delimited strings in - * Value. Only supported for text properties.

        - *
        - *
        Contains
        - *
        - *

        The value of Name contains the string Value. - * Only supported for text properties.

        - *

        A SearchExpression can include the Contains operator - * multiple times when the value of Name is one of the following:

        - *
          - *
        • - *

          - * Experiment.DisplayName - *

          - *
        • - *
        • - *

          - * Experiment.ExperimentName - *

          - *
        • - *
        • - *

          - * Experiment.Tags - *

          - *
        • - *
        • - *

          - * Trial.DisplayName - *

          - *
        • - *
        • - *

          - * Trial.TrialName - *

          - *
        • - *
        • - *

          - * Trial.Tags - *

          - *
        • - *
        • - *

          - * TrialComponent.DisplayName - *

          - *
        • - *
        • - *

          - * TrialComponent.TrialComponentName - *

          - *
        • - *
        • - *

          - * TrialComponent.Tags - *

          - *
        • - *
        • - *

          - * TrialComponent.InputArtifacts - *

          - *
        • - *
        • - *

          - * TrialComponent.OutputArtifacts - *

          - *
        • - *
        - *

        A SearchExpression can include only one Contains operator - * for all other values of Name. In these cases, if you include multiple - * Contains operators in the SearchExpression, the result is - * the following error message: "'CONTAINS' operator usage limit of 1 - * exceeded."

        - *
        - *
        +/** + *

        Status of edge devices with this model.

        + */ +export interface EdgeModelStat { + /** + *

        The name of the model.

        */ - Operator?: Operator | string; + ModelName: string | undefined; /** - *

        A value used with Name and Operator to determine which - * resources satisfy the filter's condition. For numerical properties, Value - * must be an integer or floating-point decimal. For timestamp properties, - * Value must be an ISO 8601 date-time string of the following format: - * YYYY-mm-dd'T'HH:MM:SS.

        + *

        The model version.

        */ - Value?: string; + ModelVersion: string | undefined; + + /** + *

        The number of devices that have this model version and do not have a heart beat.

        + */ + OfflineDeviceCount: number | undefined; + + /** + *

        The number of devices that have this model version and have a heart beat.

        + */ + ConnectedDeviceCount: number | undefined; + + /** + *

        The number of devices that have this model version, a heart beat, and are currently running.

        + */ + ActiveDeviceCount: number | undefined; + + /** + *

        The number of devices with this model version and are producing sample data.

        + */ + SamplingDeviceCount: number | undefined; } -export namespace Filter { +export namespace EdgeModelStat { /** * @internal */ - export const filterSensitiveLog = (obj: Filter): any => ({ + export const filterSensitiveLog = (obj: EdgeModelStat): any => ({ ...obj, }); } /** - *

        Contains summary information about the flow definition.

        + *

        Summary of edge packaging job.

        */ -export interface FlowDefinitionSummary { +export interface EdgePackagingJobSummary { /** - *

        The name of the flow definition.

        + *

        The Amazon Resource Name (ARN) of the edge packaging job.

        */ - FlowDefinitionName: string | undefined; + EdgePackagingJobArn: string | undefined; /** - *

        The Amazon Resource Name (ARN) of the flow definition.

        + *

        The name of the edge packaging job.

        */ - FlowDefinitionArn: string | undefined; + EdgePackagingJobName: string | undefined; /** - *

        The status of the flow definition. Valid values:

        + *

        The status of the edge packaging job.

        */ - FlowDefinitionStatus: FlowDefinitionStatus | string | undefined; + EdgePackagingJobStatus: EdgePackagingJobStatus | string | undefined; /** - *

        The timestamp when SageMaker created the flow definition.

        + *

        The name of the SageMaker Neo compilation job.

        */ - CreationTime: Date | undefined; + CompilationJobName?: string; /** - *

        The reason why the flow definition creation failed. A failure reason is returned only when the flow definition status is Failed.

        + *

        The name of the model.

        */ - FailureReason?: string; + ModelName?: string; + + /** + *

        The version of the model.

        + */ + ModelVersion?: string; + + /** + *

        The timestamp of when the job was created.

        + */ + CreationTime?: Date; + + /** + *

        The timestamp of when the edge packaging job was last updated.

        + */ + LastModifiedTime?: Date; } -export namespace FlowDefinitionSummary { +export namespace EdgePackagingJobSummary { /** * @internal */ - export const filterSensitiveLog = (obj: FlowDefinitionSummary): any => ({ + export const filterSensitiveLog = (obj: EdgePackagingJobSummary): any => ({ ...obj, }); } -export interface GetDeviceFleetReportRequest { +/** + *

        The configurations and outcomes of an Amazon EMR step execution.

        + */ +export interface EMRStepMetadata { /** - *

        The name of the fleet.

        + *

        The identifier of the EMR cluster.

        */ - DeviceFleetName: string | undefined; + ClusterId?: string; + + /** + *

        The identifier of the EMR cluster step.

        + */ + StepId?: string; + + /** + *

        The name of the EMR cluster step.

        + */ + StepName?: string; + + /** + *

        The path to the log file where the cluster step's failure root cause + * is recorded.

        + */ + LogFilePath?: string; } -export namespace GetDeviceFleetReportRequest { +export namespace EMRStepMetadata { /** * @internal */ - export const filterSensitiveLog = (obj: GetDeviceFleetReportRequest): any => ({ + export const filterSensitiveLog = (obj: EMRStepMetadata): any => ({ ...obj, }); } -export interface GetDeviceFleetReportResponse { +export interface EnableSagemakerServicecatalogPortfolioInput {} + +export namespace EnableSagemakerServicecatalogPortfolioInput { /** - *

        The Amazon Resource Name (ARN) of the device.

        + * @internal */ - DeviceFleetArn: string | undefined; + export const filterSensitiveLog = (obj: EnableSagemakerServicecatalogPortfolioInput): any => ({ + ...obj, + }); +} + +export interface EnableSagemakerServicecatalogPortfolioOutput {} +export namespace EnableSagemakerServicecatalogPortfolioOutput { /** - *

        The name of the fleet.

        + * @internal */ - DeviceFleetName: string | undefined; + export const filterSensitiveLog = (obj: EnableSagemakerServicecatalogPortfolioOutput): any => ({ + ...obj, + }); +} +/** + *

        A schedule for a model monitoring job. For information about model monitor, see + * Amazon SageMaker Model + * Monitor.

        + */ +export interface MonitoringSchedule { /** - *

        The output configuration for storing sample data collected by the fleet.

        + *

        The Amazon Resource Name (ARN) of the monitoring schedule.

        */ - OutputConfig?: EdgeOutputConfig; + MonitoringScheduleArn?: string; /** - *

        Description of the fleet.

        + *

        The name of the monitoring schedule.

        */ - Description?: string; + MonitoringScheduleName?: string; /** - *

        Timestamp of when the report was generated.

        + *

        The status of the monitoring schedule. This can be one of the following values.

        + *
          + *
        • + *

          + * PENDING - The schedule is pending being created.

          + *
        • + *
        • + *

          + * FAILED - The schedule failed.

          + *
        • + *
        • + *

          + * SCHEDULED - The schedule was successfully created.

          + *
        • + *
        • + *

          + * STOPPED - The schedule was stopped.

          + *
        • + *
        */ - ReportGenerated?: Date; + MonitoringScheduleStatus?: ScheduleStatus | string; /** - *

        Status of devices.

        + *

        The type of the monitoring job definition to schedule.

        */ - DeviceStats?: DeviceStats; + MonitoringType?: MonitoringType | string; /** - *

        The versions of Edge Manager agent deployed on the fleet.

        + *

        If the monitoring schedule failed, the reason it failed.

        */ - AgentVersions?: AgentVersion[]; + FailureReason?: string; /** - *

        Status of model on device.

        + *

        The time that the monitoring schedule was created.

        */ - ModelStats?: EdgeModelStat[]; -} + CreationTime?: Date; -export namespace GetDeviceFleetReportResponse { /** - * @internal + *

        The last time the monitoring schedule was changed.

        */ - export const filterSensitiveLog = (obj: GetDeviceFleetReportResponse): any => ({ - ...obj, - }); -} + LastModifiedTime?: Date; -export interface GetLineageGroupPolicyRequest { /** - *

        The name or Amazon Resource Name (ARN) of the lineage group.

        + *

        Configures the monitoring schedule and defines the monitoring job.

        */ - LineageGroupName: string | undefined; + MonitoringScheduleConfig?: MonitoringScheduleConfig; + + /** + *

        The endpoint that hosts the model being monitored.

        + */ + EndpointName?: string; + + /** + *

        Summary of information about the last monitoring job to run.

        + */ + LastMonitoringExecutionSummary?: MonitoringExecutionSummary; + + /** + *

        A list of the tags associated with the monitoring schedlue. For more information, see Tagging Amazon Web Services + * resources in the Amazon Web Services General Reference Guide.

        + */ + Tags?: Tag[]; } -export namespace GetLineageGroupPolicyRequest { +export namespace MonitoringSchedule { /** * @internal */ - export const filterSensitiveLog = (obj: GetLineageGroupPolicyRequest): any => ({ + export const filterSensitiveLog = (obj: MonitoringSchedule): any => ({ ...obj, }); } -export interface GetLineageGroupPolicyResponse { +/** + *

        A hosted endpoint for real-time inference.

        + */ +export interface Endpoint { /** - *

        The Amazon Resource Name (ARN) of the lineage group.

        + *

        The name of the endpoint.

        */ - LineageGroupArn?: string; + EndpointName: string | undefined; /** - *

        The resource policy that gives access to the lineage group in another account.

        + *

        The Amazon Resource Name (ARN) of the endpoint.

        */ - ResourcePolicy?: string; -} + EndpointArn: string | undefined; + + /** + *

        The endpoint configuration associated with the endpoint.

        + */ + EndpointConfigName: string | undefined; + + /** + *

        A list of the production variants hosted on the endpoint. Each production variant is a + * model.

        + */ + ProductionVariants?: ProductionVariantSummary[]; + + /** + *

        The currently active data capture configuration used by your Endpoint.

        + */ + DataCaptureConfig?: DataCaptureConfigSummary; + + /** + *

        The status of the endpoint.

        + */ + EndpointStatus: EndpointStatus | string | undefined; + + /** + *

        If the endpoint failed, the reason it failed.

        + */ + FailureReason?: string; -export namespace GetLineageGroupPolicyResponse { /** - * @internal + *

        The time that the endpoint was created.

        */ - export const filterSensitiveLog = (obj: GetLineageGroupPolicyResponse): any => ({ - ...obj, - }); -} + CreationTime: Date | undefined; -export interface GetModelPackageGroupPolicyInput { /** - *

        The name of the model group for which to get the resource policy.

        + *

        The last time the endpoint was modified.

        */ - ModelPackageGroupName: string | undefined; -} + LastModifiedTime: Date | undefined; -export namespace GetModelPackageGroupPolicyInput { /** - * @internal + *

        A list of monitoring schedules for the endpoint. For information about model + * monitoring, see Amazon SageMaker Model Monitor.

        */ - export const filterSensitiveLog = (obj: GetModelPackageGroupPolicyInput): any => ({ - ...obj, - }); -} + MonitoringSchedules?: MonitoringSchedule[]; -export interface GetModelPackageGroupPolicyOutput { /** - *

        The resource policy for the model group.

        + *

        A list of the tags associated with the endpoint. For more information, see Tagging Amazon Web Services + * resources in the Amazon Web Services General Reference Guide.

        */ - ResourcePolicy: string | undefined; + Tags?: Tag[]; } -export namespace GetModelPackageGroupPolicyOutput { +export namespace Endpoint { /** * @internal */ - export const filterSensitiveLog = (obj: GetModelPackageGroupPolicyOutput): any => ({ + export const filterSensitiveLog = (obj: Endpoint): any => ({ ...obj, }); } -export interface GetSagemakerServicecatalogPortfolioStatusInput {} +export enum EndpointConfigSortKey { + CreationTime = "CreationTime", + Name = "Name", +} -export namespace GetSagemakerServicecatalogPortfolioStatusInput { +/** + *

        Provides summary information for an endpoint configuration.

        + */ +export interface EndpointConfigSummary { /** - * @internal + *

        The name of the endpoint configuration.

        */ - export const filterSensitiveLog = (obj: GetSagemakerServicecatalogPortfolioStatusInput): any => ({ - ...obj, - }); -} + EndpointConfigName: string | undefined; -export enum SagemakerServicecatalogStatus { - DISABLED = "Disabled", - ENABLED = "Enabled", -} + /** + *

        The Amazon Resource Name (ARN) of the endpoint configuration.

        + */ + EndpointConfigArn: string | undefined; -export interface GetSagemakerServicecatalogPortfolioStatusOutput { /** - *

        Whether Service Catalog is enabled or disabled in SageMaker.

        + *

        A timestamp that shows when the endpoint configuration was created.

        */ - Status?: SagemakerServicecatalogStatus | string; + CreationTime: Date | undefined; } -export namespace GetSagemakerServicecatalogPortfolioStatusOutput { +export namespace EndpointConfigSummary { /** * @internal */ - export const filterSensitiveLog = (obj: GetSagemakerServicecatalogPortfolioStatusOutput): any => ({ + export const filterSensitiveLog = (obj: EndpointConfigSummary): any => ({ ...obj, }); } -export enum ResourceType { - ENDPOINT = "Endpoint", - EXPERIMENT = "Experiment", - EXPERIMENT_TRIAL = "ExperimentTrial", - EXPERIMENT_TRIAL_COMPONENT = "ExperimentTrialComponent", - FEATURE_GROUP = "FeatureGroup", - FEATURE_METADATA = "FeatureMetadata", - MODEL_PACKAGE = "ModelPackage", - MODEL_PACKAGE_GROUP = "ModelPackageGroup", - PIPELINE = "Pipeline", - PIPELINE_EXECUTION = "PipelineExecution", - PROJECT = "Project", - TRAINING_JOB = "TrainingJob", +export enum EndpointSortKey { + CreationTime = "CreationTime", + Name = "Name", + Status = "Status", } /** - *

        Part of the SuggestionQuery type. Specifies a hint for retrieving property - * names that begin with the specified text.

        + *

        Provides summary information for an endpoint.

        */ -export interface PropertyNameQuery { - /** - *

        Text that begins a property's name.

        - */ - PropertyNameHint: string | undefined; -} - -export namespace PropertyNameQuery { +export interface EndpointSummary { /** - * @internal + *

        The name of the endpoint.

        */ - export const filterSensitiveLog = (obj: PropertyNameQuery): any => ({ - ...obj, - }); -} + EndpointName: string | undefined; -/** - *

        Specified in the GetSearchSuggestions request. - * Limits the property names that are included in the response.

        - */ -export interface SuggestionQuery { /** - *

        Defines a property name hint. Only property - * names that begin with the specified hint are included in the response.

        + *

        The Amazon Resource Name (ARN) of the endpoint.

        */ - PropertyNameQuery?: PropertyNameQuery; -} + EndpointArn: string | undefined; -export namespace SuggestionQuery { /** - * @internal + *

        A timestamp that shows when the endpoint was created.

        */ - export const filterSensitiveLog = (obj: SuggestionQuery): any => ({ - ...obj, - }); -} + CreationTime: Date | undefined; -export interface GetSearchSuggestionsRequest { /** - *

        The name of the Amazon SageMaker resource to search for.

        + *

        A timestamp that shows when the endpoint was last modified.

        */ - Resource: ResourceType | string | undefined; + LastModifiedTime: Date | undefined; /** - *

        Limits the property names that are included in the response.

        + *

        The status of the endpoint.

        + *
          + *
        • + *

          + * OutOfService: Endpoint is not available to take incoming + * requests.

          + *
        • + *
        • + *

          + * Creating: CreateEndpoint is executing.

          + *
        • + *
        • + *

          + * Updating: UpdateEndpoint or UpdateEndpointWeightsAndCapacities is executing.

          + *
        • + *
        • + *

          + * SystemUpdating: Endpoint is undergoing maintenance and cannot be + * updated or deleted or re-scaled until it has completed. This maintenance + * operation does not change any customer-specified values such as VPC config, KMS + * encryption, model, instance type, or instance count.

          + *
        • + *
        • + *

          + * RollingBack: Endpoint fails to scale up or down or change its + * variant weight and is in the process of rolling back to its previous + * configuration. Once the rollback completes, endpoint returns to an + * InService status. This transitional status only applies to an + * endpoint that has autoscaling enabled and is undergoing variant weight or + * capacity changes as part of an UpdateEndpointWeightsAndCapacities call or when the UpdateEndpointWeightsAndCapacities operation is called + * explicitly.

          + *
        • + *
        • + *

          + * InService: Endpoint is available to process incoming + * requests.

          + *
        • + *
        • + *

          + * Deleting: DeleteEndpoint is executing.

          + *
        • + *
        • + *

          + * Failed: Endpoint could not be created, updated, or re-scaled. Use + * DescribeEndpointOutput$FailureReason for information about + * the failure. DeleteEndpoint is the only operation that can be + * performed on a failed endpoint.

          + *
        • + *
        + *

        To get a list of endpoints with a specified status, use the ListEndpointsInput$StatusEquals filter.

        */ - SuggestionQuery?: SuggestionQuery; + EndpointStatus: EndpointStatus | string | undefined; } -export namespace GetSearchSuggestionsRequest { +export namespace EndpointSummary { /** * @internal */ - export const filterSensitiveLog = (obj: GetSearchSuggestionsRequest): any => ({ + export const filterSensitiveLog = (obj: EndpointSummary): any => ({ ...obj, }); } /** - *

        A property name returned from a GetSearchSuggestions call that specifies - * a value in the PropertyNameQuery field.

        + *

        The properties of an experiment as returned by the Search API.

        */ -export interface PropertyNameSuggestion { +export interface Experiment { /** - *

        A suggested property name based on what you entered in the search textbox in the Amazon SageMaker - * console.

        + *

        The name of the experiment.

        */ - PropertyName?: string; -} + ExperimentName?: string; -export namespace PropertyNameSuggestion { /** - * @internal + *

        The Amazon Resource Name (ARN) of the experiment.

        */ - export const filterSensitiveLog = (obj: PropertyNameSuggestion): any => ({ - ...obj, - }); -} + ExperimentArn?: string; -export interface GetSearchSuggestionsResponse { /** - *

        A list of property names for a Resource that match a - * SuggestionQuery.

        + *

        The name of the experiment as displayed. If DisplayName isn't specified, + * ExperimentName is displayed.

        */ - PropertyNameSuggestions?: PropertyNameSuggestion[]; -} + DisplayName?: string; -export namespace GetSearchSuggestionsResponse { /** - * @internal + *

        The source of the experiment.

        */ - export const filterSensitiveLog = (obj: GetSearchSuggestionsResponse): any => ({ - ...obj, - }); -} + Source?: ExperimentSource; -/** - *

        Specifies configuration details for a Git repository when the repository is - * updated.

        - */ -export interface GitConfigForUpdate { /** - *

        The Amazon Resource Name (ARN) of the Amazon Web Services Secrets Manager secret that contains the - * credentials used to access the git repository. The secret must have a staging label of - * AWSCURRENT and must be in the following format:

        - *

        - * {"username": UserName, "password": - * Password} - *

        + *

        The description of the experiment.

        */ - SecretArn?: string; -} + Description?: string; -export namespace GitConfigForUpdate { /** - * @internal + *

        When the experiment was created.

        */ - export const filterSensitiveLog = (obj: GitConfigForUpdate): any => ({ - ...obj, - }); -} + CreationTime?: Date; -/** - *

        Container for human task user interface information.

        - */ -export interface HumanTaskUiSummary { /** - *

        The name of the human task user interface.

        + *

        Who created the experiment.

        */ - HumanTaskUiName: string | undefined; + CreatedBy?: UserContext; /** - *

        The Amazon Resource Name (ARN) of the human task user interface.

        + *

        When the experiment was last modified.

        + */ + LastModifiedTime?: Date; + + /** + *

        Information about the user who created or modified an experiment, trial, trial + * component, lineage group, or project.

        */ - HumanTaskUiArn: string | undefined; + LastModifiedBy?: UserContext; /** - *

        A timestamp when SageMaker created the human task user interface.

        + *

        The list of tags that are associated with the experiment. You can use Search API to search on the tags.

        */ - CreationTime: Date | undefined; + Tags?: Tag[]; } -export namespace HumanTaskUiSummary { +export namespace Experiment { /** * @internal */ - export const filterSensitiveLog = (obj: HumanTaskUiSummary): any => ({ + export const filterSensitiveLog = (obj: Experiment): any => ({ ...obj, }); } -export enum HyperParameterTuningJobSortByOptions { - CreationTime = "CreationTime", - Name = "Name", - Status = "Status", -} - /** - *

        Provides summary information about a hyperparameter tuning job.

        + *

        A summary of the properties of an experiment. To get the complete set of properties, call + * the DescribeExperiment API and provide the + * ExperimentName.

        */ -export interface HyperParameterTuningJobSummary { - /** - *

        The name of the tuning job.

        - */ - HyperParameterTuningJobName: string | undefined; - +export interface ExperimentSummary { /** - *

        The - * Amazon - * Resource Name (ARN) of the tuning job.

        + *

        The Amazon Resource Name (ARN) of the experiment.

        */ - HyperParameterTuningJobArn: string | undefined; + ExperimentArn?: string; /** - *

        The status of the - * tuning - * job.

        + *

        The name of the experiment.

        */ - HyperParameterTuningJobStatus: HyperParameterTuningJobStatus | string | undefined; + ExperimentName?: string; /** - *

        Specifies the search strategy hyperparameter tuning uses to choose which - * hyperparameters to - * use - * for each iteration. Currently, the only valid value is - * Bayesian.

        + *

        The name of the experiment as displayed. If DisplayName isn't specified, + * ExperimentName is displayed.

        */ - Strategy: HyperParameterTuningJobStrategyType | string | undefined; + DisplayName?: string; /** - *

        The date and time that the tuning job was created.

        + *

        The source of the experiment.

        */ - CreationTime: Date | undefined; + ExperimentSource?: ExperimentSource; /** - *

        The date and time that the tuning job ended.

        + *

        When the experiment was created.

        */ - HyperParameterTuningEndTime?: Date; + CreationTime?: Date; /** - *

        The date and time that the tuning job was - * modified.

        + *

        When the experiment was last modified.

        */ LastModifiedTime?: Date; +} +export namespace ExperimentSummary { /** - *

        The TrainingJobStatusCounters object that specifies the numbers of - * training jobs, categorized by status, that this tuning job launched.

        - */ - TrainingJobStatusCounters: TrainingJobStatusCounters | undefined; - - /** - *

        The ObjectiveStatusCounters object that specifies the numbers of - * training jobs, categorized by objective metric status, that this tuning job - * launched.

        + * @internal */ - ObjectiveStatusCounters: ObjectiveStatusCounters | undefined; + export const filterSensitiveLog = (obj: ExperimentSummary): any => ({ + ...obj, + }); +} +/** + *

        The container for the metadata for Fail step.

        + */ +export interface FailStepMetadata { /** - *

        The ResourceLimits object that specifies the maximum number of - * training jobs and parallel training jobs allowed for this tuning job.

        + *

        A message that you define and then is processed and rendered by + * the Fail step when the error occurs.

        */ - ResourceLimits?: ResourceLimits; + ErrorMessage?: string; } -export namespace HyperParameterTuningJobSummary { +export namespace FailStepMetadata { /** * @internal */ - export const filterSensitiveLog = (obj: HyperParameterTuningJobSummary): any => ({ + export const filterSensitiveLog = (obj: FailStepMetadata): any => ({ ...obj, }); } /** - *

        A SageMaker image. A SageMaker image represents a set of container images that are derived from - * a common base container image. Each of these container images is represented by a SageMaker - * ImageVersion.

        + *

        Amazon SageMaker Feature Store stores features in a collection called Feature Group. + * A Feature Group can be visualized as a table which has rows, + * with a unique identifier for each row where each column in the table is a feature. + * In principle, a Feature Group is composed of features and values per features.

        */ -export interface Image { +export interface FeatureGroup { /** - *

        When the image was created.

        + *

        The Amazon Resource Name (ARN) of a FeatureGroup.

        */ - CreationTime: Date | undefined; + FeatureGroupArn?: string; /** - *

        The description of the image.

        + *

        The name of the FeatureGroup.

        */ - Description?: string; + FeatureGroupName?: string; /** - *

        The name of the image as displayed.

        + *

        The name of the Feature whose value uniquely identifies a + * Record defined in the FeatureGroup + * FeatureDefinitions.

        */ - DisplayName?: string; + RecordIdentifierFeatureName?: string; /** - *

        When a create, update, or delete operation fails, the reason for the failure.

        + *

        The name of the feature that stores the EventTime of a Record in a + * FeatureGroup.

        + *

        A EventTime is point in time when a new event + * occurs that corresponds to the creation or update of a Record in + * FeatureGroup. All Records in the FeatureGroup + * must have a corresponding EventTime.

        */ - FailureReason?: string; + EventTimeFeatureName?: string; /** - *

        The Amazon Resource Name (ARN) of the image.

        + *

        A list of Features. Each Feature must include a + * FeatureName and a FeatureType.

        + *

        Valid FeatureTypes are Integral, Fractional and + * String.

        + *

        + * FeatureNames cannot be any of the following: is_deleted, + * write_time, api_invocation_time.

        + *

        You can create up to 2,500 FeatureDefinitions per + * FeatureGroup.

        */ - ImageArn: string | undefined; + FeatureDefinitions?: FeatureDefinition[]; /** - *

        The name of the image.

        + *

        The time a FeatureGroup was created.

        */ - ImageName: string | undefined; + CreationTime?: Date; /** - *

        The status of the image.

        + *

        A timestamp indicating the last time you updated the feature group.

        */ - ImageStatus: ImageStatus | string | undefined; + LastModifiedTime?: Date; /** - *

        When the image was last modified.

        + *

        Use this to specify the Amazon Web Services Key Management Service (KMS) Key ID, or + * KMSKeyId, for at rest data encryption. You can turn + * OnlineStore on or off by specifying the EnableOnlineStore flag + * in the CreateFeatureGroup API; the default value is False.

        */ - LastModifiedTime: Date | undefined; -} + OnlineStoreConfig?: OnlineStoreConfig; -export namespace Image { /** - * @internal + *

        The configuration of an OfflineStore.

        + *

        Provide an OfflineStoreConfig in a request to + * CreateFeatureGroup to create an OfflineStore.

        + *

        To encrypt an OfflineStore using at rest data encryption, specify Amazon Web Services Key + * Management Service (KMS) key ID, or KMSKeyId, in + * S3StorageConfig.

        */ - export const filterSensitiveLog = (obj: Image): any => ({ - ...obj, - }); -} - -export enum ImageSortBy { - CREATION_TIME = "CREATION_TIME", - IMAGE_NAME = "IMAGE_NAME", - LAST_MODIFIED_TIME = "LAST_MODIFIED_TIME", -} - -export enum ImageSortOrder { - ASCENDING = "ASCENDING", - DESCENDING = "DESCENDING", -} + OfflineStoreConfig?: OfflineStoreConfig; -/** - *

        A version of a SageMaker Image. A version represents an existing container - * image.

        - */ -export interface ImageVersion { /** - *

        When the version was created.

        + *

        The Amazon Resource Name (ARN) of the IAM execution role used to create the feature + * group.

        */ - CreationTime: Date | undefined; + RoleArn?: string; /** - *

        When a create or delete operation fails, the reason for the failure.

        + *

        A FeatureGroup status.

        */ - FailureReason?: string; + FeatureGroupStatus?: FeatureGroupStatus | string; /** - *

        The Amazon Resource Name (ARN) of the image the version is based on.

        + *

        The status of OfflineStore.

        */ - ImageArn: string | undefined; + OfflineStoreStatus?: OfflineStoreStatus; /** - *

        The ARN of the version.

        + *

        A value that indicates whether the feature group was updated successfully.

        */ - ImageVersionArn: string | undefined; + LastUpdateStatus?: LastUpdateStatus; /** - *

        The status of the version.

        + *

        The reason that the FeatureGroup failed to + * be replicated in the OfflineStore. This + * failure may be due to a failure to create a FeatureGroup in + * or delete a FeatureGroup from the OfflineStore.

        */ - ImageVersionStatus: ImageVersionStatus | string | undefined; + FailureReason?: string; /** - *

        When the version was last modified.

        + *

        A free form description of a FeatureGroup.

        */ - LastModifiedTime: Date | undefined; + Description?: string; /** - *

        The version number.

        + *

        Tags used to define a FeatureGroup.

        */ - Version: number | undefined; + Tags?: Tag[]; } -export namespace ImageVersion { +export namespace FeatureGroup { /** * @internal */ - export const filterSensitiveLog = (obj: ImageVersion): any => ({ + export const filterSensitiveLog = (obj: FeatureGroup): any => ({ ...obj, }); } -export enum ImageVersionSortBy { - CREATION_TIME = "CREATION_TIME", - LAST_MODIFIED_TIME = "LAST_MODIFIED_TIME", - VERSION = "VERSION", +export enum FeatureGroupSortBy { + CREATION_TIME = "CreationTime", + FEATURE_GROUP_STATUS = "FeatureGroupStatus", + NAME = "Name", + OFFLINE_STORE_STATUS = "OfflineStoreStatus", } -export enum ImageVersionSortOrder { - ASCENDING = "ASCENDING", - DESCENDING = "DESCENDING", +export enum FeatureGroupSortOrder { + ASCENDING = "Ascending", + DESCENDING = "Descending", } -/** - *

        A structure that contains a list of recommendation jobs.

        - */ -export interface InferenceRecommendationsJob { +/** + *

        The name, Arn, CreationTime, FeatureGroup values, + * LastUpdatedTime and EnableOnlineStorage status of a + * FeatureGroup.

        + */ +export interface FeatureGroupSummary { + /** + *

        The name of FeatureGroup.

        + */ + FeatureGroupName: string | undefined; + + /** + *

        Unique identifier for the FeatureGroup.

        + */ + FeatureGroupArn: string | undefined; + + /** + *

        A timestamp indicating the creation time of the FeatureGroup.

        + */ + CreationTime: Date | undefined; + + /** + *

        The status of a FeatureGroup. The status can be any of the following: + * Creating, Created, CreateFailed, + * Deleting or DeleteFailed.

        + */ + FeatureGroupStatus?: FeatureGroupStatus | string; + /** - *

        The name of the job.

        + *

        Notifies you if replicating data into the OfflineStore has failed. Returns + * either: Active or Blocked.

        */ - JobName: string | undefined; + OfflineStoreStatus?: OfflineStoreStatus; +} +export namespace FeatureGroupSummary { /** - *

        The job description.

        + * @internal */ - JobDescription: string | undefined; + export const filterSensitiveLog = (obj: FeatureGroupSummary): any => ({ + ...obj, + }); +} +/** + *

        The metadata for a feature. It can either be metadata that you specify, or metadata that is updated automatically.

        + */ +export interface FeatureMetadata { /** - *

        The recommendation job type.

        + *

        The Amazon Resource Name (ARN) of the feature group.

        */ - JobType: RecommendationJobType | string | undefined; + FeatureGroupArn?: string; /** - *

        The Amazon Resource Name (ARN) of the recommendation job.

        + *

        The name of the feature group containing the feature.

        */ - JobArn: string | undefined; + FeatureGroupName?: string; /** - *

        The status of the job.

        + *

        The name of the feature.

        */ - Status: RecommendationJobStatus | string | undefined; + FeatureName?: string; /** - *

        A timestamp that shows when the job was created.

        + *

        The data type of the feature.

        */ - CreationTime: Date | undefined; + FeatureType?: FeatureType | string; /** - *

        A timestamp that shows when the job completed.

        + *

        A timestamp indicating when the feature was created.

        */ - CompletionTime?: Date; + CreationTime?: Date; /** - *

        The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker - * to perform tasks on your behalf.

        + *

        A timestamp indicating when the feature was last modified.

        */ - RoleArn: string | undefined; + LastModifiedTime?: Date; /** - *

        A timestamp that shows when the job was last modified.

        + *

        An optional description that you specify to better describe the feature.

        */ - LastModifiedTime: Date | undefined; + Description?: string; /** - *

        If the job fails, provides information why the job failed.

        + *

        Optional key-value pairs that you specify to better describe the feature.

        */ - FailureReason?: string; + Parameters?: FeatureParameter[]; } -export namespace InferenceRecommendationsJob { +export namespace FeatureMetadata { /** * @internal */ - export const filterSensitiveLog = (obj: InferenceRecommendationsJob): any => ({ + export const filterSensitiveLog = (obj: FeatureMetadata): any => ({ ...obj, }); } +export enum Operator { + CONTAINS = "Contains", + EQUALS = "Equals", + EXISTS = "Exists", + GREATER_THAN = "GreaterThan", + GREATER_THAN_OR_EQUAL_TO = "GreaterThanOrEqualTo", + IN = "In", + LESS_THAN = "LessThan", + LESS_THAN_OR_EQUAL_TO = "LessThanOrEqualTo", + NOT_EQUALS = "NotEquals", + NOT_EXISTS = "NotExists", +} + /** - *

        Provides counts for human-labeled tasks in the labeling job.

        + *

        A conditional statement for a search expression that includes a resource property, a + * Boolean operator, and a value. Resources that match the statement are returned in the + * results from the Search API.

        + * + *

        If you specify a Value, but not an Operator, Amazon SageMaker uses the + * equals operator.

        + *

        In search, there are several property types:

        + *
        + *
        Metrics
        + *
        + *

        To define a metric filter, enter a value using the form + * "Metrics.", where is + * a metric name. For example, the following filter searches for training jobs + * with an "accuracy" metric greater than + * "0.9":

        + *

        + * { + *

        + *

        + * "Name": "Metrics.accuracy", + *

        + *

        + * "Operator": "GreaterThan", + *

        + *

        + * "Value": "0.9" + *

        + *

        + * } + *

        + *
        + *
        HyperParameters
        + *
        + *

        To define a hyperparameter filter, enter a value with the form + * "HyperParameters.". Decimal hyperparameter + * values are treated as a decimal in a comparison if the specified + * Value is also a decimal value. If the specified + * Value is an integer, the decimal hyperparameter values are + * treated as integers. For example, the following filter is satisfied by + * training jobs with a "learning_rate" hyperparameter that is + * less than "0.5":

        + *

        + * { + *

        + *

        + * "Name": "HyperParameters.learning_rate", + *

        + *

        + * "Operator": "LessThan", + *

        + *

        + * "Value": "0.5" + *

        + *

        + * } + *

        + *
        + *
        Tags
        + *
        + *

        To define a tag filter, enter a value with the form + * Tags..

        + *
        + *
        */ -export interface LabelCountersForWorkteam { +export interface Filter { /** - *

        The total number of data objects labeled by a human worker.

        + *

        A resource property name. For example, TrainingJobName. For + * valid property names, see SearchRecord. + * You must specify a valid property for the resource.

        */ - HumanLabeled?: number; + Name: string | undefined; /** - *

        The total number of data objects that need to be labeled by a human worker.

        + *

        A Boolean binary operator that is used to evaluate the filter. The operator field + * contains one of the following values:

        + *
        + *
        Equals
        + *
        + *

        The value of Name equals Value.

        + *
        + *
        NotEquals
        + *
        + *

        The value of Name doesn't equal Value.

        + *
        + *
        Exists
        + *
        + *

        The Name property exists.

        + *
        + *
        NotExists
        + *
        + *

        The Name property does not exist.

        + *
        + *
        GreaterThan
        + *
        + *

        The value of Name is greater than Value. + * Not supported for text properties.

        + *
        + *
        GreaterThanOrEqualTo
        + *
        + *

        The value of Name is greater than or equal to Value. + * Not supported for text properties.

        + *
        + *
        LessThan
        + *
        + *

        The value of Name is less than Value. + * Not supported for text properties.

        + *
        + *
        LessThanOrEqualTo
        + *
        + *

        The value of Name is less than or equal to Value. + * Not supported for text properties.

        + *
        + *
        In
        + *
        + *

        The value of Name is one of the comma delimited strings in + * Value. Only supported for text properties.

        + *
        + *
        Contains
        + *
        + *

        The value of Name contains the string Value. + * Only supported for text properties.

        + *

        A SearchExpression can include the Contains operator + * multiple times when the value of Name is one of the following:

        + *
          + *
        • + *

          + * Experiment.DisplayName + *

          + *
        • + *
        • + *

          + * Experiment.ExperimentName + *

          + *
        • + *
        • + *

          + * Experiment.Tags + *

          + *
        • + *
        • + *

          + * Trial.DisplayName + *

          + *
        • + *
        • + *

          + * Trial.TrialName + *

          + *
        • + *
        • + *

          + * Trial.Tags + *

          + *
        • + *
        • + *

          + * TrialComponent.DisplayName + *

          + *
        • + *
        • + *

          + * TrialComponent.TrialComponentName + *

          + *
        • + *
        • + *

          + * TrialComponent.Tags + *

          + *
        • + *
        • + *

          + * TrialComponent.InputArtifacts + *

          + *
        • + *
        • + *

          + * TrialComponent.OutputArtifacts + *

          + *
        • + *
        + *

        A SearchExpression can include only one Contains operator + * for all other values of Name. In these cases, if you include multiple + * Contains operators in the SearchExpression, the result is + * the following error message: "'CONTAINS' operator usage limit of 1 + * exceeded."

        + *
        + *
        */ - PendingHuman?: number; + Operator?: Operator | string; /** - *

        The total number of tasks in the labeling job.

        + *

        A value used with Name and Operator to determine which + * resources satisfy the filter's condition. For numerical properties, Value + * must be an integer or floating-point decimal. For timestamp properties, + * Value must be an ISO 8601 date-time string of the following format: + * YYYY-mm-dd'T'HH:MM:SS.

        */ - Total?: number; + Value?: string; } -export namespace LabelCountersForWorkteam { +export namespace Filter { /** * @internal */ - export const filterSensitiveLog = (obj: LabelCountersForWorkteam): any => ({ + export const filterSensitiveLog = (obj: Filter): any => ({ ...obj, }); } /** - *

        Provides summary information for a work team.

        + *

        Contains summary information about the flow definition.

        */ -export interface LabelingJobForWorkteamSummary { - /** - *

        The name of the labeling job that the work team is assigned to.

        - */ - LabelingJobName?: string; - - /** - *

        A unique identifier for a labeling job. You can use this to refer to a specific - * labeling job.

        - */ - JobReferenceCode: string | undefined; - - /** - *

        The Amazon Web Services account ID of the account used to start the labeling job.

        - */ - WorkRequesterAccountId: string | undefined; - - /** - *

        The date and time that the labeling job was created.

        - */ - CreationTime: Date | undefined; - - /** - *

        Provides information about the progress of a labeling job.

        - */ - LabelCounters?: LabelCountersForWorkteam; - - /** - *

        The configured number of workers per data object.

        - */ - NumberOfHumanWorkersPerDataObject?: number; -} - -export namespace LabelingJobForWorkteamSummary { +export interface FlowDefinitionSummary { /** - * @internal + *

        The name of the flow definition.

        */ - export const filterSensitiveLog = (obj: LabelingJobForWorkteamSummary): any => ({ - ...obj, - }); -} + FlowDefinitionName: string | undefined; -/** - *

        Provides summary information about a labeling job.

        - */ -export interface LabelingJobSummary { /** - *

        The name of the labeling job.

        + *

        The Amazon Resource Name (ARN) of the flow definition.

        */ - LabelingJobName: string | undefined; + FlowDefinitionArn: string | undefined; /** - *

        The Amazon Resource Name (ARN) assigned to the labeling job when it was - * created.

        + *

        The status of the flow definition. Valid values:

        */ - LabelingJobArn: string | undefined; + FlowDefinitionStatus: FlowDefinitionStatus | string | undefined; /** - *

        The date and time that the job was created (timestamp).

        + *

        The timestamp when SageMaker created the flow definition.

        */ CreationTime: Date | undefined; /** - *

        The date and time that the job was last modified (timestamp).

        - */ - LastModifiedTime: Date | undefined; - - /** - *

        The current status of the labeling job.

        - */ - LabelingJobStatus: LabelingJobStatus | string | undefined; - - /** - *

        Counts showing the progress of the labeling job.

        - */ - LabelCounters: LabelCounters | undefined; - - /** - *

        The Amazon Resource Name (ARN) of the work team assigned to the job.

        - */ - WorkteamArn: string | undefined; - - /** - *

        The Amazon Resource Name (ARN) of a Lambda function. The function is run before each - * data object is sent to a worker.

        - */ - PreHumanTaskLambdaArn: string | undefined; - - /** - *

        The Amazon Resource Name (ARN) of the Lambda function used to consolidate the - * annotations from individual workers into a label for a data object. For more - * information, see Annotation - * Consolidation.

        - */ - AnnotationConsolidationLambdaArn?: string; - - /** - *

        If the LabelingJobStatus field is Failed, this field - * contains a description of the error.

        + *

        The reason why the flow definition creation failed. A failure reason is returned only when the flow definition status is Failed.

        */ FailureReason?: string; - - /** - *

        The location of the output produced by the labeling job.

        - */ - LabelingJobOutput?: LabelingJobOutput; - - /** - *

        Input configuration for the labeling job.

        - */ - InputConfig?: LabelingJobInputConfig; -} - -export namespace LabelingJobSummary { - /** - * @internal - */ - export const filterSensitiveLog = (obj: LabelingJobSummary): any => ({ - ...obj, - }); -} - -/** - *

        Metadata for a Lambda step.

        - */ -export interface LambdaStepMetadata { - /** - *

        The Amazon Resource Name (ARN) of the Lambda function that was run by this step execution.

        - */ - Arn?: string; - - /** - *

        A list of the output parameters of the Lambda step.

        - */ - OutputParameters?: OutputParameter[]; } -export namespace LambdaStepMetadata { +export namespace FlowDefinitionSummary { /** * @internal */ - export const filterSensitiveLog = (obj: LambdaStepMetadata): any => ({ + export const filterSensitiveLog = (obj: FlowDefinitionSummary): any => ({ ...obj, }); } -/** - *

        Lists a summary of the properties of a lineage group. A lineage group provides a group of shareable lineage entity - * resources.

        - */ -export interface LineageGroupSummary { - /** - *

        The Amazon Resource Name (ARN) of the lineage group resource.

        - */ - LineageGroupArn?: string; - - /** - *

        The name or Amazon Resource Name (ARN) of the lineage group.

        - */ - LineageGroupName?: string; - - /** - *

        The display name of the lineage group summary.

        - */ - DisplayName?: string; - - /** - *

        The creation time of the lineage group summary.

        - */ - CreationTime?: Date; - +export interface GetDeviceFleetReportRequest { /** - *

        The last modified time of the lineage group summary.

        + *

        The name of the fleet.

        */ - LastModifiedTime?: Date; + DeviceFleetName: string | undefined; } -export namespace LineageGroupSummary { +export namespace GetDeviceFleetReportRequest { /** * @internal */ - export const filterSensitiveLog = (obj: LineageGroupSummary): any => ({ + export const filterSensitiveLog = (obj: GetDeviceFleetReportRequest): any => ({ ...obj, }); } -export enum LineageType { - ACTION = "Action", - ARTIFACT = "Artifact", - CONTEXT = "Context", - TRIAL_COMPONENT = "TrialComponent", -} - -export enum SortActionsBy { - CREATION_TIME = "CreationTime", - NAME = "Name", -} - -export enum SortOrder { - ASCENDING = "Ascending", - DESCENDING = "Descending", -} - -export interface ListActionsRequest { +export interface GetDeviceFleetReportResponse { /** - *

        A filter that returns only actions with the specified source URI.

        + *

        The Amazon Resource Name (ARN) of the device.

        */ - SourceUri?: string; + DeviceFleetArn: string | undefined; /** - *

        A filter that returns only actions of the specified type.

        + *

        The name of the fleet.

        */ - ActionType?: string; + DeviceFleetName: string | undefined; /** - *

        A filter that returns only actions created on or after the specified time.

        + *

        The output configuration for storing sample data collected by the fleet.

        */ - CreatedAfter?: Date; + OutputConfig?: EdgeOutputConfig; /** - *

        A filter that returns only actions created on or before the specified time.

        + *

        Description of the fleet.

        */ - CreatedBefore?: Date; + Description?: string; /** - *

        The property used to sort results. The default value is CreationTime.

        + *

        Timestamp of when the report was generated.

        */ - SortBy?: SortActionsBy | string; + ReportGenerated?: Date; /** - *

        The sort order. The default value is Descending.

        + *

        Status of devices.

        */ - SortOrder?: SortOrder | string; + DeviceStats?: DeviceStats; /** - *

        If the previous call to ListActions didn't return the full set of actions, - * the call returns a token for getting the next set of actions.

        + *

        The versions of Edge Manager agent deployed on the fleet.

        */ - NextToken?: string; + AgentVersions?: AgentVersion[]; /** - *

        The maximum number of actions to return in the response. The default value is 10.

        + *

        Status of model on device.

        */ - MaxResults?: number; + ModelStats?: EdgeModelStat[]; } -export namespace ListActionsRequest { +export namespace GetDeviceFleetReportResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ListActionsRequest): any => ({ + export const filterSensitiveLog = (obj: GetDeviceFleetReportResponse): any => ({ ...obj, }); } -export interface ListActionsResponse { - /** - *

        A list of actions and their properties.

        - */ - ActionSummaries?: ActionSummary[]; - +export interface GetLineageGroupPolicyRequest { /** - *

        A token for getting the next set of actions, if there are any.

        + *

        The name or Amazon Resource Name (ARN) of the lineage group.

        */ - NextToken?: string; + LineageGroupName: string | undefined; } -export namespace ListActionsResponse { +export namespace GetLineageGroupPolicyRequest { /** * @internal */ - export const filterSensitiveLog = (obj: ListActionsResponse): any => ({ + export const filterSensitiveLog = (obj: GetLineageGroupPolicyRequest): any => ({ ...obj, }); } -export interface ListAlgorithmsInput { +export interface GetLineageGroupPolicyResponse { /** - *

        A filter that returns only algorithms created after the specified time - * (timestamp).

        + *

        The Amazon Resource Name (ARN) of the lineage group.

        */ - CreationTimeAfter?: Date; + LineageGroupArn?: string; /** - *

        A filter that returns only algorithms created before the specified time - * (timestamp).

        + *

        The resource policy that gives access to the lineage group in another account.

        */ - CreationTimeBefore?: Date; + ResourcePolicy?: string; +} +export namespace GetLineageGroupPolicyResponse { /** - *

        The maximum number of algorithms to return in the response.

        + * @internal */ - MaxResults?: number; + export const filterSensitiveLog = (obj: GetLineageGroupPolicyResponse): any => ({ + ...obj, + }); +} +export interface GetModelPackageGroupPolicyInput { /** - *

        A string in the algorithm name. This filter returns only algorithms whose name - * contains the specified string.

        + *

        The name of the model group for which to get the resource policy.

        */ - NameContains?: string; + ModelPackageGroupName: string | undefined; +} +export namespace GetModelPackageGroupPolicyInput { /** - *

        If the response to a previous ListAlgorithms request was truncated, the - * response includes a NextToken. To retrieve the next set of algorithms, use - * the token in the next request.

        + * @internal */ - NextToken?: string; + export const filterSensitiveLog = (obj: GetModelPackageGroupPolicyInput): any => ({ + ...obj, + }); +} +export interface GetModelPackageGroupPolicyOutput { /** - *

        The parameter by which to sort the results. The default is - * CreationTime.

        + *

        The resource policy for the model group.

        */ - SortBy?: AlgorithmSortBy | string; + ResourcePolicy: string | undefined; +} +export namespace GetModelPackageGroupPolicyOutput { /** - *

        The sort order for the results. The default is Ascending.

        + * @internal */ - SortOrder?: SortOrder | string; + export const filterSensitiveLog = (obj: GetModelPackageGroupPolicyOutput): any => ({ + ...obj, + }); } -export namespace ListAlgorithmsInput { +export interface GetSagemakerServicecatalogPortfolioStatusInput {} + +export namespace GetSagemakerServicecatalogPortfolioStatusInput { /** * @internal */ - export const filterSensitiveLog = (obj: ListAlgorithmsInput): any => ({ + export const filterSensitiveLog = (obj: GetSagemakerServicecatalogPortfolioStatusInput): any => ({ ...obj, }); } -export interface ListAlgorithmsOutput { - /** - *

        >An array of AlgorithmSummary objects, each of which lists an - * algorithm.

        - */ - AlgorithmSummaryList: AlgorithmSummary[] | undefined; +export enum SagemakerServicecatalogStatus { + DISABLED = "Disabled", + ENABLED = "Enabled", +} +export interface GetSagemakerServicecatalogPortfolioStatusOutput { /** - *

        If the response is truncated, SageMaker returns this token. To retrieve the next set of - * algorithms, use it in the subsequent request.

        + *

        Whether Service Catalog is enabled or disabled in SageMaker.

        */ - NextToken?: string; + Status?: SagemakerServicecatalogStatus | string; } -export namespace ListAlgorithmsOutput { +export namespace GetSagemakerServicecatalogPortfolioStatusOutput { /** * @internal */ - export const filterSensitiveLog = (obj: ListAlgorithmsOutput): any => ({ + export const filterSensitiveLog = (obj: GetSagemakerServicecatalogPortfolioStatusOutput): any => ({ ...obj, }); } -export interface ListAppImageConfigsRequest { - /** - *

        The maximum number of AppImageConfigs to return in the response. The default value is - * 10.

        - */ - MaxResults?: number; +export enum ResourceType { + ENDPOINT = "Endpoint", + EXPERIMENT = "Experiment", + EXPERIMENT_TRIAL = "ExperimentTrial", + EXPERIMENT_TRIAL_COMPONENT = "ExperimentTrialComponent", + FEATURE_GROUP = "FeatureGroup", + FEATURE_METADATA = "FeatureMetadata", + MODEL_PACKAGE = "ModelPackage", + MODEL_PACKAGE_GROUP = "ModelPackageGroup", + PIPELINE = "Pipeline", + PIPELINE_EXECUTION = "PipelineExecution", + PROJECT = "Project", + TRAINING_JOB = "TrainingJob", +} +/** + *

        Part of the SuggestionQuery type. Specifies a hint for retrieving property + * names that begin with the specified text.

        + */ +export interface PropertyNameQuery { /** - *

        If the previous call to ListImages didn't return the full set of - * AppImageConfigs, the call returns a token for getting the next set of AppImageConfigs.

        + *

        Text that begins a property's name.

        */ - NextToken?: string; + PropertyNameHint: string | undefined; +} +export namespace PropertyNameQuery { /** - *

        A filter that returns only AppImageConfigs whose name contains the specified string.

        + * @internal */ - NameContains?: string; + export const filterSensitiveLog = (obj: PropertyNameQuery): any => ({ + ...obj, + }); +} +/** + *

        Specified in the GetSearchSuggestions request. + * Limits the property names that are included in the response.

        + */ +export interface SuggestionQuery { /** - *

        A filter that returns only AppImageConfigs created on or before the specified time.

        + *

        Defines a property name hint. Only property + * names that begin with the specified hint are included in the response.

        */ - CreationTimeBefore?: Date; + PropertyNameQuery?: PropertyNameQuery; +} +export namespace SuggestionQuery { /** - *

        A filter that returns only AppImageConfigs created on or after the specified time.

        + * @internal */ - CreationTimeAfter?: Date; + export const filterSensitiveLog = (obj: SuggestionQuery): any => ({ + ...obj, + }); +} +export interface GetSearchSuggestionsRequest { /** - *

        A filter that returns only AppImageConfigs modified on or before the specified time.

        + *

        The name of the Amazon SageMaker resource to search for.

        */ - ModifiedTimeBefore?: Date; + Resource: ResourceType | string | undefined; /** - *

        A filter that returns only AppImageConfigs modified on or after the specified time.

        + *

        Limits the property names that are included in the response.

        */ - ModifiedTimeAfter?: Date; + SuggestionQuery?: SuggestionQuery; +} +export namespace GetSearchSuggestionsRequest { /** - *

        The property used to sort results. The default value is CreationTime.

        + * @internal */ - SortBy?: AppImageConfigSortKey | string; + export const filterSensitiveLog = (obj: GetSearchSuggestionsRequest): any => ({ + ...obj, + }); +} +/** + *

        A property name returned from a GetSearchSuggestions call that specifies + * a value in the PropertyNameQuery field.

        + */ +export interface PropertyNameSuggestion { /** - *

        The sort order. The default value is Descending.

        + *

        A suggested property name based on what you entered in the search textbox in the Amazon SageMaker + * console.

        */ - SortOrder?: SortOrder | string; + PropertyName?: string; } -export namespace ListAppImageConfigsRequest { +export namespace PropertyNameSuggestion { /** * @internal */ - export const filterSensitiveLog = (obj: ListAppImageConfigsRequest): any => ({ + export const filterSensitiveLog = (obj: PropertyNameSuggestion): any => ({ ...obj, }); } -export interface ListAppImageConfigsResponse { - /** - *

        A token for getting the next set of AppImageConfigs, if there are any.

        - */ - NextToken?: string; - +export interface GetSearchSuggestionsResponse { /** - *

        A list of AppImageConfigs and their properties.

        + *

        A list of property names for a Resource that match a + * SuggestionQuery.

        */ - AppImageConfigs?: AppImageConfigDetails[]; + PropertyNameSuggestions?: PropertyNameSuggestion[]; } -export namespace ListAppImageConfigsResponse { +export namespace GetSearchSuggestionsResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ListAppImageConfigsResponse): any => ({ + export const filterSensitiveLog = (obj: GetSearchSuggestionsResponse): any => ({ ...obj, }); } -export interface ListAppsRequest { - /** - *

        If the previous response was truncated, you will receive this token. - * Use it in your next request to receive the next set of results.

        - */ - NextToken?: string; - +/** + *

        Specifies configuration details for a Git repository when the repository is + * updated.

        + */ +export interface GitConfigForUpdate { /** - *

        Returns a list up to a specified limit.

        + *

        The Amazon Resource Name (ARN) of the Amazon Web Services Secrets Manager secret that contains the + * credentials used to access the git repository. The secret must have a staging label of + * AWSCURRENT and must be in the following format:

        + *

        + * {"username": UserName, "password": + * Password} + *

        */ - MaxResults?: number; + SecretArn?: string; +} +export namespace GitConfigForUpdate { /** - *

        The sort order for the results. The default is Ascending.

        + * @internal */ - SortOrder?: SortOrder | string; + export const filterSensitiveLog = (obj: GitConfigForUpdate): any => ({ + ...obj, + }); +} +/** + *

        Container for human task user interface information.

        + */ +export interface HumanTaskUiSummary { /** - *

        The parameter by which to sort the results. The default is CreationTime.

        + *

        The name of the human task user interface.

        */ - SortBy?: AppSortKey | string; + HumanTaskUiName: string | undefined; /** - *

        A parameter to search for the domain ID.

        + *

        The Amazon Resource Name (ARN) of the human task user interface.

        */ - DomainIdEquals?: string; + HumanTaskUiArn: string | undefined; /** - *

        A parameter to search by user profile name.

        + *

        A timestamp when SageMaker created the human task user interface.

        */ - UserProfileNameEquals?: string; + CreationTime: Date | undefined; } -export namespace ListAppsRequest { +export namespace HumanTaskUiSummary { /** * @internal */ - export const filterSensitiveLog = (obj: ListAppsRequest): any => ({ + export const filterSensitiveLog = (obj: HumanTaskUiSummary): any => ({ ...obj, }); } -export interface ListAppsResponse { - /** - *

        The list of apps.

        - */ - Apps?: AppDetails[]; +export enum HyperParameterTuningJobSortByOptions { + CreationTime = "CreationTime", + Name = "Name", + Status = "Status", +} - /** - *

        If the previous response was truncated, you will receive this token. - * Use it in your next request to receive the next set of results.

        +/** + *

        Provides summary information about a hyperparameter tuning job.

        + */ +export interface HyperParameterTuningJobSummary { + /** + *

        The name of the tuning job.

        */ - NextToken?: string; -} + HyperParameterTuningJobName: string | undefined; -export namespace ListAppsResponse { /** - * @internal + *

        The + * Amazon + * Resource Name (ARN) of the tuning job.

        */ - export const filterSensitiveLog = (obj: ListAppsResponse): any => ({ - ...obj, - }); -} - -export enum SortArtifactsBy { - CREATION_TIME = "CreationTime", -} + HyperParameterTuningJobArn: string | undefined; -export interface ListArtifactsRequest { /** - *

        A filter that returns only artifacts with the specified source URI.

        + *

        The status of the + * tuning + * job.

        */ - SourceUri?: string; + HyperParameterTuningJobStatus: HyperParameterTuningJobStatus | string | undefined; /** - *

        A filter that returns only artifacts of the specified type.

        + *

        Specifies the search strategy hyperparameter tuning uses to choose which + * hyperparameters to + * use + * for each iteration. Currently, the only valid value is + * Bayesian.

        */ - ArtifactType?: string; + Strategy: HyperParameterTuningJobStrategyType | string | undefined; /** - *

        A filter that returns only artifacts created on or after the specified time.

        + *

        The date and time that the tuning job was created.

        */ - CreatedAfter?: Date; + CreationTime: Date | undefined; /** - *

        A filter that returns only artifacts created on or before the specified time.

        + *

        The date and time that the tuning job ended.

        */ - CreatedBefore?: Date; + HyperParameterTuningEndTime?: Date; /** - *

        The property used to sort results. The default value is CreationTime.

        + *

        The date and time that the tuning job was + * modified.

        */ - SortBy?: SortArtifactsBy | string; + LastModifiedTime?: Date; /** - *

        The sort order. The default value is Descending.

        + *

        The TrainingJobStatusCounters object that specifies the numbers of + * training jobs, categorized by status, that this tuning job launched.

        */ - SortOrder?: SortOrder | string; + TrainingJobStatusCounters: TrainingJobStatusCounters | undefined; /** - *

        If the previous call to ListArtifacts didn't return the full set of artifacts, - * the call returns a token for getting the next set of artifacts.

        + *

        The ObjectiveStatusCounters object that specifies the numbers of + * training jobs, categorized by objective metric status, that this tuning job + * launched.

        */ - NextToken?: string; + ObjectiveStatusCounters: ObjectiveStatusCounters | undefined; /** - *

        The maximum number of artifacts to return in the response. The default value is 10.

        + *

        The ResourceLimits object that specifies the maximum number of + * training jobs and parallel training jobs allowed for this tuning job.

        */ - MaxResults?: number; + ResourceLimits?: ResourceLimits; } -export namespace ListArtifactsRequest { +export namespace HyperParameterTuningJobSummary { /** * @internal */ - export const filterSensitiveLog = (obj: ListArtifactsRequest): any => ({ + export const filterSensitiveLog = (obj: HyperParameterTuningJobSummary): any => ({ ...obj, }); } -export interface ListArtifactsResponse { +/** + *

        A SageMaker image. A SageMaker image represents a set of container images that are derived from + * a common base container image. Each of these container images is represented by a SageMaker + * ImageVersion.

        + */ +export interface Image { /** - *

        A list of artifacts and their properties.

        + *

        When the image was created.

        */ - ArtifactSummaries?: ArtifactSummary[]; + CreationTime: Date | undefined; /** - *

        A token for getting the next set of artifacts, if there are any.

        + *

        The description of the image.

        */ - NextToken?: string; -} + Description?: string; -export namespace ListArtifactsResponse { /** - * @internal + *

        The name of the image as displayed.

        */ - export const filterSensitiveLog = (obj: ListArtifactsResponse): any => ({ - ...obj, - }); -} - -export enum SortAssociationsBy { - CREATION_TIME = "CreationTime", - DESTINATION_ARN = "DestinationArn", - DESTINATION_TYPE = "DestinationType", - SOURCE_ARN = "SourceArn", - SOURCE_TYPE = "SourceType", -} + DisplayName?: string; -export interface ListAssociationsRequest { /** - *

        A filter that returns only associations with the specified source ARN.

        + *

        When a create, update, or delete operation fails, the reason for the failure.

        */ - SourceArn?: string; + FailureReason?: string; /** - *

        A filter that returns only associations with the specified destination Amazon Resource Name (ARN).

        + *

        The Amazon Resource Name (ARN) of the image.

        */ - DestinationArn?: string; + ImageArn: string | undefined; /** - *

        A filter that returns only associations with the specified source type.

        + *

        The name of the image.

        */ - SourceType?: string; + ImageName: string | undefined; /** - *

        A filter that returns only associations with the specified destination type.

        + *

        The status of the image.

        */ - DestinationType?: string; + ImageStatus: ImageStatus | string | undefined; /** - *

        A filter that returns only associations of the specified type.

        + *

        When the image was last modified.

        */ - AssociationType?: AssociationEdgeType | string; + LastModifiedTime: Date | undefined; +} +export namespace Image { /** - *

        A filter that returns only associations created on or after the specified time.

        + * @internal */ - CreatedAfter?: Date; + export const filterSensitiveLog = (obj: Image): any => ({ + ...obj, + }); +} - /** - *

        A filter that returns only associations created on or before the specified time.

        - */ - CreatedBefore?: Date; +export enum ImageSortBy { + CREATION_TIME = "CREATION_TIME", + IMAGE_NAME = "IMAGE_NAME", + LAST_MODIFIED_TIME = "LAST_MODIFIED_TIME", +} + +export enum ImageSortOrder { + ASCENDING = "ASCENDING", + DESCENDING = "DESCENDING", +} +/** + *

        A version of a SageMaker Image. A version represents an existing container + * image.

        + */ +export interface ImageVersion { /** - *

        The property used to sort results. The default value is CreationTime.

        + *

        When the version was created.

        */ - SortBy?: SortAssociationsBy | string; + CreationTime: Date | undefined; /** - *

        The sort order. The default value is Descending.

        + *

        When a create or delete operation fails, the reason for the failure.

        */ - SortOrder?: SortOrder | string; + FailureReason?: string; /** - *

        If the previous call to ListAssociations didn't return the full set of associations, - * the call returns a token for getting the next set of associations.

        + *

        The Amazon Resource Name (ARN) of the image the version is based on.

        */ - NextToken?: string; + ImageArn: string | undefined; /** - *

        The maximum number of associations to return in the response. The default value is 10.

        + *

        The ARN of the version.

        */ - MaxResults?: number; -} + ImageVersionArn: string | undefined; -export namespace ListAssociationsRequest { /** - * @internal + *

        The status of the version.

        */ - export const filterSensitiveLog = (obj: ListAssociationsRequest): any => ({ - ...obj, - }); -} + ImageVersionStatus: ImageVersionStatus | string | undefined; -export interface ListAssociationsResponse { /** - *

        A list of associations and their properties.

        + *

        When the version was last modified.

        */ - AssociationSummaries?: AssociationSummary[]; + LastModifiedTime: Date | undefined; /** - *

        A token for getting the next set of associations, if there are any.

        + *

        The version number.

        */ - NextToken?: string; + Version: number | undefined; } -export namespace ListAssociationsResponse { +export namespace ImageVersion { /** * @internal */ - export const filterSensitiveLog = (obj: ListAssociationsResponse): any => ({ + export const filterSensitiveLog = (obj: ImageVersion): any => ({ ...obj, }); } -export interface ListAutoMLJobsRequest { +export enum ImageVersionSortBy { + CREATION_TIME = "CREATION_TIME", + LAST_MODIFIED_TIME = "LAST_MODIFIED_TIME", + VERSION = "VERSION", +} + +export enum ImageVersionSortOrder { + ASCENDING = "ASCENDING", + DESCENDING = "DESCENDING", +} + +/** + *

        A structure that contains a list of recommendation jobs.

        + */ +export interface InferenceRecommendationsJob { /** - *

        Request a list of jobs, using a filter for time.

        + *

        The name of the job.

        */ - CreationTimeAfter?: Date; + JobName: string | undefined; /** - *

        Request a list of jobs, using a filter for time.

        + *

        The job description.

        */ - CreationTimeBefore?: Date; + JobDescription: string | undefined; /** - *

        Request a list of jobs, using a filter for time.

        + *

        The recommendation job type.

        */ - LastModifiedTimeAfter?: Date; + JobType: RecommendationJobType | string | undefined; /** - *

        Request a list of jobs, using a filter for time.

        + *

        The Amazon Resource Name (ARN) of the recommendation job.

        */ - LastModifiedTimeBefore?: Date; + JobArn: string | undefined; /** - *

        Request a list of jobs, using a search filter for name.

        + *

        The status of the job.

        */ - NameContains?: string; + Status: RecommendationJobStatus | string | undefined; /** - *

        Request a list of jobs, using a filter for status.

        + *

        A timestamp that shows when the job was created.

        */ - StatusEquals?: AutoMLJobStatus | string; + CreationTime: Date | undefined; /** - *

        The sort order for the results. The default is Descending.

        + *

        A timestamp that shows when the job completed.

        */ - SortOrder?: AutoMLSortOrder | string; + CompletionTime?: Date; /** - *

        The parameter by which to sort the results. The default is Name.

        + *

        The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker + * to perform tasks on your behalf.

        */ - SortBy?: AutoMLSortBy | string; + RoleArn: string | undefined; /** - *

        Request a list of jobs up to a specified limit.

        + *

        A timestamp that shows when the job was last modified.

        */ - MaxResults?: number; + LastModifiedTime: Date | undefined; /** - *

        If the previous response was truncated, you receive this token. Use it in your next - * request to receive the next set of results.

        + *

        If the job fails, provides information why the job failed.

        */ - NextToken?: string; + FailureReason?: string; } -export namespace ListAutoMLJobsRequest { +export namespace InferenceRecommendationsJob { /** * @internal */ - export const filterSensitiveLog = (obj: ListAutoMLJobsRequest): any => ({ + export const filterSensitiveLog = (obj: InferenceRecommendationsJob): any => ({ ...obj, }); } -export interface ListAutoMLJobsResponse { +/** + *

        Provides counts for human-labeled tasks in the labeling job.

        + */ +export interface LabelCountersForWorkteam { + /** + *

        The total number of data objects labeled by a human worker.

        + */ + HumanLabeled?: number; + /** - *

        Returns a summary list of jobs.

        + *

        The total number of data objects that need to be labeled by a human worker.

        */ - AutoMLJobSummaries: AutoMLJobSummary[] | undefined; + PendingHuman?: number; /** - *

        If the previous response was truncated, you receive this token. Use it in your next - * request to receive the next set of results.

        + *

        The total number of tasks in the labeling job.

        */ - NextToken?: string; + Total?: number; } -export namespace ListAutoMLJobsResponse { +export namespace LabelCountersForWorkteam { /** * @internal */ - export const filterSensitiveLog = (obj: ListAutoMLJobsResponse): any => ({ + export const filterSensitiveLog = (obj: LabelCountersForWorkteam): any => ({ ...obj, }); } -export interface ListCandidatesForAutoMLJobRequest { - /** - *

        List the candidates created for the job by providing the job's name.

        - */ - AutoMLJobName: string | undefined; - +/** + *

        Provides summary information for a work team.

        + */ +export interface LabelingJobForWorkteamSummary { /** - *

        List the candidates for the job and filter by status.

        + *

        The name of the labeling job that the work team is assigned to.

        */ - StatusEquals?: CandidateStatus | string; + LabelingJobName?: string; /** - *

        List the candidates for the job and filter by candidate name.

        + *

        A unique identifier for a labeling job. You can use this to refer to a specific + * labeling job.

        */ - CandidateNameEquals?: string; + JobReferenceCode: string | undefined; /** - *

        The sort order for the results. The default is Ascending.

        + *

        The Amazon Web Services account ID of the account used to start the labeling job.

        */ - SortOrder?: AutoMLSortOrder | string; + WorkRequesterAccountId: string | undefined; /** - *

        The parameter by which to sort the results. The default is - * Descending.

        + *

        The date and time that the labeling job was created.

        */ - SortBy?: CandidateSortBy | string; + CreationTime: Date | undefined; /** - *

        List the job's candidates up to a specified limit.

        + *

        Provides information about the progress of a labeling job.

        */ - MaxResults?: number; + LabelCounters?: LabelCountersForWorkteam; /** - *

        If the previous response was truncated, you receive this token. Use it in your next - * request to receive the next set of results.

        + *

        The configured number of workers per data object.

        */ - NextToken?: string; + NumberOfHumanWorkersPerDataObject?: number; } -export namespace ListCandidatesForAutoMLJobRequest { +export namespace LabelingJobForWorkteamSummary { /** * @internal */ - export const filterSensitiveLog = (obj: ListCandidatesForAutoMLJobRequest): any => ({ + export const filterSensitiveLog = (obj: LabelingJobForWorkteamSummary): any => ({ ...obj, }); } -export interface ListCandidatesForAutoMLJobResponse { +/** + *

        Provides summary information about a labeling job.

        + */ +export interface LabelingJobSummary { /** - *

        Summaries about the AutoMLCandidates.

        + *

        The name of the labeling job.

        */ - Candidates: AutoMLCandidate[] | undefined; + LabelingJobName: string | undefined; /** - *

        If the previous response was truncated, you receive this token. Use it in your next - * request to receive the next set of results.

        + *

        The Amazon Resource Name (ARN) assigned to the labeling job when it was + * created.

        */ - NextToken?: string; -} + LabelingJobArn: string | undefined; -export namespace ListCandidatesForAutoMLJobResponse { /** - * @internal + *

        The date and time that the job was created (timestamp).

        */ - export const filterSensitiveLog = (obj: ListCandidatesForAutoMLJobResponse): any => ({ - ...obj, - }); -} + CreationTime: Date | undefined; -export interface ListCodeRepositoriesInput { /** - *

        A filter that returns only Git repositories that were created after the specified - * time.

        + *

        The date and time that the job was last modified (timestamp).

        */ - CreationTimeAfter?: Date; + LastModifiedTime: Date | undefined; /** - *

        A filter that returns only Git repositories that were created before the specified - * time.

        + *

        The current status of the labeling job.

        */ - CreationTimeBefore?: Date; + LabelingJobStatus: LabelingJobStatus | string | undefined; /** - *

        A filter that returns only Git repositories that were last modified after the - * specified time.

        + *

        Counts showing the progress of the labeling job.

        */ - LastModifiedTimeAfter?: Date; + LabelCounters: LabelCounters | undefined; /** - *

        A filter that returns only Git repositories that were last modified before the - * specified time.

        + *

        The Amazon Resource Name (ARN) of the work team assigned to the job.

        */ - LastModifiedTimeBefore?: Date; + WorkteamArn: string | undefined; /** - *

        The maximum number of Git repositories to return in the response.

        + *

        The Amazon Resource Name (ARN) of a Lambda function. The function is run before each + * data object is sent to a worker.

        */ - MaxResults?: number; + PreHumanTaskLambdaArn: string | undefined; /** - *

        A string in the Git repositories name. This filter returns only repositories whose - * name contains the specified string.

        + *

        The Amazon Resource Name (ARN) of the Lambda function used to consolidate the + * annotations from individual workers into a label for a data object. For more + * information, see Annotation + * Consolidation.

        */ - NameContains?: string; + AnnotationConsolidationLambdaArn?: string; /** - *

        If the result of a ListCodeRepositoriesOutput request was truncated, the - * response includes a NextToken. To get the next set of Git repositories, use - * the token in the next request.

        + *

        If the LabelingJobStatus field is Failed, this field + * contains a description of the error.

        */ - NextToken?: string; + FailureReason?: string; /** - *

        The field to sort results by. The default is Name.

        + *

        The location of the output produced by the labeling job.

        */ - SortBy?: CodeRepositorySortBy | string; + LabelingJobOutput?: LabelingJobOutput; /** - *

        The sort order for results. The default is Ascending.

        + *

        Input configuration for the labeling job.

        */ - SortOrder?: CodeRepositorySortOrder | string; + InputConfig?: LabelingJobInputConfig; } -export namespace ListCodeRepositoriesInput { +export namespace LabelingJobSummary { /** * @internal */ - export const filterSensitiveLog = (obj: ListCodeRepositoriesInput): any => ({ + export const filterSensitiveLog = (obj: LabelingJobSummary): any => ({ ...obj, }); } -export interface ListCodeRepositoriesOutput { +/** + *

        Metadata for a Lambda step.

        + */ +export interface LambdaStepMetadata { /** - *

        Gets a list of summaries of the Git repositories. Each summary specifies the following - * values for the repository:

        - *
          - *
        • - *

          Name

          - *
        • - *
        • - *

          Amazon Resource Name (ARN)

          - *
        • - *
        • - *

          Creation time

          - *
        • - *
        • - *

          Last modified time

          - *
        • - *
        • - *

          Configuration information, including the URL location of the repository and - * the ARN of the Amazon Web Services Secrets Manager secret that contains the credentials used - * to access the repository.

          - *
        • - *
        + *

        The Amazon Resource Name (ARN) of the Lambda function that was run by this step execution.

        */ - CodeRepositorySummaryList: CodeRepositorySummary[] | undefined; + Arn?: string; /** - *

        If the result of a ListCodeRepositoriesOutput request was truncated, the - * response includes a NextToken. To get the next set of Git repositories, use - * the token in the next request.

        + *

        A list of the output parameters of the Lambda step.

        */ - NextToken?: string; + OutputParameters?: OutputParameter[]; } -export namespace ListCodeRepositoriesOutput { +export namespace LambdaStepMetadata { /** * @internal */ - export const filterSensitiveLog = (obj: ListCodeRepositoriesOutput): any => ({ + export const filterSensitiveLog = (obj: LambdaStepMetadata): any => ({ ...obj, }); } -export enum ListCompilationJobsSortBy { - CREATION_TIME = "CreationTime", - NAME = "Name", - STATUS = "Status", -} +/** + *

        Lists a summary of the properties of a lineage group. A lineage group provides a group of shareable lineage entity + * resources.

        + */ +export interface LineageGroupSummary { + /** + *

        The Amazon Resource Name (ARN) of the lineage group resource.

        + */ + LineageGroupArn?: string; -export interface ListCompilationJobsRequest { /** - *

        If the result of the previous ListCompilationJobs request was truncated, - * the response includes a NextToken. To retrieve the next set of model - * compilation jobs, use the token in the next request.

        + *

        The name or Amazon Resource Name (ARN) of the lineage group.

        */ - NextToken?: string; + LineageGroupName?: string; /** - *

        The maximum number of model compilation jobs to return in the response.

        + *

        The display name of the lineage group summary.

        */ - MaxResults?: number; + DisplayName?: string; /** - *

        A filter that returns the model compilation jobs that were created after a specified - * time.

        + *

        The creation time of the lineage group summary.

        */ - CreationTimeAfter?: Date; + CreationTime?: Date; /** - *

        A filter that returns the model compilation jobs that were created before a specified - * time.

        + *

        The last modified time of the lineage group summary.

        */ - CreationTimeBefore?: Date; + LastModifiedTime?: Date; +} +export namespace LineageGroupSummary { /** - *

        A filter that returns the model compilation jobs that were modified after a specified - * time.

        + * @internal */ - LastModifiedTimeAfter?: Date; + export const filterSensitiveLog = (obj: LineageGroupSummary): any => ({ + ...obj, + }); +} + +export enum LineageType { + ACTION = "Action", + ARTIFACT = "Artifact", + CONTEXT = "Context", + TRIAL_COMPONENT = "TrialComponent", +} + +export enum SortActionsBy { + CREATION_TIME = "CreationTime", + NAME = "Name", +} + +export enum SortOrder { + ASCENDING = "Ascending", + DESCENDING = "Descending", +} +export interface ListActionsRequest { /** - *

        A filter that returns the model compilation jobs that were modified before a specified - * time.

        + *

        A filter that returns only actions with the specified source URI.

        */ - LastModifiedTimeBefore?: Date; + SourceUri?: string; + + /** + *

        A filter that returns only actions of the specified type.

        + */ + ActionType?: string; + + /** + *

        A filter that returns only actions created on or after the specified time.

        + */ + CreatedAfter?: Date; + + /** + *

        A filter that returns only actions created on or before the specified time.

        + */ + CreatedBefore?: Date; /** - *

        A filter that returns the model compilation jobs whose name contains a specified - * string.

        + *

        The property used to sort results. The default value is CreationTime.

        */ - NameContains?: string; + SortBy?: SortActionsBy | string; /** - *

        A filter that retrieves model compilation jobs with a specific DescribeCompilationJobResponse$CompilationJobStatus status.

        + *

        The sort order. The default value is Descending.

        */ - StatusEquals?: CompilationJobStatus | string; + SortOrder?: SortOrder | string; /** - *

        The field by which to sort results. The default is CreationTime.

        + *

        If the previous call to ListActions didn't return the full set of actions, + * the call returns a token for getting the next set of actions.

        */ - SortBy?: ListCompilationJobsSortBy | string; + NextToken?: string; /** - *

        The sort order for results. The default is Ascending.

        + *

        The maximum number of actions to return in the response. The default value is 10.

        */ - SortOrder?: SortOrder | string; + MaxResults?: number; } -export namespace ListCompilationJobsRequest { +export namespace ListActionsRequest { /** * @internal */ - export const filterSensitiveLog = (obj: ListCompilationJobsRequest): any => ({ + export const filterSensitiveLog = (obj: ListActionsRequest): any => ({ ...obj, }); } -export interface ListCompilationJobsResponse { +export interface ListActionsResponse { /** - *

        An array of CompilationJobSummary objects, each describing a model - * compilation job.

        + *

        A list of actions and their properties.

        */ - CompilationJobSummaries: CompilationJobSummary[] | undefined; + ActionSummaries?: ActionSummary[]; /** - *

        If the response is truncated, Amazon SageMaker returns this NextToken. To retrieve - * the next set of model compilation jobs, use this token in the next request.

        + *

        A token for getting the next set of actions, if there are any.

        */ NextToken?: string; } -export namespace ListCompilationJobsResponse { +export namespace ListActionsResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ListCompilationJobsResponse): any => ({ + export const filterSensitiveLog = (obj: ListActionsResponse): any => ({ ...obj, }); } -export enum SortContextsBy { - CREATION_TIME = "CreationTime", - NAME = "Name", -} - -export interface ListContextsRequest { - /** - *

        A filter that returns only contexts with the specified source URI.

        - */ - SourceUri?: string; - +export interface ListAlgorithmsInput { /** - *

        A filter that returns only contexts of the specified type.

        + *

        A filter that returns only algorithms created after the specified time + * (timestamp).

        */ - ContextType?: string; + CreationTimeAfter?: Date; /** - *

        A filter that returns only contexts created on or after the specified time.

        + *

        A filter that returns only algorithms created before the specified time + * (timestamp).

        */ - CreatedAfter?: Date; + CreationTimeBefore?: Date; /** - *

        A filter that returns only contexts created on or before the specified time.

        + *

        The maximum number of algorithms to return in the response.

        */ - CreatedBefore?: Date; + MaxResults?: number; /** - *

        The property used to sort results. The default value is CreationTime.

        + *

        A string in the algorithm name. This filter returns only algorithms whose name + * contains the specified string.

        */ - SortBy?: SortContextsBy | string; + NameContains?: string; /** - *

        The sort order. The default value is Descending.

        + *

        If the response to a previous ListAlgorithms request was truncated, the + * response includes a NextToken. To retrieve the next set of algorithms, use + * the token in the next request.

        */ - SortOrder?: SortOrder | string; + NextToken?: string; /** - *

        If the previous call to ListContexts didn't return the full set of contexts, - * the call returns a token for getting the next set of contexts.

        + *

        The parameter by which to sort the results. The default is + * CreationTime.

        */ - NextToken?: string; + SortBy?: AlgorithmSortBy | string; /** - *

        The maximum number of contexts to return in the response. The default value is 10.

        + *

        The sort order for the results. The default is Ascending.

        */ - MaxResults?: number; + SortOrder?: SortOrder | string; } -export namespace ListContextsRequest { +export namespace ListAlgorithmsInput { /** * @internal */ - export const filterSensitiveLog = (obj: ListContextsRequest): any => ({ + export const filterSensitiveLog = (obj: ListAlgorithmsInput): any => ({ ...obj, }); } -export interface ListContextsResponse { +export interface ListAlgorithmsOutput { /** - *

        A list of contexts and their properties.

        + *

        >An array of AlgorithmSummary objects, each of which lists an + * algorithm.

        */ - ContextSummaries?: ContextSummary[]; + AlgorithmSummaryList: AlgorithmSummary[] | undefined; /** - *

        A token for getting the next set of contexts, if there are any.

        + *

        If the response is truncated, SageMaker returns this token. To retrieve the next set of + * algorithms, use it in the subsequent request.

        */ NextToken?: string; } -export namespace ListContextsResponse { +export namespace ListAlgorithmsOutput { /** * @internal */ - export const filterSensitiveLog = (obj: ListContextsResponse): any => ({ + export const filterSensitiveLog = (obj: ListAlgorithmsOutput): any => ({ ...obj, }); } -export enum MonitoringJobDefinitionSortKey { - CREATION_TIME = "CreationTime", - NAME = "Name", -} +export interface ListAppImageConfigsRequest { + /** + *

        The maximum number of AppImageConfigs to return in the response. The default value is + * 10.

        + */ + MaxResults?: number; -export interface ListDataQualityJobDefinitionsRequest { /** - *

        A filter that lists the data quality job definitions associated with the specified - * endpoint.

        + *

        If the previous call to ListImages didn't return the full set of + * AppImageConfigs, the call returns a token for getting the next set of AppImageConfigs.

        */ - EndpointName?: string; + NextToken?: string; /** - *

        The field to sort results by. The default is CreationTime.

        + *

        A filter that returns only AppImageConfigs whose name contains the specified string.

        */ - SortBy?: MonitoringJobDefinitionSortKey | string; + NameContains?: string; /** - *

        The sort order for results. The default is Descending.

        + *

        A filter that returns only AppImageConfigs created on or before the specified time.

        */ - SortOrder?: SortOrder | string; + CreationTimeBefore?: Date; /** - *

        If the result of the previous ListDataQualityJobDefinitions request was - * truncated, the response includes a NextToken. To retrieve the next set of - * transform jobs, use the token in the next request.>

        + *

        A filter that returns only AppImageConfigs created on or after the specified time.

        */ - NextToken?: string; + CreationTimeAfter?: Date; /** - *

        The maximum number of data quality monitoring job definitions to return in the - * response.

        + *

        A filter that returns only AppImageConfigs modified on or before the specified time.

        */ - MaxResults?: number; + ModifiedTimeBefore?: Date; /** - *

        A string in the data quality monitoring job definition name. This filter returns only - * data quality monitoring job definitions whose name contains the specified string.

        + *

        A filter that returns only AppImageConfigs modified on or after the specified time.

        */ - NameContains?: string; + ModifiedTimeAfter?: Date; /** - *

        A filter that returns only data quality monitoring job definitions created before the - * specified time.

        + *

        The property used to sort results. The default value is CreationTime.

        */ - CreationTimeBefore?: Date; + SortBy?: AppImageConfigSortKey | string; /** - *

        A filter that returns only data quality monitoring job definitions created after the - * specified time.

        + *

        The sort order. The default value is Descending.

        */ - CreationTimeAfter?: Date; + SortOrder?: SortOrder | string; } -export namespace ListDataQualityJobDefinitionsRequest { +export namespace ListAppImageConfigsRequest { /** * @internal */ - export const filterSensitiveLog = (obj: ListDataQualityJobDefinitionsRequest): any => ({ + export const filterSensitiveLog = (obj: ListAppImageConfigsRequest): any => ({ ...obj, }); } -/** - *

        Summary information about a monitoring job.

        - */ -export interface MonitoringJobDefinitionSummary { +export interface ListAppImageConfigsResponse { /** - *

        The name of the monitoring job.

        + *

        A token for getting the next set of AppImageConfigs, if there are any.

        */ - MonitoringJobDefinitionName: string | undefined; + NextToken?: string; /** - *

        The Amazon Resource Name (ARN) of the monitoring job.

        + *

        A list of AppImageConfigs and their properties.

        */ - MonitoringJobDefinitionArn: string | undefined; + AppImageConfigs?: AppImageConfigDetails[]; +} +export namespace ListAppImageConfigsResponse { /** - *

        The time that the monitoring job was created.

        + * @internal */ - CreationTime: Date | undefined; + export const filterSensitiveLog = (obj: ListAppImageConfigsResponse): any => ({ + ...obj, + }); +} +export interface ListAppsRequest { /** - *

        The name of the endpoint that the job monitors.

        + *

        If the previous response was truncated, you will receive this token. + * Use it in your next request to receive the next set of results.

        */ - EndpointName: string | undefined; + NextToken?: string; + + /** + *

        Returns a list up to a specified limit.

        + */ + MaxResults?: number; + + /** + *

        The sort order for the results. The default is Ascending.

        + */ + SortOrder?: SortOrder | string; + + /** + *

        The parameter by which to sort the results. The default is CreationTime.

        + */ + SortBy?: AppSortKey | string; + + /** + *

        A parameter to search for the domain ID.

        + */ + DomainIdEquals?: string; + + /** + *

        A parameter to search by user profile name.

        + */ + UserProfileNameEquals?: string; } -export namespace MonitoringJobDefinitionSummary { +export namespace ListAppsRequest { /** * @internal */ - export const filterSensitiveLog = (obj: MonitoringJobDefinitionSummary): any => ({ + export const filterSensitiveLog = (obj: ListAppsRequest): any => ({ ...obj, }); } -export interface ListDataQualityJobDefinitionsResponse { +export interface ListAppsResponse { /** - *

        A list of data quality monitoring job definitions.

        + *

        The list of apps.

        */ - JobDefinitionSummaries: MonitoringJobDefinitionSummary[] | undefined; + Apps?: AppDetails[]; /** - *

        If the result of the previous ListDataQualityJobDefinitions request was - * truncated, the response includes a NextToken. To retrieve the next set of data - * quality monitoring job definitions, use the token in the next request.

        + *

        If the previous response was truncated, you will receive this token. + * Use it in your next request to receive the next set of results.

        */ NextToken?: string; } -export namespace ListDataQualityJobDefinitionsResponse { +export namespace ListAppsResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ListDataQualityJobDefinitionsResponse): any => ({ + export const filterSensitiveLog = (obj: ListAppsResponse): any => ({ ...obj, }); } -export enum ListDeviceFleetsSortBy { - CreationTime = "CREATION_TIME", - LastModifiedTime = "LAST_MODIFIED_TIME", - Name = "NAME", +export enum SortArtifactsBy { + CREATION_TIME = "CreationTime", } -export interface ListDeviceFleetsRequest { - /** - *

        The response from the last list when returning a list large enough to need tokening.

        - */ - NextToken?: string; - +export interface ListArtifactsRequest { /** - *

        The maximum number of results to select.

        + *

        A filter that returns only artifacts with the specified source URI.

        */ - MaxResults?: number; + SourceUri?: string; /** - *

        Filter fleets where packaging job was created after specified time.

        + *

        A filter that returns only artifacts of the specified type.

        */ - CreationTimeAfter?: Date; + ArtifactType?: string; /** - *

        Filter fleets where the edge packaging job was created before specified time.

        + *

        A filter that returns only artifacts created on or after the specified time.

        */ - CreationTimeBefore?: Date; + CreatedAfter?: Date; /** - *

        Select fleets where the job was updated after X

        + *

        A filter that returns only artifacts created on or before the specified time.

        */ - LastModifiedTimeAfter?: Date; + CreatedBefore?: Date; /** - *

        Select fleets where the job was updated before X

        + *

        The property used to sort results. The default value is CreationTime.

        */ - LastModifiedTimeBefore?: Date; + SortBy?: SortArtifactsBy | string; /** - *

        Filter for fleets containing this name in their fleet device name.

        + *

        The sort order. The default value is Descending.

        */ - NameContains?: string; + SortOrder?: SortOrder | string; /** - *

        The column to sort by.

        + *

        If the previous call to ListArtifacts didn't return the full set of artifacts, + * the call returns a token for getting the next set of artifacts.

        */ - SortBy?: ListDeviceFleetsSortBy | string; + NextToken?: string; /** - *

        What direction to sort in.

        + *

        The maximum number of artifacts to return in the response. The default value is 10.

        */ - SortOrder?: SortOrder | string; + MaxResults?: number; } -export namespace ListDeviceFleetsRequest { +export namespace ListArtifactsRequest { /** * @internal */ - export const filterSensitiveLog = (obj: ListDeviceFleetsRequest): any => ({ + export const filterSensitiveLog = (obj: ListArtifactsRequest): any => ({ ...obj, }); } -export interface ListDeviceFleetsResponse { +export interface ListArtifactsResponse { /** - *

        Summary of the device fleet.

        + *

        A list of artifacts and their properties.

        */ - DeviceFleetSummaries: DeviceFleetSummary[] | undefined; + ArtifactSummaries?: ArtifactSummary[]; /** - *

        The response from the last list when returning a list large enough to need tokening.

        + *

        A token for getting the next set of artifacts, if there are any.

        */ NextToken?: string; } -export namespace ListDeviceFleetsResponse { +export namespace ListArtifactsResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ListDeviceFleetsResponse): any => ({ + export const filterSensitiveLog = (obj: ListArtifactsResponse): any => ({ ...obj, }); } -export interface ListDevicesRequest { +export enum SortAssociationsBy { + CREATION_TIME = "CreationTime", + DESTINATION_ARN = "DestinationArn", + DESTINATION_TYPE = "DestinationType", + SOURCE_ARN = "SourceArn", + SOURCE_TYPE = "SourceType", +} + +export interface ListAssociationsRequest { /** - *

        The response from the last list when returning a list large enough to need tokening.

        + *

        A filter that returns only associations with the specified source ARN.

        */ - NextToken?: string; + SourceArn?: string; /** - *

        Maximum number of results to select.

        + *

        A filter that returns only associations with the specified destination Amazon Resource Name (ARN).

        */ - MaxResults?: number; + DestinationArn?: string; /** - *

        Select fleets where the job was updated after X

        + *

        A filter that returns only associations with the specified source type.

        */ - LatestHeartbeatAfter?: Date; + SourceType?: string; /** - *

        A filter that searches devices that contains this name in any of their models.

        + *

        A filter that returns only associations with the specified destination type.

        */ - ModelName?: string; + DestinationType?: string; /** - *

        Filter for fleets containing this name in their device fleet name.

        + *

        A filter that returns only associations of the specified type.

        */ - DeviceFleetName?: string; -} + AssociationType?: AssociationEdgeType | string; -export namespace ListDevicesRequest { /** - * @internal + *

        A filter that returns only associations created on or after the specified time.

        */ - export const filterSensitiveLog = (obj: ListDevicesRequest): any => ({ - ...obj, - }); -} + CreatedAfter?: Date; -export interface ListDevicesResponse { /** - *

        Summary of devices.

        + *

        A filter that returns only associations created on or before the specified time.

        */ - DeviceSummaries: DeviceSummary[] | undefined; + CreatedBefore?: Date; /** - *

        The response from the last list when returning a list large enough to need tokening.

        + *

        The property used to sort results. The default value is CreationTime.

        */ - NextToken?: string; -} + SortBy?: SortAssociationsBy | string; -export namespace ListDevicesResponse { /** - * @internal + *

        The sort order. The default value is Descending.

        */ - export const filterSensitiveLog = (obj: ListDevicesResponse): any => ({ - ...obj, - }); -} + SortOrder?: SortOrder | string; -export interface ListDomainsRequest { /** - *

        If the previous response was truncated, you will receive this token. - * Use it in your next request to receive the next set of results.

        + *

        If the previous call to ListAssociations didn't return the full set of associations, + * the call returns a token for getting the next set of associations.

        */ NextToken?: string; /** - *

        Returns a list up to a specified limit.

        + *

        The maximum number of associations to return in the response. The default value is 10.

        */ MaxResults?: number; } -export namespace ListDomainsRequest { +export namespace ListAssociationsRequest { /** * @internal */ - export const filterSensitiveLog = (obj: ListDomainsRequest): any => ({ + export const filterSensitiveLog = (obj: ListAssociationsRequest): any => ({ ...obj, }); } -export interface ListDomainsResponse { +export interface ListAssociationsResponse { /** - *

        The list of domains.

        + *

        A list of associations and their properties.

        */ - Domains?: DomainDetails[]; + AssociationSummaries?: AssociationSummary[]; /** - *

        If the previous response was truncated, you will receive this token. - * Use it in your next request to receive the next set of results.

        + *

        A token for getting the next set of associations, if there are any.

        */ NextToken?: string; } -export namespace ListDomainsResponse { +export namespace ListAssociationsResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ListDomainsResponse): any => ({ + export const filterSensitiveLog = (obj: ListAssociationsResponse): any => ({ ...obj, }); } -export enum ListEdgePackagingJobsSortBy { - CreationTime = "CREATION_TIME", - EdgePackagingJobStatus = "STATUS", - LastModifiedTime = "LAST_MODIFIED_TIME", - ModelName = "MODEL_NAME", - Name = "NAME", -} - -export interface ListEdgePackagingJobsRequest { - /** - *

        The response from the last list when returning a list large enough to need tokening.

        - */ - NextToken?: string; - - /** - *

        Maximum number of results to select.

        - */ - MaxResults?: number; - +export interface ListAutoMLJobsRequest { /** - *

        Select jobs where the job was created after specified time.

        + *

        Request a list of jobs, using a filter for time.

        */ CreationTimeAfter?: Date; /** - *

        Select jobs where the job was created before specified time.

        + *

        Request a list of jobs, using a filter for time.

        */ CreationTimeBefore?: Date; /** - *

        Select jobs where the job was updated after specified time.

        + *

        Request a list of jobs, using a filter for time.

        */ LastModifiedTimeAfter?: Date; /** - *

        Select jobs where the job was updated before specified time.

        + *

        Request a list of jobs, using a filter for time.

        */ LastModifiedTimeBefore?: Date; /** - *

        Filter for jobs containing this name in their packaging job name.

        + *

        Request a list of jobs, using a search filter for name.

        */ NameContains?: string; /** - *

        Filter for jobs where the model name contains this string.

        + *

        Request a list of jobs, using a filter for status.

        */ - ModelNameContains?: string; + StatusEquals?: AutoMLJobStatus | string; /** - *

        The job status to filter for.

        + *

        The sort order for the results. The default is Descending.

        */ - StatusEquals?: EdgePackagingJobStatus | string; + SortOrder?: AutoMLSortOrder | string; /** - *

        Use to specify what column to sort by.

        + *

        The parameter by which to sort the results. The default is Name.

        */ - SortBy?: ListEdgePackagingJobsSortBy | string; + SortBy?: AutoMLSortBy | string; /** - *

        What direction to sort by.

        + *

        Request a list of jobs up to a specified limit.

        */ - SortOrder?: SortOrder | string; + MaxResults?: number; + + /** + *

        If the previous response was truncated, you receive this token. Use it in your next + * request to receive the next set of results.

        + */ + NextToken?: string; } -export namespace ListEdgePackagingJobsRequest { +export namespace ListAutoMLJobsRequest { /** * @internal */ - export const filterSensitiveLog = (obj: ListEdgePackagingJobsRequest): any => ({ + export const filterSensitiveLog = (obj: ListAutoMLJobsRequest): any => ({ ...obj, }); } -export interface ListEdgePackagingJobsResponse { +export interface ListAutoMLJobsResponse { /** - *

        Summaries of edge packaging jobs.

        + *

        Returns a summary list of jobs.

        */ - EdgePackagingJobSummaries: EdgePackagingJobSummary[] | undefined; + AutoMLJobSummaries: AutoMLJobSummary[] | undefined; /** - *

        Token to use when calling the next page of results.

        + *

        If the previous response was truncated, you receive this token. Use it in your next + * request to receive the next set of results.

        */ NextToken?: string; } -export namespace ListEdgePackagingJobsResponse { +export namespace ListAutoMLJobsResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ListEdgePackagingJobsResponse): any => ({ + export const filterSensitiveLog = (obj: ListAutoMLJobsResponse): any => ({ ...obj, }); } -export enum OrderKey { - Ascending = "Ascending", - Descending = "Descending", -} - -export interface ListEndpointConfigsInput { +export interface ListCandidatesForAutoMLJobRequest { /** - *

        The field to sort results by. The default is CreationTime.

        + *

        List the candidates created for the job by providing the job's name.

        */ - SortBy?: EndpointConfigSortKey | string; + AutoMLJobName: string | undefined; /** - *

        The sort order for results. The default is Descending.

        + *

        List the candidates for the job and filter by status.

        */ - SortOrder?: OrderKey | string; + StatusEquals?: CandidateStatus | string; /** - *

        If the result of the previous ListEndpointConfig request was - * truncated, the response includes a NextToken. To retrieve the next set of - * endpoint configurations, use the token in the next request.

        + *

        List the candidates for the job and filter by candidate name.

        */ - NextToken?: string; + CandidateNameEquals?: string; /** - *

        The maximum number of training jobs to return in the response.

        + *

        The sort order for the results. The default is Ascending.

        */ - MaxResults?: number; + SortOrder?: AutoMLSortOrder | string; /** - *

        A string in the endpoint configuration name. This filter returns only endpoint - * configurations whose name contains the specified string.

        + *

        The parameter by which to sort the results. The default is + * Descending.

        */ - NameContains?: string; + SortBy?: CandidateSortBy | string; /** - *

        A filter that returns only endpoint configurations created before the specified - * time (timestamp).

        + *

        List the job's candidates up to a specified limit.

        */ - CreationTimeBefore?: Date; + MaxResults?: number; /** - *

        A filter that returns only endpoint configurations with a creation time greater - * than or equal to the specified time (timestamp).

        + *

        If the previous response was truncated, you receive this token. Use it in your next + * request to receive the next set of results.

        */ - CreationTimeAfter?: Date; + NextToken?: string; } -export namespace ListEndpointConfigsInput { +export namespace ListCandidatesForAutoMLJobRequest { /** * @internal */ - export const filterSensitiveLog = (obj: ListEndpointConfigsInput): any => ({ + export const filterSensitiveLog = (obj: ListCandidatesForAutoMLJobRequest): any => ({ ...obj, }); } -export interface ListEndpointConfigsOutput { +export interface ListCandidatesForAutoMLJobResponse { /** - *

        An array of endpoint configurations.

        + *

        Summaries about the AutoMLCandidates.

        */ - EndpointConfigs: EndpointConfigSummary[] | undefined; + Candidates: AutoMLCandidate[] | undefined; /** - *

        If the response is truncated, SageMaker returns this token. To retrieve the next set of - * endpoint configurations, use it in the subsequent request

        + *

        If the previous response was truncated, you receive this token. Use it in your next + * request to receive the next set of results.

        */ NextToken?: string; } -export namespace ListEndpointConfigsOutput { +export namespace ListCandidatesForAutoMLJobResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ListEndpointConfigsOutput): any => ({ + export const filterSensitiveLog = (obj: ListCandidatesForAutoMLJobResponse): any => ({ ...obj, }); } -export interface ListEndpointsInput { - /** - *

        Sorts the list of results. The default is CreationTime.

        - */ - SortBy?: EndpointSortKey | string; - +export interface ListCodeRepositoriesInput { /** - *

        The sort order for results. The default is Descending.

        + *

        A filter that returns only Git repositories that were created after the specified + * time.

        */ - SortOrder?: OrderKey | string; + CreationTimeAfter?: Date; /** - *

        If the result of a ListEndpoints request was truncated, the response - * includes a NextToken. To retrieve the next set of endpoints, use the token - * in the next request.

        + *

        A filter that returns only Git repositories that were created before the specified + * time.

        */ - NextToken?: string; + CreationTimeBefore?: Date; /** - *

        The maximum number of endpoints to return in the response. This value defaults to - * 10.

        + *

        A filter that returns only Git repositories that were last modified after the + * specified time.

        */ - MaxResults?: number; + LastModifiedTimeAfter?: Date; /** - *

        A string in endpoint names. This filter returns only endpoints whose name contains - * the specified string.

        + *

        A filter that returns only Git repositories that were last modified before the + * specified time.

        */ - NameContains?: string; + LastModifiedTimeBefore?: Date; /** - *

        A filter that returns only endpoints that were created before the specified time - * (timestamp).

        + *

        The maximum number of Git repositories to return in the response.

        */ - CreationTimeBefore?: Date; + MaxResults?: number; /** - *

        A filter that returns only endpoints with a creation time greater than or equal to - * the specified time (timestamp).

        + *

        A string in the Git repositories name. This filter returns only repositories whose + * name contains the specified string.

        */ - CreationTimeAfter?: Date; + NameContains?: string; /** - *

        A filter that returns only endpoints that were modified before the specified - * timestamp.

        + *

        If the result of a ListCodeRepositoriesOutput request was truncated, the + * response includes a NextToken. To get the next set of Git repositories, use + * the token in the next request.

        */ - LastModifiedTimeBefore?: Date; + NextToken?: string; /** - *

        A filter that returns only endpoints that were modified after the specified - * timestamp.

        + *

        The field to sort results by. The default is Name.

        */ - LastModifiedTimeAfter?: Date; + SortBy?: CodeRepositorySortBy | string; /** - *

        A filter that returns only endpoints with the specified status.

        + *

        The sort order for results. The default is Ascending.

        */ - StatusEquals?: EndpointStatus | string; + SortOrder?: CodeRepositorySortOrder | string; } -export namespace ListEndpointsInput { +export namespace ListCodeRepositoriesInput { /** * @internal */ - export const filterSensitiveLog = (obj: ListEndpointsInput): any => ({ + export const filterSensitiveLog = (obj: ListCodeRepositoriesInput): any => ({ ...obj, }); } -export interface ListEndpointsOutput { +export interface ListCodeRepositoriesOutput { /** - *

        An array or endpoint objects.

        + *

        Gets a list of summaries of the Git repositories. Each summary specifies the following + * values for the repository:

        + *
          + *
        • + *

          Name

          + *
        • + *
        • + *

          Amazon Resource Name (ARN)

          + *
        • + *
        • + *

          Creation time

          + *
        • + *
        • + *

          Last modified time

          + *
        • + *
        • + *

          Configuration information, including the URL location of the repository and + * the ARN of the Amazon Web Services Secrets Manager secret that contains the credentials used + * to access the repository.

          + *
        • + *
        */ - Endpoints: EndpointSummary[] | undefined; + CodeRepositorySummaryList: CodeRepositorySummary[] | undefined; /** - *

        If the response is truncated, SageMaker returns this token. To retrieve the next set of - * training jobs, use it in the subsequent request.

        + *

        If the result of a ListCodeRepositoriesOutput request was truncated, the + * response includes a NextToken. To get the next set of Git repositories, use + * the token in the next request.

        */ NextToken?: string; } -export namespace ListEndpointsOutput { +export namespace ListCodeRepositoriesOutput { /** * @internal */ - export const filterSensitiveLog = (obj: ListEndpointsOutput): any => ({ + export const filterSensitiveLog = (obj: ListCodeRepositoriesOutput): any => ({ ...obj, }); } -export enum SortExperimentsBy { +export enum ListCompilationJobsSortBy { CREATION_TIME = "CreationTime", NAME = "Name", + STATUS = "Status", } -export interface ListExperimentsRequest { +export interface ListCompilationJobsRequest { /** - *

        A filter that returns only experiments created after the specified time.

        + *

        If the result of the previous ListCompilationJobs request was truncated, + * the response includes a NextToken. To retrieve the next set of model + * compilation jobs, use the token in the next request.

        */ - CreatedAfter?: Date; + NextToken?: string; /** - *

        A filter that returns only experiments created before the specified time.

        + *

        The maximum number of model compilation jobs to return in the response.

        */ - CreatedBefore?: Date; + MaxResults?: number; /** - *

        The property used to sort results. The default value is CreationTime.

        + *

        A filter that returns the model compilation jobs that were created after a specified + * time.

        */ - SortBy?: SortExperimentsBy | string; + CreationTimeAfter?: Date; /** - *

        The sort order. The default value is Descending.

        + *

        A filter that returns the model compilation jobs that were created before a specified + * time.

        */ - SortOrder?: SortOrder | string; + CreationTimeBefore?: Date; /** - *

        If the previous call to ListExperiments didn't return the full set of - * experiments, the call returns a token for getting the next set of experiments.

        + *

        A filter that returns the model compilation jobs that were modified after a specified + * time.

        */ - NextToken?: string; + LastModifiedTimeAfter?: Date; /** - *

        The maximum number of experiments to return in the response. The default value is - * 10.

        + *

        A filter that returns the model compilation jobs that were modified before a specified + * time.

        */ - MaxResults?: number; + LastModifiedTimeBefore?: Date; + + /** + *

        A filter that returns the model compilation jobs whose name contains a specified + * string.

        + */ + NameContains?: string; + + /** + *

        A filter that retrieves model compilation jobs with a specific DescribeCompilationJobResponse$CompilationJobStatus status.

        + */ + StatusEquals?: CompilationJobStatus | string; + + /** + *

        The field by which to sort results. The default is CreationTime.

        + */ + SortBy?: ListCompilationJobsSortBy | string; + + /** + *

        The sort order for results. The default is Ascending.

        + */ + SortOrder?: SortOrder | string; } -export namespace ListExperimentsRequest { +export namespace ListCompilationJobsRequest { /** * @internal */ - export const filterSensitiveLog = (obj: ListExperimentsRequest): any => ({ + export const filterSensitiveLog = (obj: ListCompilationJobsRequest): any => ({ ...obj, }); } -export interface ListExperimentsResponse { +export interface ListCompilationJobsResponse { /** - *

        A list of the summaries of your experiments.

        + *

        An array of CompilationJobSummary objects, each describing a model + * compilation job.

        */ - ExperimentSummaries?: ExperimentSummary[]; + CompilationJobSummaries: CompilationJobSummary[] | undefined; /** - *

        A token for getting the next set of experiments, if there are any.

        + *

        If the response is truncated, Amazon SageMaker returns this NextToken. To retrieve + * the next set of model compilation jobs, use this token in the next request.

        */ NextToken?: string; } -export namespace ListExperimentsResponse { +export namespace ListCompilationJobsResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ListExperimentsResponse): any => ({ + export const filterSensitiveLog = (obj: ListCompilationJobsResponse): any => ({ ...obj, }); } -export interface ListFeatureGroupsRequest { - /** - *

        A string that partially matches one or more FeatureGroups names. Filters - * FeatureGroups by name.

        - */ - NameContains?: string; +export enum SortContextsBy { + CREATION_TIME = "CreationTime", + NAME = "Name", +} +export interface ListContextsRequest { /** - *

        A FeatureGroup status. Filters by FeatureGroup status.

        + *

        A filter that returns only contexts with the specified source URI.

        */ - FeatureGroupStatusEquals?: FeatureGroupStatus | string; + SourceUri?: string; /** - *

        An OfflineStore status. Filters by OfflineStore status.

        + *

        A filter that returns only contexts of the specified type.

        */ - OfflineStoreStatusEquals?: OfflineStoreStatusValue | string; + ContextType?: string; /** - *

        Use this parameter to search for FeatureGroupss created after a specific - * date and time.

        + *

        A filter that returns only contexts created on or after the specified time.

        */ - CreationTimeAfter?: Date; + CreatedAfter?: Date; /** - *

        Use this parameter to search for FeatureGroupss created before a specific - * date and time.

        + *

        A filter that returns only contexts created on or before the specified time.

        */ - CreationTimeBefore?: Date; + CreatedBefore?: Date; /** - *

        The order in which feature groups are listed.

        + *

        The property used to sort results. The default value is CreationTime.

        */ - SortOrder?: FeatureGroupSortOrder | string; + SortBy?: SortContextsBy | string; /** - *

        The value on which the feature group list is sorted.

        + *

        The sort order. The default value is Descending.

        */ - SortBy?: FeatureGroupSortBy | string; + SortOrder?: SortOrder | string; /** - *

        The maximum number of results returned by ListFeatureGroups.

        + *

        If the previous call to ListContexts didn't return the full set of contexts, + * the call returns a token for getting the next set of contexts.

        */ - MaxResults?: number; + NextToken?: string; /** - *

        A token to resume pagination of ListFeatureGroups results.

        + *

        The maximum number of contexts to return in the response. The default value is 10.

        */ - NextToken?: string; + MaxResults?: number; } -export namespace ListFeatureGroupsRequest { +export namespace ListContextsRequest { /** * @internal */ - export const filterSensitiveLog = (obj: ListFeatureGroupsRequest): any => ({ + export const filterSensitiveLog = (obj: ListContextsRequest): any => ({ ...obj, }); } -export interface ListFeatureGroupsResponse { +export interface ListContextsResponse { /** - *

        A summary of feature groups.

        + *

        A list of contexts and their properties.

        */ - FeatureGroupSummaries: FeatureGroupSummary[] | undefined; + ContextSummaries?: ContextSummary[]; /** - *

        A token to resume pagination of ListFeatureGroups results.

        + *

        A token for getting the next set of contexts, if there are any.

        */ - NextToken: string | undefined; + NextToken?: string; } -export namespace ListFeatureGroupsResponse { +export namespace ListContextsResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ListFeatureGroupsResponse): any => ({ + export const filterSensitiveLog = (obj: ListContextsResponse): any => ({ ...obj, }); } -export interface ListFlowDefinitionsRequest { +export enum MonitoringJobDefinitionSortKey { + CREATION_TIME = "CreationTime", + NAME = "Name", +} + +export interface ListDataQualityJobDefinitionsRequest { /** - *

        A filter that returns only flow definitions with a creation time greater than or equal to the specified timestamp.

        + *

        A filter that lists the data quality job definitions associated with the specified + * endpoint.

        */ - CreationTimeAfter?: Date; + EndpointName?: string; /** - *

        A filter that returns only flow definitions that were created before the specified timestamp.

        + *

        The field to sort results by. The default is CreationTime.

        */ - CreationTimeBefore?: Date; + SortBy?: MonitoringJobDefinitionSortKey | string; /** - *

        An optional value that specifies whether you want the results sorted in Ascending or Descending order.

        + *

        The sort order for results. The default is Descending.

        */ SortOrder?: SortOrder | string; /** - *

        A token to resume pagination.

        + *

        If the result of the previous ListDataQualityJobDefinitions request was + * truncated, the response includes a NextToken. To retrieve the next set of + * data quality monitoring job definitions, use the token in the next request.

        */ NextToken?: string; /** - *

        The total number of items to return. If the total number of available items is more than the value specified in MaxResults, then a NextToken will be provided in the output that you can use to resume pagination.

        + *

        The maximum number of data quality monitoring job definitions to return in the + * response.

        */ MaxResults?: number; -} -export namespace ListFlowDefinitionsRequest { /** - * @internal + *

        A string in the data quality monitoring job definition name. This filter returns only + * data quality monitoring job definitions whose name contains the specified string.

        */ - export const filterSensitiveLog = (obj: ListFlowDefinitionsRequest): any => ({ - ...obj, - }); -} + NameContains?: string; -export interface ListFlowDefinitionsResponse { /** - *

        An array of objects describing the flow definitions.

        + *

        A filter that returns only data quality monitoring job definitions created before the + * specified time.

        */ - FlowDefinitionSummaries: FlowDefinitionSummary[] | undefined; + CreationTimeBefore?: Date; /** - *

        A token to resume pagination.

        + *

        A filter that returns only data quality monitoring job definitions created after the + * specified time.

        */ - NextToken?: string; + CreationTimeAfter?: Date; } -export namespace ListFlowDefinitionsResponse { +export namespace ListDataQualityJobDefinitionsRequest { /** * @internal */ - export const filterSensitiveLog = (obj: ListFlowDefinitionsResponse): any => ({ + export const filterSensitiveLog = (obj: ListDataQualityJobDefinitionsRequest): any => ({ ...obj, }); } -export interface ListHumanTaskUisRequest { - /** - *

        A filter that returns only human task user interfaces with a creation time greater than or equal to the specified timestamp.

        - */ - CreationTimeAfter?: Date; - +/** + *

        Summary information about a monitoring job.

        + */ +export interface MonitoringJobDefinitionSummary { /** - *

        A filter that returns only human task user interfaces that were created before the specified timestamp.

        + *

        The name of the monitoring job.

        */ - CreationTimeBefore?: Date; + MonitoringJobDefinitionName: string | undefined; /** - *

        An optional value that specifies whether you want the results sorted in Ascending or Descending order.

        + *

        The Amazon Resource Name (ARN) of the monitoring job.

        */ - SortOrder?: SortOrder | string; + MonitoringJobDefinitionArn: string | undefined; /** - *

        A token to resume pagination.

        + *

        The time that the monitoring job was created.

        */ - NextToken?: string; + CreationTime: Date | undefined; /** - *

        The total number of items to return. If the total number of available items is more than the value specified in MaxResults, then a NextToken will be provided in the output that you can use to resume pagination.

        + *

        The name of the endpoint that the job monitors.

        */ - MaxResults?: number; + EndpointName: string | undefined; } -export namespace ListHumanTaskUisRequest { +export namespace MonitoringJobDefinitionSummary { /** * @internal */ - export const filterSensitiveLog = (obj: ListHumanTaskUisRequest): any => ({ + export const filterSensitiveLog = (obj: MonitoringJobDefinitionSummary): any => ({ ...obj, }); } -export interface ListHumanTaskUisResponse { +export interface ListDataQualityJobDefinitionsResponse { /** - *

        An array of objects describing the human task user interfaces.

        + *

        A list of data quality monitoring job definitions.

        */ - HumanTaskUiSummaries: HumanTaskUiSummary[] | undefined; + JobDefinitionSummaries: MonitoringJobDefinitionSummary[] | undefined; /** - *

        A token to resume pagination.

        + *

        If the result of the previous ListDataQualityJobDefinitions request was + * truncated, the response includes a NextToken. To retrieve the next set of data + * quality monitoring job definitions, use the token in the next request.

        */ NextToken?: string; } -export namespace ListHumanTaskUisResponse { +export namespace ListDataQualityJobDefinitionsResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ListHumanTaskUisResponse): any => ({ + export const filterSensitiveLog = (obj: ListDataQualityJobDefinitionsResponse): any => ({ ...obj, }); } -export interface ListHyperParameterTuningJobsRequest { +export enum ListDeviceFleetsSortBy { + CreationTime = "CREATION_TIME", + LastModifiedTime = "LAST_MODIFIED_TIME", + Name = "NAME", +} + +export interface ListDeviceFleetsRequest { /** - *

        If the result of the previous ListHyperParameterTuningJobs request was - * truncated, the response includes a NextToken. To retrieve the next set of - * tuning jobs, use the token in the next request.

        + *

        The response from the last list when returning a list large enough to need tokening.

        */ NextToken?: string; /** - *

        The - * maximum number of tuning jobs to return. The default value is - * 10.

        + *

        The maximum number of results to select.

        */ MaxResults?: number; /** - *

        The field to sort results by. The default is Name.

        - */ - SortBy?: HyperParameterTuningJobSortByOptions | string; - - /** - *

        The sort order for results. The default is Ascending.

        + *

        Filter fleets where packaging job was created after specified time.

        */ - SortOrder?: SortOrder | string; + CreationTimeAfter?: Date; /** - *

        A string in the tuning job name. This filter returns only tuning jobs whose name - * contains the specified string.

        + *

        Filter fleets where the edge packaging job was created before specified time.

        */ - NameContains?: string; + CreationTimeBefore?: Date; /** - *

        A filter that returns only tuning jobs that were created after the specified - * time.

        + *

        Select fleets where the job was updated after the specified time.

        */ - CreationTimeAfter?: Date; + LastModifiedTimeAfter?: Date; /** - *

        A filter that returns only tuning jobs that were created before the specified - * time.

        + *

        Select fleets where the job was updated before the specified time.

        */ - CreationTimeBefore?: Date; + LastModifiedTimeBefore?: Date; /** - *

        A filter that returns only tuning jobs that were modified after the specified - * time.

        + *

        Filter for fleets containing this name in their fleet device name.

        */ - LastModifiedTimeAfter?: Date; + NameContains?: string; /** - *

        A filter that returns only tuning jobs that were modified before the specified - * time.

        + *

        The column to sort by.

        */ - LastModifiedTimeBefore?: Date; + SortBy?: ListDeviceFleetsSortBy | string; /** - *

        A filter that returns only tuning jobs with the specified status.

        + *

        What direction to sort in.

        */ - StatusEquals?: HyperParameterTuningJobStatus | string; + SortOrder?: SortOrder | string; } -export namespace ListHyperParameterTuningJobsRequest { +export namespace ListDeviceFleetsRequest { /** * @internal */ - export const filterSensitiveLog = (obj: ListHyperParameterTuningJobsRequest): any => ({ + export const filterSensitiveLog = (obj: ListDeviceFleetsRequest): any => ({ ...obj, }); } -export interface ListHyperParameterTuningJobsResponse { +export interface ListDeviceFleetsResponse { /** - *

        A list of HyperParameterTuningJobSummary objects that - * describe - * the tuning jobs that the ListHyperParameterTuningJobs - * request returned.

        + *

        Summary of the device fleet.

        */ - HyperParameterTuningJobSummaries: HyperParameterTuningJobSummary[] | undefined; + DeviceFleetSummaries: DeviceFleetSummary[] | undefined; /** - *

        If the result of this ListHyperParameterTuningJobs request was truncated, - * the response includes a NextToken. To retrieve the next set of tuning jobs, - * use the token in the next request.

        + *

        The response from the last list when returning a list large enough to need tokening.

        */ NextToken?: string; } -export namespace ListHyperParameterTuningJobsResponse { +export namespace ListDeviceFleetsResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ListHyperParameterTuningJobsResponse): any => ({ + export const filterSensitiveLog = (obj: ListDeviceFleetsResponse): any => ({ ...obj, }); } -export interface ListImagesRequest { +export interface ListDevicesRequest { /** - *

        A filter that returns only images created on or after the specified time.

        + *

        The response from the last list when returning a list large enough to need tokening.

        */ - CreationTimeAfter?: Date; + NextToken?: string; /** - *

        A filter that returns only images created on or before the specified time.

        + *

        Maximum number of results to select.

        */ - CreationTimeBefore?: Date; + MaxResults?: number; /** - *

        A filter that returns only images modified on or after the specified time.

        + *

        A filter that returns only devices whose latest heartbeat occurred after the specified time.

        */ - LastModifiedTimeAfter?: Date; + LatestHeartbeatAfter?: Date; /** - *

        A filter that returns only images modified on or before the specified time.

        + *

        A filter that searches for devices that contain this name in any of their models.

        */ - LastModifiedTimeBefore?: Date; + ModelName?: string; /** - *

        The maximum number of images to return in the response. The default value is 10.

        + *

        Filter for fleets containing this name in their device fleet name.

        */ - MaxResults?: number; + DeviceFleetName?: string; +} +export namespace ListDevicesRequest { /** - *

        A filter that returns only images whose name contains the specified string.

        + * @internal */ - NameContains?: string; + export const filterSensitiveLog = (obj: ListDevicesRequest): any => ({ + ...obj, + }); +} +export interface ListDevicesResponse { /** - *

        If the previous call to ListImages didn't return the full set of images, - * the call returns a token for getting the next set of images.

        + *

        Summary of devices.

        + */ + DeviceSummaries: DeviceSummary[] | undefined; + + /** + *

        The response from the last list when returning a list large enough to need tokening.

        */ NextToken?: string; +} +export namespace ListDevicesResponse { /** - *

        The property used to sort results. The default value is CREATION_TIME.

        + * @internal */ - SortBy?: ImageSortBy | string; + export const filterSensitiveLog = (obj: ListDevicesResponse): any => ({ + ...obj, + }); +} +export interface ListDomainsRequest { /** - *

        The sort order. The default value is DESCENDING.

        + *

        If the previous response was truncated, you will receive this token. + * Use it in your next request to receive the next set of results.

        */ - SortOrder?: ImageSortOrder | string; + NextToken?: string; + + /** + *

        Returns a list up to a specified limit.

        + */ + MaxResults?: number; } -export namespace ListImagesRequest { +export namespace ListDomainsRequest { /** * @internal */ - export const filterSensitiveLog = (obj: ListImagesRequest): any => ({ + export const filterSensitiveLog = (obj: ListDomainsRequest): any => ({ ...obj, }); } -export interface ListImagesResponse { +export interface ListDomainsResponse { /** - *

        A list of images and their properties.

        + *

        The list of domains.

        */ - Images?: Image[]; + Domains?: DomainDetails[]; /** - *

        A token for getting the next set of images, if there are any.

        + *

        If the previous response was truncated, you will receive this token. + * Use it in your next request to receive the next set of results.

        */ NextToken?: string; } -export namespace ListImagesResponse { +export namespace ListDomainsResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ListImagesResponse): any => ({ + export const filterSensitiveLog = (obj: ListDomainsResponse): any => ({ ...obj, }); } -export interface ListImageVersionsRequest { +export enum ListEdgeDeploymentPlansSortBy { + CreationTime = "CREATION_TIME", + DeviceFleetName = "DEVICE_FLEET_NAME", + LastModifiedTime = "LAST_MODIFIED_TIME", + Name = "NAME", +} + +export interface ListEdgeDeploymentPlansRequest { /** - *

        A filter that returns only versions created on or after the specified time.

        + *

        The response from the last list when returning a list large enough to need tokening.

        */ - CreationTimeAfter?: Date; + NextToken?: string; /** - *

        A filter that returns only versions created on or before the specified time.

        + *

        The maximum number of results to select (50 by default).

        */ - CreationTimeBefore?: Date; + MaxResults?: number; /** - *

        The name of the image to list the versions of.

        + *

        Selects edge deployment plans created after this time.

        */ - ImageName: string | undefined; + CreationTimeAfter?: Date; /** - *

        A filter that returns only versions modified on or after the specified time.

        + *

        Selects edge deployment plans created before this time.

        + */ + CreationTimeBefore?: Date; + + /** + *

        Selects edge deployment plans that were last updated after this time.

        */ LastModifiedTimeAfter?: Date; /** - *

        A filter that returns only versions modified on or before the specified time.

        + *

        Selects edge deployment plans that were last updated before this time.

        */ LastModifiedTimeBefore?: Date; /** - *

        The maximum number of versions to return in the response. The default value is 10.

        + *

        Selects edge deployment plans with names containing this name.

        */ - MaxResults?: number; + NameContains?: string; /** - *

        If the previous call to ListImageVersions didn't return the full set of - * versions, the call returns a token for getting the next set of versions.

        + *

        Selects edge deployment plans with a device fleet name containing this name.

        */ - NextToken?: string; + DeviceFleetNameContains?: string; /** - *

        The property used to sort results. The default value is CREATION_TIME.

        + *

        The column by which to sort the edge deployment plans. Can be one of NAME, DEVICE_FLEET_NAME, CREATION_TIME, LAST_MODIFIED_TIME.

        */ - SortBy?: ImageVersionSortBy | string; + SortBy?: ListEdgeDeploymentPlansSortBy | string; /** - *

        The sort order. The default value is DESCENDING.

        + *

        The direction of the sorting (ascending or descending).

        */ - SortOrder?: ImageVersionSortOrder | string; + SortOrder?: SortOrder | string; } -export namespace ListImageVersionsRequest { +export namespace ListEdgeDeploymentPlansRequest { /** * @internal */ - export const filterSensitiveLog = (obj: ListImageVersionsRequest): any => ({ + export const filterSensitiveLog = (obj: ListEdgeDeploymentPlansRequest): any => ({ ...obj, }); } -export interface ListImageVersionsResponse { +export interface ListEdgeDeploymentPlansResponse { /** - *

        A list of versions and their properties.

        + *

        List of summaries of edge deployment plans.

        */ - ImageVersions?: ImageVersion[]; + EdgeDeploymentPlanSummaries: EdgeDeploymentPlanSummary[] | undefined; /** - *

        A token for getting the next set of versions, if there are any.

        + *

        The token to use when calling the next page of results.

        */ NextToken?: string; } -export namespace ListImageVersionsResponse { +export namespace ListEdgeDeploymentPlansResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ListImageVersionsResponse): any => ({ + export const filterSensitiveLog = (obj: ListEdgeDeploymentPlansResponse): any => ({ ...obj, }); } -export enum ListInferenceRecommendationsJobsSortBy { - CREATION_TIME = "CreationTime", - NAME = "Name", - STATUS = "Status", +export enum ListEdgePackagingJobsSortBy { + CreationTime = "CREATION_TIME", + EdgePackagingJobStatus = "STATUS", + LastModifiedTime = "LAST_MODIFIED_TIME", + ModelName = "MODEL_NAME", + Name = "NAME", } -export interface ListInferenceRecommendationsJobsRequest { +export interface ListEdgePackagingJobsRequest { /** - *

        A filter that returns only jobs created after the specified time (timestamp).

        + *

        The response from the last list when returning a list large enough to need tokening.

        + */ + NextToken?: string; + + /** + *

        Maximum number of results to select.

        + */ + MaxResults?: number; + + /** + *

        Select jobs where the job was created after specified time.

        */ CreationTimeAfter?: Date; /** - *

        A filter that returns only jobs created before the specified time (timestamp).

        + *

        Select jobs where the job was created before specified time.

        */ CreationTimeBefore?: Date; /** - *

        A filter that returns only jobs that were last modified after the specified time (timestamp).

        + *

        Select jobs where the job was updated after specified time.

        */ LastModifiedTimeAfter?: Date; /** - *

        A filter that returns only jobs that were last modified before the specified time (timestamp).

        + *

        Select jobs where the job was updated before specified time.

        */ LastModifiedTimeBefore?: Date; /** - *

        A string in the job name. This filter returns only recommendations whose name contains the specified string.

        + *

        Filter for jobs containing this name in their packaging job name.

        */ NameContains?: string; /** - *

        A filter that retrieves only inference recommendations jobs with a specific status.

        - */ - StatusEquals?: RecommendationJobStatus | string; - - /** - *

        The parameter by which to sort the results.

        + *

        Filter for jobs where the model name contains this string.

        */ - SortBy?: ListInferenceRecommendationsJobsSortBy | string; + ModelNameContains?: string; /** - *

        The sort order for the results.

        + *

        The job status to filter for.

        */ - SortOrder?: SortOrder | string; + StatusEquals?: EdgePackagingJobStatus | string; /** - *

        If the response to a previous ListInferenceRecommendationsJobsRequest request - * was truncated, the response includes a NextToken. To retrieve the next set - * of recommendations, use the token in the next request.

        + *

        Use to specify what column to sort by.

        */ - NextToken?: string; + SortBy?: ListEdgePackagingJobsSortBy | string; /** - *

        The maximum number of recommendations to return in the response.

        + *

        What direction to sort by.

        */ - MaxResults?: number; + SortOrder?: SortOrder | string; } -export namespace ListInferenceRecommendationsJobsRequest { +export namespace ListEdgePackagingJobsRequest { /** * @internal */ - export const filterSensitiveLog = (obj: ListInferenceRecommendationsJobsRequest): any => ({ + export const filterSensitiveLog = (obj: ListEdgePackagingJobsRequest): any => ({ ...obj, }); } -export interface ListInferenceRecommendationsJobsResponse { +export interface ListEdgePackagingJobsResponse { /** - *

        The recommendations created from the Amazon SageMaker Inference Recommender job.

        + *

        Summaries of edge packaging jobs.

        */ - InferenceRecommendationsJobs: InferenceRecommendationsJob[] | undefined; + EdgePackagingJobSummaries: EdgePackagingJobSummary[] | undefined; /** - *

        A token for getting the next set of recommendations, if there are any.

        + *

        Token to use when calling the next page of results.

        */ NextToken?: string; } -export namespace ListInferenceRecommendationsJobsResponse { +export namespace ListEdgePackagingJobsResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ListInferenceRecommendationsJobsResponse): any => ({ + export const filterSensitiveLog = (obj: ListEdgePackagingJobsResponse): any => ({ ...obj, }); } -export enum SortBy { - CREATION_TIME = "CreationTime", - NAME = "Name", - STATUS = "Status", +export enum OrderKey { + Ascending = "Ascending", + Descending = "Descending", } -export interface ListLabelingJobsRequest { - /** - *

        A filter that returns only labeling jobs created after the specified time - * (timestamp).

        - */ - CreationTimeAfter?: Date; - +export interface ListEndpointConfigsInput { /** - *

        A filter that returns only labeling jobs created before the specified time - * (timestamp).

        + *

        The field to sort results by. The default is CreationTime.

        */ - CreationTimeBefore?: Date; + SortBy?: EndpointConfigSortKey | string; /** - *

        A filter that returns only labeling jobs modified after the specified time - * (timestamp).

        + *

        The sort order for results. The default is Descending.

        */ - LastModifiedTimeAfter?: Date; + SortOrder?: OrderKey | string; /** - *

        A filter that returns only labeling jobs modified before the specified time - * (timestamp).

        + *

        If the result of the previous ListEndpointConfig request was + * truncated, the response includes a NextToken. To retrieve the next set of + * endpoint configurations, use the token in the next request.

        */ - LastModifiedTimeBefore?: Date; + NextToken?: string; /** - *

        The maximum number of labeling jobs to return in each page of the response.

        + *

        The maximum number of endpoint configurations to return in the response.

        */ MaxResults?: number; /** - *

        If the result of the previous ListLabelingJobs request was truncated, the - * response includes a NextToken. To retrieve the next set of labeling jobs, - * use the token in the next request.

        - */ - NextToken?: string; - - /** - *

        A string in the labeling job name. This filter returns only labeling jobs whose name - * contains the specified string.

        + *

        A string in the endpoint configuration name. This filter returns only endpoint + * configurations whose name contains the specified string.

        */ NameContains?: string; /** - *

        The field to sort results by. The default is CreationTime.

        - */ - SortBy?: SortBy | string; - - /** - *

        The sort order for results. The default is Ascending.

        + *

        A filter that returns only endpoint configurations created before the specified + * time (timestamp).

        */ - SortOrder?: SortOrder | string; + CreationTimeBefore?: Date; /** - *

        A filter that retrieves only labeling jobs with a specific status.

        + *

        A filter that returns only endpoint configurations with a creation time greater + * than or equal to the specified time (timestamp).

        */ - StatusEquals?: LabelingJobStatus | string; + CreationTimeAfter?: Date; } -export namespace ListLabelingJobsRequest { +export namespace ListEndpointConfigsInput { /** * @internal */ - export const filterSensitiveLog = (obj: ListLabelingJobsRequest): any => ({ + export const filterSensitiveLog = (obj: ListEndpointConfigsInput): any => ({ ...obj, }); } -export interface ListLabelingJobsResponse { +export interface ListEndpointConfigsOutput { /** - *

        An array of LabelingJobSummary objects, each describing a labeling - * job.

        + *

        An array of endpoint configurations.

        */ - LabelingJobSummaryList?: LabelingJobSummary[]; + EndpointConfigs: EndpointConfigSummary[] | undefined; /** - *

        If the response is truncated, SageMaker returns this token. To retrieve the next set of - * labeling jobs, use it in the subsequent request.

        + *

        If the response is truncated, SageMaker returns this token. To retrieve the next set of + * endpoint configurations, use it in the subsequent request

        */ NextToken?: string; } -export namespace ListLabelingJobsResponse { +export namespace ListEndpointConfigsOutput { /** * @internal */ - export const filterSensitiveLog = (obj: ListLabelingJobsResponse): any => ({ + export const filterSensitiveLog = (obj: ListEndpointConfigsOutput): any => ({ ...obj, }); } -export enum ListLabelingJobsForWorkteamSortByOptions { - CREATION_TIME = "CreationTime", -} - -export interface ListLabelingJobsForWorkteamRequest { +export interface ListEndpointsInput { /** - *

        The Amazon Resource Name (ARN) of the work team for which you want to see labeling - * jobs for.

        + *

        Sorts the list of results. The default is CreationTime.

        */ - WorkteamArn: string | undefined; + SortBy?: EndpointSortKey | string; /** - *

        The maximum number of labeling jobs to return in each page of the response.

        + *

        The sort order for results. The default is Descending.

        */ - MaxResults?: number; + SortOrder?: OrderKey | string; /** - *

        If the result of the previous ListLabelingJobsForWorkteam request was - * truncated, the response includes a NextToken. To retrieve the next set of - * labeling jobs, use the token in the next request.

        + *

        If the result of a ListEndpoints request was truncated, the response + * includes a NextToken. To retrieve the next set of endpoints, use the token + * in the next request.

        */ NextToken?: string; /** - *

        A filter that returns only labeling jobs created after the specified time - * (timestamp).

        + *

        The maximum number of endpoints to return in the response. This value defaults to + * 10.

        */ - CreationTimeAfter?: Date; + MaxResults?: number; /** - *

        A filter that returns only labeling jobs created before the specified time + *

        A string in endpoint names. This filter returns only endpoints whose name contains + * the specified string.

        + */ + NameContains?: string; + + /** + *

        A filter that returns only endpoints that were created before the specified time * (timestamp).

        */ CreationTimeBefore?: Date; /** - *

        A filter the limits jobs to only the ones whose job reference code contains the - * specified string.

        + *

        A filter that returns only endpoints with a creation time greater than or equal to + * the specified time (timestamp).

        */ - JobReferenceCodeContains?: string; + CreationTimeAfter?: Date; /** - *

        The field to sort results by. The default is CreationTime.

        + *

        A filter that returns only endpoints that were modified before the specified + * timestamp.

        */ - SortBy?: ListLabelingJobsForWorkteamSortByOptions | string; + LastModifiedTimeBefore?: Date; /** - *

        The sort order for results. The default is Ascending.

        + *

        A filter that returns only endpoints that were modified after the specified + * timestamp.

        */ - SortOrder?: SortOrder | string; + LastModifiedTimeAfter?: Date; + + /** + *

        A filter that returns only endpoints with the specified status.

        + */ + StatusEquals?: EndpointStatus | string; } -export namespace ListLabelingJobsForWorkteamRequest { +export namespace ListEndpointsInput { /** * @internal */ - export const filterSensitiveLog = (obj: ListLabelingJobsForWorkteamRequest): any => ({ + export const filterSensitiveLog = (obj: ListEndpointsInput): any => ({ ...obj, }); } -export interface ListLabelingJobsForWorkteamResponse { +export interface ListEndpointsOutput { /** - *

        An array of LabelingJobSummary objects, each describing a labeling - * job.

        + *

        An array of endpoint objects.

        */ - LabelingJobSummaryList: LabelingJobForWorkteamSummary[] | undefined; + Endpoints: EndpointSummary[] | undefined; /** - *

        If the response is truncated, SageMaker returns this token. To retrieve the next set of - * labeling jobs, use it in the subsequent request.

        + *

        If the response is truncated, SageMaker returns this token. To retrieve the next set of + * endpoints, use it in the subsequent request.

        */ NextToken?: string; } -export namespace ListLabelingJobsForWorkteamResponse { +export namespace ListEndpointsOutput { /** * @internal */ - export const filterSensitiveLog = (obj: ListLabelingJobsForWorkteamResponse): any => ({ + export const filterSensitiveLog = (obj: ListEndpointsOutput): any => ({ ...obj, }); } -export enum SortLineageGroupsBy { +export enum SortExperimentsBy { CREATION_TIME = "CreationTime", NAME = "Name", } -export interface ListLineageGroupsRequest { +export interface ListExperimentsRequest { /** - *

        A timestamp to filter against lineage groups created after a certain point in time.

        + *

        A filter that returns only experiments created after the specified time.

        */ CreatedAfter?: Date; /** - *

        A timestamp to filter against lineage groups created before a certain point in time.

        + *

        A filter that returns only experiments created before the specified time.

        */ CreatedBefore?: Date; /** - *

        The parameter by which to sort the results. The default is - * CreationTime.

        + *

        The property used to sort results. The default value is CreationTime.

        */ - SortBy?: SortLineageGroupsBy | string; + SortBy?: SortExperimentsBy | string; /** - *

        The sort order for the results. The default is Ascending.

        + *

        The sort order. The default value is Descending.

        */ SortOrder?: SortOrder | string; /** - *

        If the response is truncated, SageMaker returns this token. To retrieve the next set of - * algorithms, use it in the subsequent request.

        + *

        If the previous call to ListExperiments didn't return the full set of + * experiments, the call returns a token for getting the next set of experiments.

        */ NextToken?: string; /** - *

        The maximum number of endpoints to return in the response. This value defaults to - * 10.

        + *

        The maximum number of experiments to return in the response. The default value is + * 10.

        */ MaxResults?: number; } -export namespace ListLineageGroupsRequest { +export namespace ListExperimentsRequest { /** * @internal */ - export const filterSensitiveLog = (obj: ListLineageGroupsRequest): any => ({ + export const filterSensitiveLog = (obj: ListExperimentsRequest): any => ({ ...obj, }); } -export interface ListLineageGroupsResponse { +export interface ListExperimentsResponse { /** - *

        A list of lineage groups and their properties.

        + *

        A list of the summaries of your experiments.

        */ - LineageGroupSummaries?: LineageGroupSummary[]; + ExperimentSummaries?: ExperimentSummary[]; /** - *

        If the response is truncated, SageMaker returns this token. To retrieve the next set of - * algorithms, use it in the subsequent request.

        + *

        A token for getting the next set of experiments, if there are any.

        */ NextToken?: string; } -export namespace ListLineageGroupsResponse { +export namespace ListExperimentsResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ListLineageGroupsResponse): any => ({ + export const filterSensitiveLog = (obj: ListExperimentsResponse): any => ({ ...obj, }); } -export interface ListModelBiasJobDefinitionsRequest { +export interface ListFeatureGroupsRequest { /** - *

        Name of the endpoint to monitor for model bias.

        + *

        A string that partially matches one or more FeatureGroups names. Filters + * FeatureGroups by name.

        */ - EndpointName?: string; + NameContains?: string; /** - *

        Whether to sort results by the Name or CreationTime field. The - * default is CreationTime.

        + *

        A FeatureGroup status. Filters by FeatureGroup status.

        */ - SortBy?: MonitoringJobDefinitionSortKey | string; + FeatureGroupStatusEquals?: FeatureGroupStatus | string; /** - *

        Whether to sort the results in Ascending or Descending order. - * The default is Descending.

        + *

        An OfflineStore status. Filters by OfflineStore status.

        */ - SortOrder?: SortOrder | string; + OfflineStoreStatusEquals?: OfflineStoreStatusValue | string; /** - *

        The token returned if the response is truncated. To retrieve the next set of job - * executions, use it in the next request.

        + *

        Use this parameter to search for FeatureGroups created after a specific + * date and time.

        */ - NextToken?: string; + CreationTimeAfter?: Date; /** - *

        The maximum number of model bias jobs to return in the response. The default value is - * 10.

        + *

        Use this parameter to search for FeatureGroups created before a specific + * date and time.

        */ - MaxResults?: number; + CreationTimeBefore?: Date; /** - *

        Filter for model bias jobs whose name contains a specified string.

        + *

        The order in which feature groups are listed.

        */ - NameContains?: string; + SortOrder?: FeatureGroupSortOrder | string; /** - *

        A filter that returns only model bias jobs created before a specified time.

        + *

        The value on which the feature group list is sorted.

        */ - CreationTimeBefore?: Date; + SortBy?: FeatureGroupSortBy | string; /** - *

        A filter that returns only model bias jobs created after a specified time.

        + *

        The maximum number of results returned by ListFeatureGroups.

        */ - CreationTimeAfter?: Date; + MaxResults?: number; + + /** + *

        A token to resume pagination of ListFeatureGroups results.

        + */ + NextToken?: string; } -export namespace ListModelBiasJobDefinitionsRequest { +export namespace ListFeatureGroupsRequest { /** * @internal */ - export const filterSensitiveLog = (obj: ListModelBiasJobDefinitionsRequest): any => ({ + export const filterSensitiveLog = (obj: ListFeatureGroupsRequest): any => ({ ...obj, }); } -export interface ListModelBiasJobDefinitionsResponse { +export interface ListFeatureGroupsResponse { /** - *

        A JSON array in which each element is a summary for a model bias jobs.

        + *

        A summary of feature groups.

        */ - JobDefinitionSummaries: MonitoringJobDefinitionSummary[] | undefined; + FeatureGroupSummaries: FeatureGroupSummary[] | undefined; /** - *

        If the response is truncated, Amazon SageMaker returns this token. To retrieve the next set of jobs, - * use it in the subsequent request.

        + *

        A token to resume pagination of ListFeatureGroups results.

        */ - NextToken?: string; + NextToken: string | undefined; } -export namespace ListModelBiasJobDefinitionsResponse { +export namespace ListFeatureGroupsResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ListModelBiasJobDefinitionsResponse): any => ({ + export const filterSensitiveLog = (obj: ListFeatureGroupsResponse): any => ({ ...obj, }); } -export interface ListModelExplainabilityJobDefinitionsRequest { +export interface ListFlowDefinitionsRequest { /** - *

        Name of the endpoint to monitor for model explainability.

        + *

        A filter that returns only flow definitions with a creation time greater than or equal to the specified timestamp.

        */ - EndpointName?: string; + CreationTimeAfter?: Date; /** - *

        Whether to sort results by the Name or CreationTime field. The - * default is CreationTime.

        + *

        A filter that returns only flow definitions that were created before the specified timestamp.

        */ - SortBy?: MonitoringJobDefinitionSortKey | string; + CreationTimeBefore?: Date; /** - *

        Whether to sort the results in Ascending or Descending order. - * The default is Descending.

        + *

        An optional value that specifies whether you want the results sorted in Ascending or Descending order.

        */ SortOrder?: SortOrder | string; /** - *

        The token returned if the response is truncated. To retrieve the next set of job - * executions, use it in the next request.

        + *

        A token to resume pagination.

        */ NextToken?: string; /** - *

        The maximum number of jobs to return in the response. The default value is 10.

        + *

        The total number of items to return. If the total number of available items is more than the value specified in MaxResults, then a NextToken will be provided in the output that you can use to resume pagination.

        */ MaxResults?: number; +} +export namespace ListFlowDefinitionsRequest { /** - *

        Filter for model explainability jobs whose name contains a specified string.

        + * @internal */ - NameContains?: string; + export const filterSensitiveLog = (obj: ListFlowDefinitionsRequest): any => ({ + ...obj, + }); +} +export interface ListFlowDefinitionsResponse { /** - *

        A filter that returns only model explainability jobs created before a specified - * time.

        + *

        An array of objects describing the flow definitions.

        */ - CreationTimeBefore?: Date; + FlowDefinitionSummaries: FlowDefinitionSummary[] | undefined; /** - *

        A filter that returns only model explainability jobs created after a specified - * time.

        + *

        A token to resume pagination.

        */ - CreationTimeAfter?: Date; + NextToken?: string; } -export namespace ListModelExplainabilityJobDefinitionsRequest { +export namespace ListFlowDefinitionsResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ListModelExplainabilityJobDefinitionsRequest): any => ({ + export const filterSensitiveLog = (obj: ListFlowDefinitionsResponse): any => ({ ...obj, }); } -export interface ListModelExplainabilityJobDefinitionsResponse { +export interface ListHumanTaskUisRequest { /** - *

        A JSON array in which each element is a summary for a explainability bias jobs.

        + *

        A filter that returns only human task user interfaces with a creation time greater than or equal to the specified timestamp.

        */ - JobDefinitionSummaries: MonitoringJobDefinitionSummary[] | undefined; + CreationTimeAfter?: Date; /** - *

        If the response is truncated, Amazon SageMaker returns this token. To retrieve the next set of jobs, - * use it in the subsequent request.

        + *

        A filter that returns only human task user interfaces that were created before the specified timestamp.

        */ - NextToken?: string; -} + CreationTimeBefore?: Date; -export namespace ListModelExplainabilityJobDefinitionsResponse { /** - * @internal + *

        An optional value that specifies whether you want the results sorted in Ascending or Descending order.

        */ - export const filterSensitiveLog = (obj: ListModelExplainabilityJobDefinitionsResponse): any => ({ - ...obj, - }); -} - -export enum ModelMetadataFilterType { - DOMAIN = "Domain", - FRAMEWORK = "Framework", - FRAMEWORKVERSION = "FrameworkVersion", - TASK = "Task", -} + SortOrder?: SortOrder | string; -/** - *

        Part of the search expression. You can specify the name and value - * (domain, task, framework, framework version, task, and model).

        - */ -export interface ModelMetadataFilter { /** - *

        The name of the of the model to filter by.

        + *

        A token to resume pagination.

        */ - Name: ModelMetadataFilterType | string | undefined; + NextToken?: string; /** - *

        The value to filter the model metadata.

        + *

        The total number of items to return. If the total number of available items is more than the value specified in MaxResults, then a NextToken will be provided in the output that you can use to resume pagination.

        */ - Value: string | undefined; + MaxResults?: number; } -export namespace ModelMetadataFilter { +export namespace ListHumanTaskUisRequest { /** * @internal */ - export const filterSensitiveLog = (obj: ModelMetadataFilter): any => ({ + export const filterSensitiveLog = (obj: ListHumanTaskUisRequest): any => ({ ...obj, }); } -/** - *

        One or more filters that searches for the specified resource or resources in - * a search. All resource objects that satisfy the expression's condition are - * included in the search results

        - */ -export interface ModelMetadataSearchExpression { +export interface ListHumanTaskUisResponse { /** - *

        A list of filter objects.

        + *

        An array of objects describing the human task user interfaces.

        */ - Filters?: ModelMetadataFilter[]; + HumanTaskUiSummaries: HumanTaskUiSummary[] | undefined; + + /** + *

        A token to resume pagination.

        + */ + NextToken?: string; } -export namespace ModelMetadataSearchExpression { +export namespace ListHumanTaskUisResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ModelMetadataSearchExpression): any => ({ + export const filterSensitiveLog = (obj: ListHumanTaskUisResponse): any => ({ ...obj, }); } -export interface ListModelMetadataRequest { +export interface ListHyperParameterTuningJobsRequest { /** - *

        One or more filters that searches for the specified resource or resources - * in a search. All resource objects that satisfy the expression's condition are - * included in the search results. Specify the Framework, FrameworkVersion, Domain - * or Task to filter supported. Filter names and values are case-sensitive.

        + *

        If the result of the previous ListHyperParameterTuningJobs request was + * truncated, the response includes a NextToken. To retrieve the next set of + * tuning jobs, use the token in the next request.

        */ - SearchExpression?: ModelMetadataSearchExpression; + NextToken?: string; /** - *

        If the response to a previous ListModelMetadataResponse request was truncated, - * the response includes a NextToken. To retrieve the next set of model metadata, - * use the token in the next request.

        + *

        The + * maximum number of tuning jobs to return. The default value is + * 10.

        */ - NextToken?: string; + MaxResults?: number; /** - *

        The maximum number of models to return in the response.

        + *

        The field to sort results by. The default is Name.

        */ - MaxResults?: number; -} + SortBy?: HyperParameterTuningJobSortByOptions | string; -export namespace ListModelMetadataRequest { /** - * @internal + *

        The sort order for results. The default is Ascending.

        */ - export const filterSensitiveLog = (obj: ListModelMetadataRequest): any => ({ - ...obj, - }); -} + SortOrder?: SortOrder | string; -/** - *

        A summary of the model metadata.

        - */ -export interface ModelMetadataSummary { /** - *

        The machine learning domain of the model.

        + *

        A string in the tuning job name. This filter returns only tuning jobs whose name + * contains the specified string.

        */ - Domain: string | undefined; + NameContains?: string; /** - *

        The machine learning framework of the model.

        + *

        A filter that returns only tuning jobs that were created after the specified + * time.

        */ - Framework: string | undefined; + CreationTimeAfter?: Date; /** - *

        The machine learning task of the model.

        + *

        A filter that returns only tuning jobs that were created before the specified + * time.

        */ - Task: string | undefined; + CreationTimeBefore?: Date; /** - *

        The name of the model.

        + *

        A filter that returns only tuning jobs that were modified after the specified + * time.

        */ - Model: string | undefined; + LastModifiedTimeAfter?: Date; /** - *

        The framework version of the model.

        + *

        A filter that returns only tuning jobs that were modified before the specified + * time.

        */ - FrameworkVersion: string | undefined; + LastModifiedTimeBefore?: Date; + + /** + *

        A filter that returns only tuning jobs with the specified status.

        + */ + StatusEquals?: HyperParameterTuningJobStatus | string; } -export namespace ModelMetadataSummary { +export namespace ListHyperParameterTuningJobsRequest { /** * @internal */ - export const filterSensitiveLog = (obj: ModelMetadataSummary): any => ({ + export const filterSensitiveLog = (obj: ListHyperParameterTuningJobsRequest): any => ({ ...obj, }); } -export interface ListModelMetadataResponse { +export interface ListHyperParameterTuningJobsResponse { /** - *

        A structure that holds model metadata.

        + *

        A list of HyperParameterTuningJobSummary objects that + * describe + * the tuning jobs that the ListHyperParameterTuningJobs + * request returned.

        */ - ModelMetadataSummaries: ModelMetadataSummary[] | undefined; + HyperParameterTuningJobSummaries: HyperParameterTuningJobSummary[] | undefined; /** - *

        A token for getting the next set of recommendations, if there are any.

        + *

        If the result of this ListHyperParameterTuningJobs request was truncated, + * the response includes a NextToken. To retrieve the next set of tuning jobs, + * use the token in the next request.

        */ NextToken?: string; } -export namespace ListModelMetadataResponse { +export namespace ListHyperParameterTuningJobsResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ListModelMetadataResponse): any => ({ + export const filterSensitiveLog = (obj: ListHyperParameterTuningJobsResponse): any => ({ ...obj, }); } -export enum ModelPackageGroupSortBy { - CREATION_TIME = "CreationTime", - NAME = "Name", -} - -export interface ListModelPackageGroupsInput { +export interface ListImagesRequest { /** - *

        A filter that returns only model groups created after the specified time.

        + *

        A filter that returns only images created on or after the specified time.

        */ CreationTimeAfter?: Date; /** - *

        A filter that returns only model groups created before the specified time.

        + *

        A filter that returns only images created on or before the specified time.

        */ CreationTimeBefore?: Date; /** - *

        The maximum number of results to return in the response.

        + *

        A filter that returns only images modified on or after the specified time.

        + */ + LastModifiedTimeAfter?: Date; + + /** + *

        A filter that returns only images modified on or before the specified time.

        + */ + LastModifiedTimeBefore?: Date; + + /** + *

        The maximum number of images to return in the response. The default value is 10.

        */ MaxResults?: number; /** - *

        A string in the model group name. This filter returns only model groups whose name - * contains the specified string.

        + *

        A filter that returns only images whose name contains the specified string.

        */ NameContains?: string; /** - *

        If the result of the previous ListModelPackageGroups request was - * truncated, the response includes a NextToken. To retrieve the next set of - * model groups, use the token in the next request.

        + *

        If the previous call to ListImages didn't return the full set of images, + * the call returns a token for getting the next set of images.

        */ NextToken?: string; /** - *

        The field to sort results by. The default is CreationTime.

        + *

        The property used to sort results. The default value is CREATION_TIME.

        */ - SortBy?: ModelPackageGroupSortBy | string; + SortBy?: ImageSortBy | string; /** - *

        The sort order for results. The default is Ascending.

        + *

        The sort order. The default value is DESCENDING.

        */ - SortOrder?: SortOrder | string; + SortOrder?: ImageSortOrder | string; } -export namespace ListModelPackageGroupsInput { +export namespace ListImagesRequest { /** * @internal */ - export const filterSensitiveLog = (obj: ListModelPackageGroupsInput): any => ({ + export const filterSensitiveLog = (obj: ListImagesRequest): any => ({ ...obj, }); } -/** - *

        Summary information about a model group.

        - */ -export interface ModelPackageGroupSummary { - /** - *

        The name of the model group.

        - */ - ModelPackageGroupName: string | undefined; - - /** - *

        The Amazon Resource Name (ARN) of the model group.

        - */ - ModelPackageGroupArn: string | undefined; - - /** - *

        A description of the model group.

        - */ - ModelPackageGroupDescription?: string; - +export interface ListImagesResponse { /** - *

        The time that the model group was created.

        + *

        A list of images and their properties.

        */ - CreationTime: Date | undefined; + Images?: Image[]; /** - *

        The status of the model group.

        + *

        A token for getting the next set of images, if there are any.

        */ - ModelPackageGroupStatus: ModelPackageGroupStatus | string | undefined; + NextToken?: string; } -export namespace ModelPackageGroupSummary { +export namespace ListImagesResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ModelPackageGroupSummary): any => ({ + export const filterSensitiveLog = (obj: ListImagesResponse): any => ({ ...obj, }); } -export interface ListModelPackageGroupsOutput { +export interface ListImageVersionsRequest { /** - *

        A list of summaries of the model groups in your Amazon Web Services account.

        + *

        A filter that returns only versions created on or after the specified time.

        */ - ModelPackageGroupSummaryList: ModelPackageGroupSummary[] | undefined; + CreationTimeAfter?: Date; /** - *

        If the response is truncated, SageMaker returns this token. To retrieve the next set - * of model groups, use it in the subsequent request.

        + *

        A filter that returns only versions created on or before the specified time.

        */ - NextToken?: string; -} + CreationTimeBefore?: Date; -export namespace ListModelPackageGroupsOutput { /** - * @internal + *

        The name of the image to list the versions of.

        */ - export const filterSensitiveLog = (obj: ListModelPackageGroupsOutput): any => ({ - ...obj, - }); -} - -export enum ModelPackageType { - BOTH = "Both", - UNVERSIONED = "Unversioned", - VERSIONED = "Versioned", -} - -export enum ModelPackageSortBy { - CREATION_TIME = "CreationTime", - NAME = "Name", -} + ImageName: string | undefined; -export interface ListModelPackagesInput { /** - *

        A filter that returns only model packages created after the specified time - * (timestamp).

        + *

        A filter that returns only versions modified on or after the specified time.

        */ - CreationTimeAfter?: Date; + LastModifiedTimeAfter?: Date; /** - *

        A filter that returns only model packages created before the specified time - * (timestamp).

        + *

        A filter that returns only versions modified on or before the specified time.

        */ - CreationTimeBefore?: Date; + LastModifiedTimeBefore?: Date; /** - *

        The maximum number of model packages to return in the response.

        + *

        The maximum number of versions to return in the response. The default value is 10.

        */ MaxResults?: number; /** - *

        A string in the model package name. This filter returns only model packages whose name - * contains the specified string.

        - */ - NameContains?: string; - - /** - *

        A filter that returns only the model packages with the specified approval - * status.

        + *

        If the previous call to ListImageVersions didn't return the full set of + * versions, the call returns a token for getting the next set of versions.

        */ - ModelApprovalStatus?: ModelApprovalStatus | string; + NextToken?: string; /** - *

        A filter that returns only model versions that belong to the specified model group.

        + *

        The property used to sort results. The default value is CREATION_TIME.

        */ - ModelPackageGroupName?: string; + SortBy?: ImageVersionSortBy | string; /** - *

        A filter that returns only the model packages of the specified type. This can be one - * of the following values.

        - *
          - *
        • - *

          - * UNVERSIONED - List only unversioined models. - * This is the default value if no ModelPackageType is specified.

          - *
        • - *
        • - *

          - * VERSIONED - List only versioned models.

          - *
        • - *
        • - *

          - * BOTH - List both versioned and unversioned models.

          - *
        • - *
        + *

        The sort order. The default value is DESCENDING.

        */ - ModelPackageType?: ModelPackageType | string; + SortOrder?: ImageVersionSortOrder | string; +} +export namespace ListImageVersionsRequest { /** - *

        If the response to a previous ListModelPackages request was truncated, - * the response includes a NextToken. To retrieve the next set of model - * packages, use the token in the next request.

        + * @internal */ - NextToken?: string; + export const filterSensitiveLog = (obj: ListImageVersionsRequest): any => ({ + ...obj, + }); +} +export interface ListImageVersionsResponse { /** - *

        The parameter by which to sort the results. The default is - * CreationTime.

        + *

        A list of versions and their properties.

        */ - SortBy?: ModelPackageSortBy | string; + ImageVersions?: ImageVersion[]; /** - *

        The sort order for the results. The default is Ascending.

        + *

        A token for getting the next set of versions, if there are any.

        */ - SortOrder?: SortOrder | string; + NextToken?: string; } -export namespace ListModelPackagesInput { +export namespace ListImageVersionsResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ListModelPackagesInput): any => ({ + export const filterSensitiveLog = (obj: ListImageVersionsResponse): any => ({ ...obj, }); } -/** - *

        Provides summary information about a model package.

        - */ -export interface ModelPackageSummary { +export enum ListInferenceRecommendationsJobsSortBy { + CREATION_TIME = "CreationTime", + NAME = "Name", + STATUS = "Status", +} + +export interface ListInferenceRecommendationsJobsRequest { /** - *

        The name of the model package.

        + *

        A filter that returns only jobs created after the specified time (timestamp).

        */ - ModelPackageName: string | undefined; + CreationTimeAfter?: Date; /** - *

        If the model package is a versioned model, the model group that the versioned model - * belongs to.

        + *

        A filter that returns only jobs created before the specified time (timestamp).

        */ - ModelPackageGroupName?: string; + CreationTimeBefore?: Date; /** - *

        If the model package is a versioned model, the version of the model.

        + *

        A filter that returns only jobs that were last modified after the specified time (timestamp).

        */ - ModelPackageVersion?: number; + LastModifiedTimeAfter?: Date; /** - *

        The Amazon Resource Name (ARN) of the model package.

        + *

        A filter that returns only jobs that were last modified before the specified time (timestamp).

        */ - ModelPackageArn: string | undefined; + LastModifiedTimeBefore?: Date; /** - *

        A brief description of the model package.

        + *

        A string in the job name. This filter returns only recommendations whose name contains the specified string.

        */ - ModelPackageDescription?: string; + NameContains?: string; /** - *

        A timestamp that shows when the model package was created.

        + *

        A filter that retrieves only inference recommendations jobs with a specific status.

        */ - CreationTime: Date | undefined; + StatusEquals?: RecommendationJobStatus | string; /** - *

        The overall status of the model package.

        + *

        The parameter by which to sort the results.

        */ - ModelPackageStatus: ModelPackageStatus | string | undefined; + SortBy?: ListInferenceRecommendationsJobsSortBy | string; /** - *

        The approval status of the model. This can be one of the following values.

        - *
          - *
        • - *

          - * APPROVED - The model is approved

          - *
        • - *
        • - *

          - * REJECTED - The model is rejected.

          - *
        • - *
        • - *

          - * PENDING_MANUAL_APPROVAL - The model is waiting for manual - * approval.

          - *
        • - *
        + *

        The sort order for the results.

        + */ + SortOrder?: SortOrder | string; + + /** + *

        If the response to a previous ListInferenceRecommendationsJobsRequest request + * was truncated, the response includes a NextToken. To retrieve the next set + * of recommendations, use the token in the next request.

        */ - ModelApprovalStatus?: ModelApprovalStatus | string; + NextToken?: string; + + /** + *

        The maximum number of recommendations to return in the response.

        + */ + MaxResults?: number; } -export namespace ModelPackageSummary { +export namespace ListInferenceRecommendationsJobsRequest { /** * @internal */ - export const filterSensitiveLog = (obj: ModelPackageSummary): any => ({ + export const filterSensitiveLog = (obj: ListInferenceRecommendationsJobsRequest): any => ({ ...obj, }); } -export interface ListModelPackagesOutput { +export interface ListInferenceRecommendationsJobsResponse { /** - *

        An array of ModelPackageSummary objects, each of which lists a model - * package.

        + *

        The recommendations created from the Amazon SageMaker Inference Recommender job.

        */ - ModelPackageSummaryList: ModelPackageSummary[] | undefined; + InferenceRecommendationsJobs: InferenceRecommendationsJob[] | undefined; /** - *

        If the response is truncated, SageMaker returns this token. To retrieve the next set of - * model packages, use it in the subsequent request.

        + *

        A token for getting the next set of recommendations, if there are any.

        */ NextToken?: string; } -export namespace ListModelPackagesOutput { +export namespace ListInferenceRecommendationsJobsResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ListModelPackagesOutput): any => ({ + export const filterSensitiveLog = (obj: ListInferenceRecommendationsJobsResponse): any => ({ ...obj, }); } -export interface ListModelQualityJobDefinitionsRequest { +export enum SortBy { + CREATION_TIME = "CreationTime", + NAME = "Name", + STATUS = "Status", +} + +export interface ListLabelingJobsRequest { /** - *

        A filter that returns only model quality monitoring job definitions that are associated - * with the specified endpoint.

        + *

        A filter that returns only labeling jobs created after the specified time + * (timestamp).

        */ - EndpointName?: string; + CreationTimeAfter?: Date; /** - *

        The field to sort results by. The default is CreationTime.

        + *

        A filter that returns only labeling jobs created before the specified time + * (timestamp).

        */ - SortBy?: MonitoringJobDefinitionSortKey | string; + CreationTimeBefore?: Date; /** - *

        The sort order for results. The default is Descending.

        + *

        A filter that returns only labeling jobs modified after the specified time + * (timestamp).

        */ - SortOrder?: SortOrder | string; + LastModifiedTimeAfter?: Date; /** - *

        If the result of the previous ListModelQualityJobDefinitions request was - * truncated, the response includes a NextToken. To retrieve the next set of - * model quality monitoring job definitions, use the token in the next request.

        + *

        A filter that returns only labeling jobs modified before the specified time + * (timestamp).

        */ - NextToken?: string; + LastModifiedTimeBefore?: Date; /** - *

        The maximum number of results to return in a call to - * ListModelQualityJobDefinitions.

        + *

        The maximum number of labeling jobs to return in each page of the response.

        */ MaxResults?: number; /** - *

        A string in the transform job name. This filter returns only model quality monitoring - * job definitions whose name contains the specified string.

        + *

        If the result of the previous ListLabelingJobs request was truncated, the + * response includes a NextToken. To retrieve the next set of labeling jobs, + * use the token in the next request.

        + */ + NextToken?: string; + + /** + *

        A string in the labeling job name. This filter returns only labeling jobs whose name + * contains the specified string.

        */ NameContains?: string; /** - *

        A filter that returns only model quality monitoring job definitions created before the - * specified time.

        + *

        The field to sort results by. The default is CreationTime.

        */ - CreationTimeBefore?: Date; + SortBy?: SortBy | string; /** - *

        A filter that returns only model quality monitoring job definitions created after the - * specified time.

        + *

        The sort order for results. The default is Ascending.

        */ - CreationTimeAfter?: Date; + SortOrder?: SortOrder | string; + + /** + *

        A filter that retrieves only labeling jobs with a specific status.

        + */ + StatusEquals?: LabelingJobStatus | string; } -export namespace ListModelQualityJobDefinitionsRequest { +export namespace ListLabelingJobsRequest { /** * @internal */ - export const filterSensitiveLog = (obj: ListModelQualityJobDefinitionsRequest): any => ({ + export const filterSensitiveLog = (obj: ListLabelingJobsRequest): any => ({ ...obj, }); } -export interface ListModelQualityJobDefinitionsResponse { +export interface ListLabelingJobsResponse { /** - *

        A list of summaries of model quality monitoring job definitions.

        + *

        An array of LabelingJobSummary objects, each describing a labeling + * job.

        */ - JobDefinitionSummaries: MonitoringJobDefinitionSummary[] | undefined; + LabelingJobSummaryList?: LabelingJobSummary[]; /** - *

        If the response is truncated, Amazon SageMaker returns this token. To retrieve the next set of model - * quality monitoring job definitions, use it in the next request.

        + *

        If the response is truncated, SageMaker returns this token. To retrieve the next set of + * labeling jobs, use it in the subsequent request.

        */ NextToken?: string; } -export namespace ListModelQualityJobDefinitionsResponse { +export namespace ListLabelingJobsResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ListModelQualityJobDefinitionsResponse): any => ({ + export const filterSensitiveLog = (obj: ListLabelingJobsResponse): any => ({ ...obj, }); } -export enum ModelSortKey { - CreationTime = "CreationTime", - Name = "Name", +export enum ListLabelingJobsForWorkteamSortByOptions { + CREATION_TIME = "CreationTime", } -export interface ListModelsInput { +export interface ListLabelingJobsForWorkteamRequest { /** - *

        Sorts the list of results. The default is CreationTime.

        + *

        The Amazon Resource Name (ARN) of the work team for which you want to see labeling + * jobs.

        */ - SortBy?: ModelSortKey | string; + WorkteamArn: string | undefined; /** - *

        The sort order for results. The default is Descending.

        + *

        The maximum number of labeling jobs to return in each page of the response.

        */ - SortOrder?: OrderKey | string; + MaxResults?: number; /** - *

        If the response to a previous ListModels request was truncated, the - * response includes a NextToken. To retrieve the next set of models, use the - * token in the next request.

        + *

        If the result of the previous ListLabelingJobsForWorkteam request was + * truncated, the response includes a NextToken. To retrieve the next set of + * labeling jobs, use the token in the next request.

        */ NextToken?: string; /** - *

        The maximum number of models to return in the response.

        + *

        A filter that returns only labeling jobs created after the specified time + * (timestamp).

        */ - MaxResults?: number; + CreationTimeAfter?: Date; /** - *

        A string in the model name. This filter returns only models whose - * name contains the specified string.

        + *

        A filter that returns only labeling jobs created before the specified time + * (timestamp).

        */ - NameContains?: string; + CreationTimeBefore?: Date; /** - *

        A filter that returns only models created before the specified time - * (timestamp).

        + *

        A filter that limits jobs to only the ones whose job reference code contains the + * specified string.

        */ - CreationTimeBefore?: Date; + JobReferenceCodeContains?: string; /** - *

        A filter that returns only models with a creation time greater than or equal to the - * specified time (timestamp).

        + *

        The field to sort results by. The default is CreationTime.

        */ - CreationTimeAfter?: Date; + SortBy?: ListLabelingJobsForWorkteamSortByOptions | string; + + /** + *

        The sort order for results. The default is Ascending.

        + */ + SortOrder?: SortOrder | string; } -export namespace ListModelsInput { +export namespace ListLabelingJobsForWorkteamRequest { /** * @internal */ - export const filterSensitiveLog = (obj: ListModelsInput): any => ({ + export const filterSensitiveLog = (obj: ListLabelingJobsForWorkteamRequest): any => ({ ...obj, }); } -/** - *

        Provides summary information about a model.

        - */ -export interface ModelSummary { - /** - *

        The name of the model that you want a summary for.

        - */ - ModelName: string | undefined; - +export interface ListLabelingJobsForWorkteamResponse { /** - *

        The Amazon Resource Name (ARN) of the model.

        + *

        An array of LabelingJobSummary objects, each describing a labeling + * job.

        */ - ModelArn: string | undefined; + LabelingJobSummaryList: LabelingJobForWorkteamSummary[] | undefined; /** - *

        A timestamp that indicates when the model was created.

        + *

        If the response is truncated, SageMaker returns this token. To retrieve the next set of + * labeling jobs, use it in the subsequent request.

        */ - CreationTime: Date | undefined; + NextToken?: string; } -export namespace ModelSummary { +export namespace ListLabelingJobsForWorkteamResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ModelSummary): any => ({ + export const filterSensitiveLog = (obj: ListLabelingJobsForWorkteamResponse): any => ({ ...obj, }); } -export interface ListModelsOutput { +export enum SortLineageGroupsBy { + CREATION_TIME = "CreationTime", + NAME = "Name", +} + +export interface ListLineageGroupsRequest { /** - *

        An array of ModelSummary objects, each of which lists a - * model.

        + *

        A timestamp to filter against lineage groups created after a certain point in time.

        */ - Models: ModelSummary[] | undefined; + CreatedAfter?: Date; /** - *

        If the response is truncated, SageMaker returns this token. To retrieve the next set of - * models, use it in the subsequent request.

        + *

        A timestamp to filter against lineage groups created before a certain point in time.

        + */ + CreatedBefore?: Date; + + /** + *

        The parameter by which to sort the results. The default is + * CreationTime.

        + */ + SortBy?: SortLineageGroupsBy | string; + + /** + *

        The sort order for the results. The default is Ascending.

        + */ + SortOrder?: SortOrder | string; + + /** + *

        If the response is truncated, SageMaker returns this token. To retrieve the next set of + * lineage groups, use it in the subsequent request.

        */ NextToken?: string; + + /** + *

        The maximum number of lineage groups to return in the response. This value defaults to + * 10.

        + */ + MaxResults?: number; } -export namespace ListModelsOutput { +export namespace ListLineageGroupsRequest { /** * @internal */ - export const filterSensitiveLog = (obj: ListModelsOutput): any => ({ + export const filterSensitiveLog = (obj: ListLineageGroupsRequest): any => ({ ...obj, }); } -export enum MonitoringExecutionSortKey { - CREATION_TIME = "CreationTime", - SCHEDULED_TIME = "ScheduledTime", - STATUS = "Status", +export interface ListLineageGroupsResponse { + /** + *

        A list of lineage groups and their properties.

        + */ + LineageGroupSummaries?: LineageGroupSummary[]; + + /** + *

        If the response is truncated, SageMaker returns this token. To retrieve the next set of + * lineage groups, use it in the subsequent request.

        + */ + NextToken?: string; } -export interface ListMonitoringExecutionsRequest { +export namespace ListLineageGroupsResponse { /** - *

        Name of a specific schedule to fetch jobs for.

        + * @internal */ - MonitoringScheduleName?: string; + export const filterSensitiveLog = (obj: ListLineageGroupsResponse): any => ({ + ...obj, + }); +} +export interface ListModelBiasJobDefinitionsRequest { /** - *

        Name of a specific endpoint to fetch jobs for.

        + *

        Name of the endpoint to monitor for model bias.

        */ EndpointName?: string; /** - *

        Whether to sort results by Status, CreationTime, - * ScheduledTime field. The default is CreationTime.

        + *

        Whether to sort results by the Name or CreationTime field. The + * default is CreationTime.

        */ - SortBy?: MonitoringExecutionSortKey | string; + SortBy?: MonitoringJobDefinitionSortKey | string; /** *

        Whether to sort the results in Ascending or Descending order. @@ -9499,106 +9501,69 @@ export interface ListMonitoringExecutionsRequest { NextToken?: string; /** - *

        The maximum number of jobs to return in the response. The default value is 10.

        + *

        The maximum number of model bias jobs to return in the response. The default value is + * 10.

        */ MaxResults?: number; /** - *

        Filter for jobs scheduled before a specified time.

        - */ - ScheduledTimeBefore?: Date; - - /** - *

        Filter for jobs scheduled after a specified time.

        + *

        Filter for model bias jobs whose name contains a specified string.

        */ - ScheduledTimeAfter?: Date; + NameContains?: string; /** - *

        A filter that returns only jobs created before a specified time.

        + *

        A filter that returns only model bias jobs created before a specified time.

        */ CreationTimeBefore?: Date; /** - *

        A filter that returns only jobs created after a specified time.

        + *

        A filter that returns only model bias jobs created after a specified time.

        */ CreationTimeAfter?: Date; - - /** - *

        A filter that returns only jobs modified after a specified time.

        - */ - LastModifiedTimeBefore?: Date; - - /** - *

        A filter that returns only jobs modified before a specified time.

        - */ - LastModifiedTimeAfter?: Date; - - /** - *

        A filter that retrieves only jobs with a specific status.

        - */ - StatusEquals?: ExecutionStatus | string; - - /** - *

        Gets a list of the monitoring job runs of the specified monitoring job - * definitions.

        - */ - MonitoringJobDefinitionName?: string; - - /** - *

        A filter that returns only the monitoring job runs of the specified monitoring - * type.

        - */ - MonitoringTypeEquals?: MonitoringType | string; } -export namespace ListMonitoringExecutionsRequest { +export namespace ListModelBiasJobDefinitionsRequest { /** * @internal */ - export const filterSensitiveLog = (obj: ListMonitoringExecutionsRequest): any => ({ + export const filterSensitiveLog = (obj: ListModelBiasJobDefinitionsRequest): any => ({ ...obj, }); } -export interface ListMonitoringExecutionsResponse { +export interface ListModelBiasJobDefinitionsResponse { /** - *

        A JSON array in which each element is a summary for a monitoring execution.

        + *

        A JSON array in which each element is a summary for a model bias job.

        */ - MonitoringExecutionSummaries: MonitoringExecutionSummary[] | undefined; + JobDefinitionSummaries: MonitoringJobDefinitionSummary[] | undefined; /** *

        If the response is truncated, Amazon SageMaker returns this token. To retrieve the next set of jobs, - * use it in the subsequent reques

        + * use it in the subsequent request.

        */ NextToken?: string; } -export namespace ListMonitoringExecutionsResponse { +export namespace ListModelBiasJobDefinitionsResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ListMonitoringExecutionsResponse): any => ({ + export const filterSensitiveLog = (obj: ListModelBiasJobDefinitionsResponse): any => ({ ...obj, }); } -export enum MonitoringScheduleSortKey { - CREATION_TIME = "CreationTime", - NAME = "Name", - STATUS = "Status", -} - -export interface ListMonitoringSchedulesRequest { +export interface ListModelExplainabilityJobDefinitionsRequest { /** - *

        Name of a specific endpoint to fetch schedules for.

        + *

        Name of the endpoint to monitor for model explainability.

        */ EndpointName?: string; /** - *

        Whether to sort results by Status, CreationTime, - * ScheduledTime field. The default is CreationTime.

        + *

        Whether to sort results by the Name or CreationTime field. The + * default is CreationTime.

        */ - SortBy?: MonitoringScheduleSortKey | string; + SortBy?: MonitoringJobDefinitionSortKey | string; /** *

        Whether to sort the results in Ascending or Descending order. @@ -9618,1391 +9583,1427 @@ export interface ListMonitoringSchedulesRequest { MaxResults?: number; /** - *

        Filter for monitoring schedules whose name contains a specified string.

        + *

        Filter for model explainability jobs whose name contains a specified string.

        */ NameContains?: string; /** - *

        A filter that returns only monitoring schedules created before a specified time.

        + *

        A filter that returns only model explainability jobs created before a specified + * time.

        */ CreationTimeBefore?: Date; /** - *

        A filter that returns only monitoring schedules created after a specified time.

        + *

        A filter that returns only model explainability jobs created after a specified + * time.

        */ CreationTimeAfter?: Date; +} +export namespace ListModelExplainabilityJobDefinitionsRequest { /** - *

        A filter that returns only monitoring schedules modified before a specified time.

        + * @internal */ - LastModifiedTimeBefore?: Date; + export const filterSensitiveLog = (obj: ListModelExplainabilityJobDefinitionsRequest): any => ({ + ...obj, + }); +} +export interface ListModelExplainabilityJobDefinitionsResponse { /** - *

        A filter that returns only monitoring schedules modified after a specified time.

        + *

        A JSON array in which each element is a summary for a model explainability job.

        */ - LastModifiedTimeAfter?: Date; + JobDefinitionSummaries: MonitoringJobDefinitionSummary[] | undefined; /** - *

        A filter that returns only monitoring schedules modified before a specified time.

        + *

        If the response is truncated, Amazon SageMaker returns this token. To retrieve the next set of jobs, + * use it in the subsequent request.

        */ - StatusEquals?: ScheduleStatus | string; + NextToken?: string; +} +export namespace ListModelExplainabilityJobDefinitionsResponse { /** - *

        Gets a list of the monitoring schedules for the specified monitoring job - * definition.

        + * @internal */ - MonitoringJobDefinitionName?: string; + export const filterSensitiveLog = (obj: ListModelExplainabilityJobDefinitionsResponse): any => ({ + ...obj, + }); +} + +export enum ModelMetadataFilterType { + DOMAIN = "Domain", + FRAMEWORK = "Framework", + FRAMEWORKVERSION = "FrameworkVersion", + TASK = "Task", +} +/** + *

        Part of the search expression. You can specify the name and value + * (domain, task, framework, framework version, task, and model).

        + */ +export interface ModelMetadataFilter { /** - *

        A filter that returns only the monitoring schedules for the specified monitoring - * type.

        + *

        The name of the model to filter by.

        */ - MonitoringTypeEquals?: MonitoringType | string; + Name: ModelMetadataFilterType | string | undefined; + + /** + *

        The value to filter the model metadata.

        + */ + Value: string | undefined; } -export namespace ListMonitoringSchedulesRequest { +export namespace ModelMetadataFilter { /** * @internal */ - export const filterSensitiveLog = (obj: ListMonitoringSchedulesRequest): any => ({ + export const filterSensitiveLog = (obj: ModelMetadataFilter): any => ({ ...obj, }); } /** - *

        Summarizes the monitoring schedule.

        + *

        One or more filters that search for the specified resource or resources in + * a search. All resource objects that satisfy the expression's condition are + * included in the search results.

        */ -export interface MonitoringScheduleSummary { +export interface ModelMetadataSearchExpression { /** - *

        The name of the monitoring schedule.

        + *

        A list of filter objects.

        */ - MonitoringScheduleName: string | undefined; + Filters?: ModelMetadataFilter[]; +} +export namespace ModelMetadataSearchExpression { /** - *

        The Amazon Resource Name (ARN) of the monitoring schedule.

        + * @internal */ - MonitoringScheduleArn: string | undefined; + export const filterSensitiveLog = (obj: ModelMetadataSearchExpression): any => ({ + ...obj, + }); +} +export interface ListModelMetadataRequest { /** - *

        The creation time of the monitoring schedule.

        + *

        One or more filters that searches for the specified resource or resources + * in a search. All resource objects that satisfy the expression's condition are + * included in the search results. Specify the Framework, FrameworkVersion, Domain + * or Task to filter supported. Filter names and values are case-sensitive.

        */ - CreationTime: Date | undefined; + SearchExpression?: ModelMetadataSearchExpression; /** - *

        The last time the monitoring schedule was modified.

        + *

        If the response to a previous ListModelMetadataResponse request was truncated, + * the response includes a NextToken. To retrieve the next set of model metadata, + * use the token in the next request.

        */ - LastModifiedTime: Date | undefined; + NextToken?: string; /** - *

        The status of the monitoring schedule.

        + *

        The maximum number of models to return in the response.

        */ - MonitoringScheduleStatus: ScheduleStatus | string | undefined; + MaxResults?: number; +} +export namespace ListModelMetadataRequest { /** - *

        The name of the endpoint using the monitoring schedule.

        + * @internal */ - EndpointName?: string; + export const filterSensitiveLog = (obj: ListModelMetadataRequest): any => ({ + ...obj, + }); +} +/** + *

        A summary of the model metadata.

        + */ +export interface ModelMetadataSummary { /** - *

        The name of the monitoring job definition that the schedule is for.

        + *

        The machine learning domain of the model.

        */ - MonitoringJobDefinitionName?: string; + Domain: string | undefined; /** - *

        The type of the monitoring job definition that the schedule is for.

        + *

        The machine learning framework of the model.

        */ - MonitoringType?: MonitoringType | string; + Framework: string | undefined; + + /** + *

        The machine learning task of the model.

        + */ + Task: string | undefined; + + /** + *

        The name of the model.

        + */ + Model: string | undefined; + + /** + *

        The framework version of the model.

        + */ + FrameworkVersion: string | undefined; } -export namespace MonitoringScheduleSummary { +export namespace ModelMetadataSummary { /** * @internal */ - export const filterSensitiveLog = (obj: MonitoringScheduleSummary): any => ({ + export const filterSensitiveLog = (obj: ModelMetadataSummary): any => ({ ...obj, }); } -export interface ListMonitoringSchedulesResponse { +export interface ListModelMetadataResponse { /** - *

        A JSON array in which each element is a summary for a monitoring schedule.

        + *

        A structure that holds model metadata.

        */ - MonitoringScheduleSummaries: MonitoringScheduleSummary[] | undefined; + ModelMetadataSummaries: ModelMetadataSummary[] | undefined; /** - *

        If the response is truncated, Amazon SageMaker returns this token. To retrieve the next set of jobs, - * use it in the subsequent request.

        + *

        A token for getting the next set of model metadata summaries, if there are any.

        */ NextToken?: string; } -export namespace ListMonitoringSchedulesResponse { +export namespace ListModelMetadataResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ListMonitoringSchedulesResponse): any => ({ + export const filterSensitiveLog = (obj: ListModelMetadataResponse): any => ({ ...obj, }); } -export enum NotebookInstanceLifecycleConfigSortKey { +export enum ModelPackageGroupSortBy { CREATION_TIME = "CreationTime", - LAST_MODIFIED_TIME = "LastModifiedTime", NAME = "Name", } -export enum NotebookInstanceLifecycleConfigSortOrder { - ASCENDING = "Ascending", - DESCENDING = "Descending", -} - -export interface ListNotebookInstanceLifecycleConfigsInput { - /** - *

        If the result of a ListNotebookInstanceLifecycleConfigs request was - * truncated, the response includes a NextToken. To get the next set of - * lifecycle configurations, use the token in the next request.

        - */ - NextToken?: string; - - /** - *

        The maximum number of lifecycle configurations to return in the response.

        - */ - MaxResults?: number; - +export interface ListModelPackageGroupsInput { /** - *

        Sorts the list of results. The default is CreationTime.

        + *

        A filter that returns only model groups created after the specified time.

        */ - SortBy?: NotebookInstanceLifecycleConfigSortKey | string; + CreationTimeAfter?: Date; /** - *

        The sort order for results.

        + *

        A filter that returns only model groups created before the specified time.

        */ - SortOrder?: NotebookInstanceLifecycleConfigSortOrder | string; + CreationTimeBefore?: Date; /** - *

        A string in the lifecycle configuration name. This filter returns only lifecycle - * configurations whose name contains the specified string.

        + *

        The maximum number of results to return in the response.

        */ - NameContains?: string; + MaxResults?: number; /** - *

        A filter that returns only lifecycle configurations that were created before the - * specified time (timestamp).

        + *

        A string in the model group name. This filter returns only model groups whose name + * contains the specified string.

        */ - CreationTimeBefore?: Date; + NameContains?: string; /** - *

        A filter that returns only lifecycle configurations that were created after the - * specified time (timestamp).

        + *

        If the result of the previous ListModelPackageGroups request was + * truncated, the response includes a NextToken. To retrieve the next set of + * model groups, use the token in the next request.

        */ - CreationTimeAfter?: Date; + NextToken?: string; /** - *

        A filter that returns only lifecycle configurations that were modified before the - * specified time (timestamp).

        + *

        The field to sort results by. The default is CreationTime.

        */ - LastModifiedTimeBefore?: Date; + SortBy?: ModelPackageGroupSortBy | string; /** - *

        A filter that returns only lifecycle configurations that were modified after the - * specified time (timestamp).

        + *

        The sort order for results. The default is Ascending.

        */ - LastModifiedTimeAfter?: Date; + SortOrder?: SortOrder | string; } -export namespace ListNotebookInstanceLifecycleConfigsInput { +export namespace ListModelPackageGroupsInput { /** * @internal */ - export const filterSensitiveLog = (obj: ListNotebookInstanceLifecycleConfigsInput): any => ({ + export const filterSensitiveLog = (obj: ListModelPackageGroupsInput): any => ({ ...obj, }); } /** - *

        Provides a summary of a notebook instance lifecycle configuration.

        + *

        Summary information about a model group.

        */ -export interface NotebookInstanceLifecycleConfigSummary { +export interface ModelPackageGroupSummary { /** - *

        The name of the lifecycle configuration.

        + *

        The name of the model group.

        */ - NotebookInstanceLifecycleConfigName: string | undefined; + ModelPackageGroupName: string | undefined; /** - *

        The Amazon Resource Name (ARN) of the lifecycle configuration.

        + *

        The Amazon Resource Name (ARN) of the model group.

        */ - NotebookInstanceLifecycleConfigArn: string | undefined; + ModelPackageGroupArn: string | undefined; /** - *

        A timestamp that tells when the lifecycle configuration was created.

        + *

        A description of the model group.

        */ - CreationTime?: Date; + ModelPackageGroupDescription?: string; /** - *

        A timestamp that tells when the lifecycle configuration was last modified.

        + *

        The time that the model group was created.

        */ - LastModifiedTime?: Date; + CreationTime: Date | undefined; + + /** + *

        The status of the model group.

        + */ + ModelPackageGroupStatus: ModelPackageGroupStatus | string | undefined; } -export namespace NotebookInstanceLifecycleConfigSummary { +export namespace ModelPackageGroupSummary { /** * @internal */ - export const filterSensitiveLog = (obj: NotebookInstanceLifecycleConfigSummary): any => ({ + export const filterSensitiveLog = (obj: ModelPackageGroupSummary): any => ({ ...obj, }); } -export interface ListNotebookInstanceLifecycleConfigsOutput { +export interface ListModelPackageGroupsOutput { /** - *

        If the response is truncated, SageMaker returns this token. To get the next set of - * lifecycle configurations, use it in the next request.

        + *

        A list of summaries of the model groups in your Amazon Web Services account.

        */ - NextToken?: string; + ModelPackageGroupSummaryList: ModelPackageGroupSummary[] | undefined; /** - *

        An array of NotebookInstanceLifecycleConfiguration objects, each listing - * a lifecycle configuration.

        + *

        If the response is truncated, SageMaker returns this token. To retrieve the next set + * of model groups, use it in the subsequent request.

        */ - NotebookInstanceLifecycleConfigs?: NotebookInstanceLifecycleConfigSummary[]; + NextToken?: string; } -export namespace ListNotebookInstanceLifecycleConfigsOutput { +export namespace ListModelPackageGroupsOutput { /** * @internal */ - export const filterSensitiveLog = (obj: ListNotebookInstanceLifecycleConfigsOutput): any => ({ + export const filterSensitiveLog = (obj: ListModelPackageGroupsOutput): any => ({ ...obj, }); } -export enum NotebookInstanceSortKey { +export enum ModelPackageType { + BOTH = "Both", + UNVERSIONED = "Unversioned", + VERSIONED = "Versioned", +} + +export enum ModelPackageSortBy { CREATION_TIME = "CreationTime", NAME = "Name", - STATUS = "Status", } -export enum NotebookInstanceSortOrder { - ASCENDING = "Ascending", - DESCENDING = "Descending", -} +export interface ListModelPackagesInput { + /** + *

        A filter that returns only model packages created after the specified time + * (timestamp).

        + */ + CreationTimeAfter?: Date; -export interface ListNotebookInstancesInput { /** - *

        If the previous call to the ListNotebookInstances is truncated, the - * response includes a NextToken. You can use this token in your subsequent - * ListNotebookInstances request to fetch the next set of notebook - * instances.

        - * - *

        You might specify a filter or a sort order in your request. When response is - * truncated, you must use the same values for the filer and sort order in the next - * request.

        - *
        + *

        A filter that returns only model packages created before the specified time + * (timestamp).

        */ - NextToken?: string; + CreationTimeBefore?: Date; /** - *

        The maximum number of notebook instances to return.

        + *

        The maximum number of model packages to return in the response.

        */ MaxResults?: number; /** - *

        The field to sort results by. The default is Name.

        + *

        A string in the model package name. This filter returns only model packages whose name + * contains the specified string.

        */ - SortBy?: NotebookInstanceSortKey | string; + NameContains?: string; /** - *

        The sort order for results.

        + *

        A filter that returns only the model packages with the specified approval + * status.

        */ - SortOrder?: NotebookInstanceSortOrder | string; + ModelApprovalStatus?: ModelApprovalStatus | string; /** - *

        A string in the notebook instances' name. This filter returns only notebook - * instances whose name contains the specified string.

        + *

        A filter that returns only model versions that belong to the specified model group.

        */ - NameContains?: string; + ModelPackageGroupName?: string; /** - *

        A filter that returns only notebook instances that were created before the - * specified time (timestamp).

        + *

        A filter that returns only the model packages of the specified type. This can be one + * of the following values.

        + *
          + *
        • + *

          + * UNVERSIONED - List only unversioned models. + * This is the default value if no ModelPackageType is specified.

          + *
        • + *
        • + *

          + * VERSIONED - List only versioned models.

          + *
        • + *
        • + *

          + * BOTH - List both versioned and unversioned models.

          + *
        • + *
        */ - CreationTimeBefore?: Date; + ModelPackageType?: ModelPackageType | string; /** - *

        A filter that returns only notebook instances that were created after the specified - * time (timestamp).

        + *

        If the response to a previous ListModelPackages request was truncated, + * the response includes a NextToken. To retrieve the next set of model + * packages, use the token in the next request.

        */ - CreationTimeAfter?: Date; + NextToken?: string; /** - *

        A filter that returns only notebook instances that were modified before the - * specified time (timestamp).

        + *

        The parameter by which to sort the results. The default is + * CreationTime.

        */ - LastModifiedTimeBefore?: Date; + SortBy?: ModelPackageSortBy | string; /** - *

        A filter that returns only notebook instances that were modified after the - * specified time (timestamp).

        + *

        The sort order for the results. The default is Ascending.

        */ - LastModifiedTimeAfter?: Date; + SortOrder?: SortOrder | string; +} +export namespace ListModelPackagesInput { /** - *

        A filter that returns only notebook instances with the specified status.

        + * @internal */ - StatusEquals?: NotebookInstanceStatus | string; + export const filterSensitiveLog = (obj: ListModelPackagesInput): any => ({ + ...obj, + }); +} +/** + *

        Provides summary information about a model package.

        + */ +export interface ModelPackageSummary { /** - *

        A string in the name of a notebook instances lifecycle configuration associated with - * this notebook instance. This filter returns only notebook instances associated with a - * lifecycle configuration with a name that contains the specified string.

        + *

        The name of the model package.

        */ - NotebookInstanceLifecycleConfigNameContains?: string; + ModelPackageName: string | undefined; /** - *

        A string in the name or URL of a Git repository associated with this notebook - * instance. This filter returns only notebook instances associated with a git repository - * with a name that contains the specified string.

        + *

        If the model package is a versioned model, the model group that the versioned model + * belongs to.

        + */ + ModelPackageGroupName?: string; + + /** + *

        If the model package is a versioned model, the version of the model.

        + */ + ModelPackageVersion?: number; + + /** + *

        The Amazon Resource Name (ARN) of the model package.

        + */ + ModelPackageArn: string | undefined; + + /** + *

        A brief description of the model package.

        + */ + ModelPackageDescription?: string; + + /** + *

        A timestamp that shows when the model package was created.

        + */ + CreationTime: Date | undefined; + + /** + *

        The overall status of the model package.

        + */ + ModelPackageStatus: ModelPackageStatus | string | undefined; + + /** + *

        The approval status of the model. This can be one of the following values.

        + *
          + *
        • + *

          + * APPROVED - The model is approved

          + *
        • + *
        • + *

          + * REJECTED - The model is rejected.

          + *
        • + *
        • + *

          + * PENDING_MANUAL_APPROVAL - The model is waiting for manual + * approval.

          + *
        • + *
        + */ + ModelApprovalStatus?: ModelApprovalStatus | string; +} + +export namespace ModelPackageSummary { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ModelPackageSummary): any => ({ + ...obj, + }); +} + +export interface ListModelPackagesOutput { + /** + *

        An array of ModelPackageSummary objects, each of which lists a model + * package.

        */ - DefaultCodeRepositoryContains?: string; + ModelPackageSummaryList: ModelPackageSummary[] | undefined; /** - *

        A filter that returns only notebook instances with associated with the specified git - * repository.

        + *

        If the response is truncated, SageMaker returns this token. To retrieve the next set of + * model packages, use it in the subsequent request.

        */ - AdditionalCodeRepositoryEquals?: string; + NextToken?: string; } -export namespace ListNotebookInstancesInput { +export namespace ListModelPackagesOutput { /** * @internal */ - export const filterSensitiveLog = (obj: ListNotebookInstancesInput): any => ({ + export const filterSensitiveLog = (obj: ListModelPackagesOutput): any => ({ ...obj, }); } -/** - *

        Provides summary information for an SageMaker notebook instance.

        - */ -export interface NotebookInstanceSummary { - /** - *

        The name of the notebook instance that you want a summary for.

        - */ - NotebookInstanceName: string | undefined; - - /** - *

        The Amazon Resource Name (ARN) of the notebook instance.

        - */ - NotebookInstanceArn: string | undefined; - +export interface ListModelQualityJobDefinitionsRequest { /** - *

        The status of the notebook instance.

        + *

        A filter that returns only model quality monitoring job definitions that are associated + * with the specified endpoint.

        */ - NotebookInstanceStatus?: NotebookInstanceStatus | string; + EndpointName?: string; /** - *

        The URL that you use to connect to the Jupyter notebook running in your notebook - * instance.

        + *

        The field to sort results by. The default is CreationTime.

        */ - Url?: string; + SortBy?: MonitoringJobDefinitionSortKey | string; /** - *

        The type of ML compute instance that the notebook instance is running on.

        + *

        The sort order for results. The default is Descending.

        */ - InstanceType?: _InstanceType | string; + SortOrder?: SortOrder | string; /** - *

        A timestamp that shows when the notebook instance was created.

        + *

        If the result of the previous ListModelQualityJobDefinitions request was + * truncated, the response includes a NextToken. To retrieve the next set of + * model quality monitoring job definitions, use the token in the next request.

        */ - CreationTime?: Date; + NextToken?: string; /** - *

        A timestamp that shows when the notebook instance was last modified.

        + *

        The maximum number of results to return in a call to + * ListModelQualityJobDefinitions.

        */ - LastModifiedTime?: Date; + MaxResults?: number; /** - *

        The name of a notebook instance lifecycle configuration associated with this notebook - * instance.

        - *

        For information about notebook instance lifestyle configurations, see Step - * 2.1: (Optional) Customize a Notebook Instance.

        + *

        A string in the monitoring job definition name. This filter returns only model quality monitoring + * job definitions whose name contains the specified string.

        */ - NotebookInstanceLifecycleConfigName?: string; + NameContains?: string; /** - *

        The Git repository associated with the notebook instance as its default code - * repository. This can be either the name of a Git repository stored as a resource in your - * account, or the URL of a Git repository in Amazon Web Services CodeCommit or in any - * other Git repository. When you open a notebook instance, it opens in the directory that - * contains this repository. For more information, see Associating Git Repositories with SageMaker - * Notebook Instances.

        + *

        A filter that returns only model quality monitoring job definitions created before the + * specified time.

        */ - DefaultCodeRepository?: string; + CreationTimeBefore?: Date; /** - *

        An array of up to three Git repositories associated with the notebook instance. These - * can be either the names of Git repositories stored as resources in your account, or the - * URL of Git repositories in Amazon Web Services CodeCommit or in any - * other Git repository. These repositories are cloned at the same level as the default - * repository of your notebook instance. For more information, see Associating Git - * Repositories with SageMaker Notebook Instances.

        + *

        A filter that returns only model quality monitoring job definitions created after the + * specified time.

        */ - AdditionalCodeRepositories?: string[]; + CreationTimeAfter?: Date; } -export namespace NotebookInstanceSummary { +export namespace ListModelQualityJobDefinitionsRequest { /** * @internal */ - export const filterSensitiveLog = (obj: NotebookInstanceSummary): any => ({ + export const filterSensitiveLog = (obj: ListModelQualityJobDefinitionsRequest): any => ({ ...obj, }); } -export interface ListNotebookInstancesOutput { +export interface ListModelQualityJobDefinitionsResponse { /** - *

        If the response to the previous ListNotebookInstances request was - * truncated, SageMaker returns this token. To retrieve the next set of notebook instances, use - * the token in the next request.

        + *

        A list of summaries of model quality monitoring job definitions.

        */ - NextToken?: string; + JobDefinitionSummaries: MonitoringJobDefinitionSummary[] | undefined; /** - *

        An array of NotebookInstanceSummary objects, one for each notebook - * instance.

        + *

        If the response is truncated, Amazon SageMaker returns this token. To retrieve the next set of model + * quality monitoring job definitions, use it in the next request.

        */ - NotebookInstances?: NotebookInstanceSummary[]; + NextToken?: string; } -export namespace ListNotebookInstancesOutput { +export namespace ListModelQualityJobDefinitionsResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ListNotebookInstancesOutput): any => ({ + export const filterSensitiveLog = (obj: ListModelQualityJobDefinitionsResponse): any => ({ ...obj, }); } -export enum SortPipelineExecutionsBy { - CREATION_TIME = "CreationTime", - PIPELINE_EXECUTION_ARN = "PipelineExecutionArn", +export enum ModelSortKey { + CreationTime = "CreationTime", + Name = "Name", } -export interface ListPipelineExecutionsRequest { +export interface ListModelsInput { /** - *

        The name of the pipeline.

        + *

        Sorts the list of results. The default is CreationTime.

        */ - PipelineName: string | undefined; + SortBy?: ModelSortKey | string; /** - *

        A filter that returns the pipeline executions that were created after a specified - * time.

        + *

        The sort order for results. The default is Descending.

        */ - CreatedAfter?: Date; + SortOrder?: OrderKey | string; /** - *

        A filter that returns the pipeline executions that were created before a specified - * time.

        + *

        If the response to a previous ListModels request was truncated, the + * response includes a NextToken. To retrieve the next set of models, use the + * token in the next request.

        */ - CreatedBefore?: Date; + NextToken?: string; /** - *

        The field by which to sort results. The default is CreatedTime.

        + *

        The maximum number of models to return in the response.

        */ - SortBy?: SortPipelineExecutionsBy | string; + MaxResults?: number; /** - *

        The sort order for results.

        + *

        A string in the model name. This filter returns only models whose + * name contains the specified string.

        */ - SortOrder?: SortOrder | string; + NameContains?: string; /** - *

        If the result of the previous ListPipelineExecutions request was truncated, - * the response includes a NextToken. To retrieve the next set of pipeline executions, use the token in the next request.

        + *

        A filter that returns only models created before the specified time + * (timestamp).

        */ - NextToken?: string; + CreationTimeBefore?: Date; /** - *

        The maximum number of pipeline executions to return in the response.

        + *

        A filter that returns only models with a creation time greater than or equal to the + * specified time (timestamp).

        */ - MaxResults?: number; + CreationTimeAfter?: Date; } -export namespace ListPipelineExecutionsRequest { +export namespace ListModelsInput { /** * @internal */ - export const filterSensitiveLog = (obj: ListPipelineExecutionsRequest): any => ({ + export const filterSensitiveLog = (obj: ListModelsInput): any => ({ ...obj, }); } /** - *

        A pipeline execution summary.

        + *

        Provides summary information about a model.

        */ -export interface PipelineExecutionSummary { +export interface ModelSummary { /** - *

        The Amazon Resource Name (ARN) of the pipeline execution.

        + *

        The name of the model that you want a summary for.

        */ - PipelineExecutionArn?: string; + ModelName: string | undefined; /** - *

        The start time of the pipeline execution.

        + *

        The Amazon Resource Name (ARN) of the model.

        */ - StartTime?: Date; + ModelArn: string | undefined; /** - *

        The status of the pipeline execution.

        + *

        A timestamp that indicates when the model was created.

        */ - PipelineExecutionStatus?: PipelineExecutionStatus | string; + CreationTime: Date | undefined; +} +export namespace ModelSummary { /** - *

        The description of the pipeline execution.

        + * @internal */ - PipelineExecutionDescription?: string; + export const filterSensitiveLog = (obj: ModelSummary): any => ({ + ...obj, + }); +} +export interface ListModelsOutput { /** - *

        The display name of the pipeline execution.

        + *

        An array of ModelSummary objects, each of which lists a + * model.

        */ - PipelineExecutionDisplayName?: string; + Models: ModelSummary[] | undefined; /** - *

        A message generated by SageMaker Pipelines describing why the pipeline execution failed.

        + *

        If the response is truncated, SageMaker returns this token. To retrieve the next set of + * models, use it in the subsequent request.

        */ - PipelineExecutionFailureReason?: string; + NextToken?: string; } -export namespace PipelineExecutionSummary { +export namespace ListModelsOutput { /** * @internal */ - export const filterSensitiveLog = (obj: PipelineExecutionSummary): any => ({ + export const filterSensitiveLog = (obj: ListModelsOutput): any => ({ ...obj, }); } -export interface ListPipelineExecutionsResponse { +export enum MonitoringExecutionSortKey { + CREATION_TIME = "CreationTime", + SCHEDULED_TIME = "ScheduledTime", + STATUS = "Status", +} + +export interface ListMonitoringExecutionsRequest { /** - *

        Contains a sorted list of pipeline execution summary objects matching the specified - * filters. Each run summary includes the Amazon Resource Name (ARN) of the pipeline execution, the run date, - * and the status. This list can be empty.

        + *

        Name of a specific schedule to fetch jobs for.

        */ - PipelineExecutionSummaries?: PipelineExecutionSummary[]; + MonitoringScheduleName?: string; /** - *

        If the result of the previous ListPipelineExecutions request was truncated, - * the response includes a NextToken. To retrieve the next set of pipeline executions, use the token in the next request.

        + *

        Name of a specific endpoint to fetch jobs for.

        */ - NextToken?: string; -} + EndpointName?: string; -export namespace ListPipelineExecutionsResponse { /** - * @internal + *

        Whether to sort results by the Status, CreationTime, + * or ScheduledTime field. The default is CreationTime.

        */ - export const filterSensitiveLog = (obj: ListPipelineExecutionsResponse): any => ({ - ...obj, - }); -} + SortBy?: MonitoringExecutionSortKey | string; -export interface ListPipelineExecutionStepsRequest { /** - *

        The Amazon Resource Name (ARN) of the pipeline execution.

        + *

        Whether to sort the results in Ascending or Descending order. + * The default is Descending.

        */ - PipelineExecutionArn?: string; + SortOrder?: SortOrder | string; /** - *

        If the result of the previous ListPipelineExecutionSteps request was truncated, - * the response includes a NextToken. To retrieve the next set of pipeline execution steps, use the token in the next request.

        + *

        The token returned if the response is truncated. To retrieve the next set of job + * executions, use it in the next request.

        */ NextToken?: string; /** - *

        The maximum number of pipeline execution steps to return in the response.

        + *

        The maximum number of jobs to return in the response. The default value is 10.

        */ MaxResults?: number; /** - *

        The field by which to sort results. The default is CreatedTime.

        + *

        Filter for jobs scheduled before a specified time.

        */ - SortOrder?: SortOrder | string; -} + ScheduledTimeBefore?: Date; -export namespace ListPipelineExecutionStepsRequest { /** - * @internal + *

        Filter for jobs scheduled after a specified time.

        */ - export const filterSensitiveLog = (obj: ListPipelineExecutionStepsRequest): any => ({ - ...obj, - }); -} + ScheduledTimeAfter?: Date; + + /** + *

        A filter that returns only jobs created before a specified time.

        + */ + CreationTimeBefore?: Date; + + /** + *

        A filter that returns only jobs created after a specified time.

        + */ + CreationTimeAfter?: Date; + + /** + *

        A filter that returns only jobs modified before a specified time.

        + */ + LastModifiedTimeBefore?: Date; + + /** + *

        A filter that returns only jobs modified after a specified time.

        + */ + LastModifiedTimeAfter?: Date; -/** - *

        Metadata for Model steps.

        - */ -export interface ModelStepMetadata { /** - *

        The Amazon Resource Name (ARN) of the created model.

        + *

        A filter that retrieves only jobs with a specific status.

        */ - Arn?: string; -} + StatusEquals?: ExecutionStatus | string; -export namespace ModelStepMetadata { /** - * @internal + *

        Gets a list of the monitoring job runs of the specified monitoring job + * definitions.

        */ - export const filterSensitiveLog = (obj: ModelStepMetadata): any => ({ - ...obj, - }); -} + MonitoringJobDefinitionName?: string; -/** - *

        Metadata for a processing job step.

        - */ -export interface ProcessingJobStepMetadata { /** - *

        The Amazon Resource Name (ARN) of the processing job.

        + *

        A filter that returns only the monitoring job runs of the specified monitoring + * type.

        */ - Arn?: string; + MonitoringTypeEquals?: MonitoringType | string; } -export namespace ProcessingJobStepMetadata { +export namespace ListMonitoringExecutionsRequest { /** * @internal */ - export const filterSensitiveLog = (obj: ProcessingJobStepMetadata): any => ({ + export const filterSensitiveLog = (obj: ListMonitoringExecutionsRequest): any => ({ ...obj, }); } -/** - *

        Container for the metadata for a Quality check step. For more information, see - * the topic on QualityCheck step in the Amazon SageMaker Developer Guide. - *

        - */ -export interface QualityCheckStepMetadata { - /** - *

        The type of the Quality check step.

        - */ - CheckType?: string; - +export interface ListMonitoringExecutionsResponse { /** - *

        The Amazon S3 URI of the baseline statistics file used for the drift check.

        + *

        A JSON array in which each element is a summary for a monitoring execution.

        */ - BaselineUsedForDriftCheckStatistics?: string; + MonitoringExecutionSummaries: MonitoringExecutionSummary[] | undefined; /** - *

        The Amazon S3 URI of the baseline constraints file used for the drift check.

        + *

        If the response is truncated, Amazon SageMaker returns this token. To retrieve the next set of jobs, + * use it in the subsequent request.

        */ - BaselineUsedForDriftCheckConstraints?: string; + NextToken?: string; +} +export namespace ListMonitoringExecutionsResponse { /** - *

        The Amazon S3 URI of the newly calculated baseline statistics file.

        + * @internal */ - CalculatedBaselineStatistics?: string; + export const filterSensitiveLog = (obj: ListMonitoringExecutionsResponse): any => ({ + ...obj, + }); +} - /** - *

        The Amazon S3 URI of the newly calculated baseline constraints file.

        - */ - CalculatedBaselineConstraints?: string; +export enum MonitoringScheduleSortKey { + CREATION_TIME = "CreationTime", + NAME = "Name", + STATUS = "Status", +} +export interface ListMonitoringSchedulesRequest { /** - *

        The model package group name.

        + *

        Name of a specific endpoint to fetch schedules for.

        */ - ModelPackageGroupName?: string; + EndpointName?: string; /** - *

        The Amazon S3 URI of violation report if violations are detected.

        + *

        Whether to sort results by the Status, CreationTime, + * or ScheduledTime field. The default is CreationTime.

        */ - ViolationReport?: string; + SortBy?: MonitoringScheduleSortKey | string; /** - *

        The Amazon Resource Name (ARN) of the Quality check processing job that was run by this step execution.

        + *

        Whether to sort the results in Ascending or Descending order. + * The default is Descending.

        */ - CheckJobArn?: string; + SortOrder?: SortOrder | string; /** - *

        This flag indicates if the drift check against the previous baseline will be skipped or not. - * If it is set to False, the previous baseline of the configured check type must be available.

        + *

        The token returned if the response is truncated. To retrieve the next set of job + * executions, use it in the next request.

        */ - SkipCheck?: boolean; + NextToken?: string; /** - *

        This flag indicates if a newly calculated baseline can be accessed through step properties - * BaselineUsedForDriftCheckConstraints and BaselineUsedForDriftCheckStatistics. - * If it is set to False, the previous baseline of the configured check type must also be available. - * These can be accessed through the BaselineUsedForDriftCheckConstraints and - * BaselineUsedForDriftCheckStatistics properties.

        + *

        The maximum number of jobs to return in the response. The default value is 10.

        */ - RegisterNewBaseline?: boolean; -} + MaxResults?: number; -export namespace QualityCheckStepMetadata { /** - * @internal + *

        Filter for monitoring schedules whose name contains a specified string.

        */ - export const filterSensitiveLog = (obj: QualityCheckStepMetadata): any => ({ - ...obj, - }); -} + NameContains?: string; -/** - *

        Metadata for a register model job step.

        - */ -export interface RegisterModelStepMetadata { /** - *

        The Amazon Resource Name (ARN) of the model package.

        + *

        A filter that returns only monitoring schedules created before a specified time.

        */ - Arn?: string; -} + CreationTimeBefore?: Date; -export namespace RegisterModelStepMetadata { /** - * @internal + *

        A filter that returns only monitoring schedules created after a specified time.

        */ - export const filterSensitiveLog = (obj: RegisterModelStepMetadata): any => ({ - ...obj, - }); -} + CreationTimeAfter?: Date; -/** - *

        Metadata for a training job step.

        - */ -export interface TrainingJobStepMetadata { /** - *

        The Amazon Resource Name (ARN) of the training job that was run by this step execution.

        + *

        A filter that returns only monitoring schedules modified before a specified time.

        */ - Arn?: string; -} + LastModifiedTimeBefore?: Date; -export namespace TrainingJobStepMetadata { /** - * @internal + *

        A filter that returns only monitoring schedules modified after a specified time.

        */ - export const filterSensitiveLog = (obj: TrainingJobStepMetadata): any => ({ - ...obj, - }); -} + LastModifiedTimeAfter?: Date; -/** - *

        Metadata for a transform job step.

        - */ -export interface TransformJobStepMetadata { /** - *

        The Amazon Resource Name (ARN) of the transform job that was run by this step execution.

        + *

        A filter that returns only monitoring schedules with the specified status.

        */ - Arn?: string; -} + StatusEquals?: ScheduleStatus | string; -export namespace TransformJobStepMetadata { /** - * @internal + *

        Gets a list of the monitoring schedules for the specified monitoring job + * definition.

        */ - export const filterSensitiveLog = (obj: TransformJobStepMetadata): any => ({ - ...obj, - }); -} + MonitoringJobDefinitionName?: string; -/** - *

        Metadata for a tuning step.

        - */ -export interface TuningJobStepMetaData { /** - *

        The Amazon Resource Name (ARN) of the tuning job that was run by this step execution.

        + *

        A filter that returns only the monitoring schedules for the specified monitoring + * type.

        */ - Arn?: string; + MonitoringTypeEquals?: MonitoringType | string; } -export namespace TuningJobStepMetaData { +export namespace ListMonitoringSchedulesRequest { /** * @internal */ - export const filterSensitiveLog = (obj: TuningJobStepMetaData): any => ({ + export const filterSensitiveLog = (obj: ListMonitoringSchedulesRequest): any => ({ ...obj, }); } /** - *

        Metadata for a step execution.

        + *

        Summarizes the monitoring schedule.

        */ -export interface PipelineExecutionStepMetadata { - /** - *

        The Amazon Resource Name (ARN) of the training job that was run by this step execution.

        - */ - TrainingJob?: TrainingJobStepMetadata; - - /** - *

        The Amazon Resource Name (ARN) of the processing job that was run by this step execution.

        - */ - ProcessingJob?: ProcessingJobStepMetadata; - +export interface MonitoringScheduleSummary { /** - *

        The Amazon Resource Name (ARN) of the transform job that was run by this step execution.

        + *

        The name of the monitoring schedule.

        */ - TransformJob?: TransformJobStepMetadata; + MonitoringScheduleName: string | undefined; /** - *

        The Amazon Resource Name (ARN) of the tuning job that was run by this step execution.

        + *

        The Amazon Resource Name (ARN) of the monitoring schedule.

        */ - TuningJob?: TuningJobStepMetaData; + MonitoringScheduleArn: string | undefined; /** - *

        The Amazon Resource Name (ARN) of the model that was created by this step execution.

        + *

        The creation time of the monitoring schedule.

        */ - Model?: ModelStepMetadata; + CreationTime: Date | undefined; /** - *

        The Amazon Resource Name (ARN) of the model package the model was registered to by this step execution.

        + *

        The last time the monitoring schedule was modified.

        */ - RegisterModel?: RegisterModelStepMetadata; + LastModifiedTime: Date | undefined; /** - *

        The outcome of the condition evaluation that was run by this step execution.

        + *

        The status of the monitoring schedule.

        */ - Condition?: ConditionStepMetadata; + MonitoringScheduleStatus: ScheduleStatus | string | undefined; /** - *

        The URL of the Amazon SQS queue used by this step execution, the pipeline generated token, - * and a list of output parameters.

        + *

        The name of the endpoint using the monitoring schedule.

        */ - Callback?: CallbackStepMetadata; + EndpointName?: string; /** - *

        The Amazon Resource Name (ARN) of the Lambda function that was run by this step execution and a list of - * output parameters.

        + *

        The name of the monitoring job definition that the schedule is for.

        */ - Lambda?: LambdaStepMetadata; + MonitoringJobDefinitionName?: string; /** - *

        The configurations and outcomes of the check step execution. This includes:

        - *
          - *
        • - *

          The type of the check conducted,

          - *
        • - *
        • - *

          The Amazon S3 URIs of baseline constraints and statistics files to be used for the drift check.

          - *
        • - *
        • - *

          The Amazon S3 URIs of newly calculated baseline constraints and statistics.

          - *
        • - *
        • - *

          The model package group name provided.

          - *
        • - *
        • - *

          The Amazon S3 URI of the violation report if violations detected.

          - *
        • - *
        • - *

          The Amazon Resource Name (ARN) of check processing job initiated by the step execution.

          - *
        • - *
        • - *

          The boolean flags indicating if the drift check is skipped.

          - *
        • - *
        • - *

          If step property BaselineUsedForDriftCheck is set the same as - * CalculatedBaseline.

          - *
        • - *
        + *

        The type of the monitoring job definition that the schedule is for.

        */ - QualityCheck?: QualityCheckStepMetadata; + MonitoringType?: MonitoringType | string; +} +export namespace MonitoringScheduleSummary { /** - *

        Container for the metadata for a Clarify check step. The configurations - * and outcomes of the check step execution. This includes:

        - *
          - *
        • - *

          The type of the check conducted,

          - *
        • - *
        • - *

          The Amazon S3 URIs of baseline constraints and statistics files to be used for the drift check.

          - *
        • - *
        • - *

          The Amazon S3 URIs of newly calculated baseline constraints and statistics.

          - *
        • - *
        • - *

          The model package group name provided.

          - *
        • - *
        • - *

          The Amazon S3 URI of the violation report if violations detected.

          - *
        • - *
        • - *

          The Amazon Resource Name (ARN) of check processing job initiated by the step execution.

          - *
        • - *
        • - *

          The boolean flags indicating if the drift check is skipped.

          - *
        • - *
        • - *

          If step property BaselineUsedForDriftCheck is set the same as - * CalculatedBaseline.

          - *
        • - *
        + * @internal */ - ClarifyCheck?: ClarifyCheckStepMetadata; + export const filterSensitiveLog = (obj: MonitoringScheduleSummary): any => ({ + ...obj, + }); +} +export interface ListMonitoringSchedulesResponse { /** - *

        The configurations and outcomes of an EMR step execution.

        + *

        A JSON array in which each element is a summary for a monitoring schedule.

        */ - EMR?: EMRStepMetadata; + MonitoringScheduleSummaries: MonitoringScheduleSummary[] | undefined; /** - *

        The configurations and outcomes of a Fail step execution.

        + *

        If the response is truncated, Amazon SageMaker returns this token. To retrieve the next set of jobs, + * use it in the subsequent request.

        */ - Fail?: FailStepMetadata; + NextToken?: string; } -export namespace PipelineExecutionStepMetadata { +export namespace ListMonitoringSchedulesResponse { /** * @internal */ - export const filterSensitiveLog = (obj: PipelineExecutionStepMetadata): any => ({ + export const filterSensitiveLog = (obj: ListMonitoringSchedulesResponse): any => ({ ...obj, }); } -export enum StepStatus { - EXECUTING = "Executing", - FAILED = "Failed", - STARTING = "Starting", - STOPPED = "Stopped", - STOPPING = "Stopping", - SUCCEEDED = "Succeeded", +export enum NotebookInstanceLifecycleConfigSortKey { + CREATION_TIME = "CreationTime", + LAST_MODIFIED_TIME = "LastModifiedTime", + NAME = "Name", } -/** - *

        An execution of a step in a pipeline.

        - */ -export interface PipelineExecutionStep { +export enum NotebookInstanceLifecycleConfigSortOrder { + ASCENDING = "Ascending", + DESCENDING = "Descending", +} + +export interface ListNotebookInstanceLifecycleConfigsInput { /** - *

        The name of the step that is executed.

        + *

        If the result of a ListNotebookInstanceLifecycleConfigs request was + * truncated, the response includes a NextToken. To get the next set of + * lifecycle configurations, use the token in the next request.

        */ - StepName?: string; + NextToken?: string; /** - *

        The display name of the step.

        + *

        The maximum number of lifecycle configurations to return in the response.

        */ - StepDisplayName?: string; + MaxResults?: number; /** - *

        The description of the step.

        + *

        Sorts the list of results. The default is CreationTime.

        */ - StepDescription?: string; + SortBy?: NotebookInstanceLifecycleConfigSortKey | string; /** - *

        The time that the step started executing.

        + *

        The sort order for results.

        */ - StartTime?: Date; + SortOrder?: NotebookInstanceLifecycleConfigSortOrder | string; /** - *

        The time that the step stopped executing.

        + *

        A string in the lifecycle configuration name. This filter returns only lifecycle + * configurations whose name contains the specified string.

        */ - EndTime?: Date; + NameContains?: string; /** - *

        The status of the step execution.

        + *

        A filter that returns only lifecycle configurations that were created before the + * specified time (timestamp).

        */ - StepStatus?: StepStatus | string; + CreationTimeBefore?: Date; /** - *

        If this pipeline execution step was cached, details on the cache hit.

        + *

        A filter that returns only lifecycle configurations that were created after the + * specified time (timestamp).

        */ - CacheHitResult?: CacheHitResult; + CreationTimeAfter?: Date; /** - *

        The current attempt of the execution step. For more information, see Retry Policy for SageMaker Pipelines steps.

        + *

        A filter that returns only lifecycle configurations that were modified before the + * specified time (timestamp).

        */ - AttemptCount?: number; + LastModifiedTimeBefore?: Date; /** - *

        The reason why the step failed execution. This is only returned if the step failed its execution.

        + *

        A filter that returns only lifecycle configurations that were modified after the + * specified time (timestamp).

        */ - FailureReason?: string; + LastModifiedTimeAfter?: Date; +} +export namespace ListNotebookInstanceLifecycleConfigsInput { /** - *

        Metadata for the step execution.

        + * @internal */ - Metadata?: PipelineExecutionStepMetadata; + export const filterSensitiveLog = (obj: ListNotebookInstanceLifecycleConfigsInput): any => ({ + ...obj, + }); } -export namespace PipelineExecutionStep { +/** + *

        Provides a summary of a notebook instance lifecycle configuration.

        + */ +export interface NotebookInstanceLifecycleConfigSummary { + /** + *

        The name of the lifecycle configuration.

        + */ + NotebookInstanceLifecycleConfigName: string | undefined; + /** - * @internal + *

        The Amazon Resource Name (ARN) of the lifecycle configuration.

        */ - export const filterSensitiveLog = (obj: PipelineExecutionStep): any => ({ - ...obj, - }); -} + NotebookInstanceLifecycleConfigArn: string | undefined; -export interface ListPipelineExecutionStepsResponse { /** - *

        A list of PipeLineExecutionStep objects. Each - * PipeLineExecutionStep consists of StepName, StartTime, EndTime, StepStatus, - * and Metadata. Metadata is an object with properties for each job that contains relevant - * information about the job created by the step.

        + *

        A timestamp that tells when the lifecycle configuration was created.

        */ - PipelineExecutionSteps?: PipelineExecutionStep[]; + CreationTime?: Date; /** - *

        If the result of the previous ListPipelineExecutionSteps request was truncated, - * the response includes a NextToken. To retrieve the next set of pipeline execution steps, use the token in the next request.

        + *

        A timestamp that tells when the lifecycle configuration was last modified.

        */ - NextToken?: string; + LastModifiedTime?: Date; } -export namespace ListPipelineExecutionStepsResponse { +export namespace NotebookInstanceLifecycleConfigSummary { /** * @internal */ - export const filterSensitiveLog = (obj: ListPipelineExecutionStepsResponse): any => ({ + export const filterSensitiveLog = (obj: NotebookInstanceLifecycleConfigSummary): any => ({ ...obj, }); } -export interface ListPipelineParametersForExecutionRequest { - /** - *

        The Amazon Resource Name (ARN) of the pipeline execution.

        - */ - PipelineExecutionArn: string | undefined; - +export interface ListNotebookInstanceLifecycleConfigsOutput { /** - *

        If the result of the previous ListPipelineParametersForExecution request was truncated, - * the response includes a NextToken. To retrieve the next set of parameters, use the token in the next request.

        + *

        If the response is truncated, SageMaker returns this token. To get the next set of + * lifecycle configurations, use it in the next request.

        */ NextToken?: string; /** - *

        The maximum number of parameters to return in the response.

        + *

        An array of NotebookInstanceLifecycleConfiguration objects, each listing + * a lifecycle configuration.

        */ - MaxResults?: number; + NotebookInstanceLifecycleConfigs?: NotebookInstanceLifecycleConfigSummary[]; } -export namespace ListPipelineParametersForExecutionRequest { +export namespace ListNotebookInstanceLifecycleConfigsOutput { /** * @internal */ - export const filterSensitiveLog = (obj: ListPipelineParametersForExecutionRequest): any => ({ + export const filterSensitiveLog = (obj: ListNotebookInstanceLifecycleConfigsOutput): any => ({ ...obj, }); } -/** - *

        Assigns a value to a named Pipeline parameter.

        - */ -export interface Parameter { +export enum NotebookInstanceSortKey { + CREATION_TIME = "CreationTime", + NAME = "Name", + STATUS = "Status", +} + +export enum NotebookInstanceSortOrder { + ASCENDING = "Ascending", + DESCENDING = "Descending", +} + +export interface ListNotebookInstancesInput { /** - *

        The name of the parameter to assign a value to. This - * parameter name must match a named parameter in the - * pipeline definition.

        + *

        If the previous call to the ListNotebookInstances is truncated, the + * response includes a NextToken. You can use this token in your subsequent + * ListNotebookInstances request to fetch the next set of notebook + * instances.

        + * + *

        You might specify a filter or a sort order in your request. When response is + * truncated, you must use the same values for the filer and sort order in the next + * request.

        + *
        */ - Name: string | undefined; + NextToken?: string; /** - *

        The literal value for the parameter.

        + *

        The maximum number of notebook instances to return.

        */ - Value: string | undefined; -} + MaxResults?: number; -export namespace Parameter { /** - * @internal + *

        The field to sort results by. The default is Name.

        */ - export const filterSensitiveLog = (obj: Parameter): any => ({ - ...obj, - }); -} + SortBy?: NotebookInstanceSortKey | string; -export interface ListPipelineParametersForExecutionResponse { /** - *

        Contains a list of pipeline parameters. This list can be empty.

        + *

        The sort order for results.

        */ - PipelineParameters?: Parameter[]; + SortOrder?: NotebookInstanceSortOrder | string; /** - *

        If the result of the previous ListPipelineParametersForExecution request was truncated, - * the response includes a NextToken. To retrieve the next set of parameters, use the token in the next request.

        + *

        A string in the notebook instances' name. This filter returns only notebook + * instances whose name contains the specified string.

        */ - NextToken?: string; -} + NameContains?: string; -export namespace ListPipelineParametersForExecutionResponse { /** - * @internal + *

        A filter that returns only notebook instances that were created before the + * specified time (timestamp).

        */ - export const filterSensitiveLog = (obj: ListPipelineParametersForExecutionResponse): any => ({ - ...obj, - }); -} - -export enum SortPipelinesBy { - CREATION_TIME = "CreationTime", - NAME = "Name", -} + CreationTimeBefore?: Date; -export interface ListPipelinesRequest { /** - *

        The prefix of the pipeline name.

        + *

        A filter that returns only notebook instances that were created after the specified + * time (timestamp).

        */ - PipelineNamePrefix?: string; + CreationTimeAfter?: Date; /** - *

        A filter that returns the pipelines that were created after a specified - * time.

        + *

        A filter that returns only notebook instances that were modified before the + * specified time (timestamp).

        */ - CreatedAfter?: Date; + LastModifiedTimeBefore?: Date; /** - *

        A filter that returns the pipelines that were created before a specified - * time.

        + *

        A filter that returns only notebook instances that were modified after the + * specified time (timestamp).

        */ - CreatedBefore?: Date; + LastModifiedTimeAfter?: Date; /** - *

        The field by which to sort results. The default is CreatedTime.

        + *

        A filter that returns only notebook instances with the specified status.

        */ - SortBy?: SortPipelinesBy | string; + StatusEquals?: NotebookInstanceStatus | string; /** - *

        The sort order for results.

        + *

        A string in the name of a notebook instances lifecycle configuration associated with + * this notebook instance. This filter returns only notebook instances associated with a + * lifecycle configuration with a name that contains the specified string.

        */ - SortOrder?: SortOrder | string; + NotebookInstanceLifecycleConfigNameContains?: string; /** - *

        If the result of the previous ListPipelines request was truncated, - * the response includes a NextToken. To retrieve the next set of pipelines, use the token in the next request.

        + *

        A string in the name or URL of a Git repository associated with this notebook + * instance. This filter returns only notebook instances associated with a git repository + * with a name that contains the specified string.

        */ - NextToken?: string; + DefaultCodeRepositoryContains?: string; /** - *

        The maximum number of pipelines to return in the response.

        + *

        A filter that returns only notebook instances with associated with the specified git + * repository.

        */ - MaxResults?: number; + AdditionalCodeRepositoryEquals?: string; } -export namespace ListPipelinesRequest { +export namespace ListNotebookInstancesInput { /** * @internal */ - export const filterSensitiveLog = (obj: ListPipelinesRequest): any => ({ + export const filterSensitiveLog = (obj: ListNotebookInstancesInput): any => ({ ...obj, }); } /** - *

        A summary of a pipeline.

        + *

        Provides summary information for an SageMaker notebook instance.

        */ -export interface PipelineSummary { +export interface NotebookInstanceSummary { /** - *

        The Amazon Resource Name (ARN) of the pipeline.

        + *

        The name of the notebook instance that you want a summary for.

        */ - PipelineArn?: string; + NotebookInstanceName: string | undefined; /** - *

        The name of the pipeline.

        + *

        The Amazon Resource Name (ARN) of the notebook instance.

        */ - PipelineName?: string; + NotebookInstanceArn: string | undefined; /** - *

        The display name of the pipeline.

        + *

        The status of the notebook instance.

        */ - PipelineDisplayName?: string; + NotebookInstanceStatus?: NotebookInstanceStatus | string; /** - *

        The description of the pipeline.

        + *

        The URL that you use to connect to the Jupyter notebook running in your notebook + * instance.

        */ - PipelineDescription?: string; + Url?: string; /** - *

        The Amazon Resource Name (ARN) that the pipeline used to execute.

        + *

        The type of ML compute instance that the notebook instance is running on.

        */ - RoleArn?: string; + InstanceType?: _InstanceType | string; /** - *

        The creation time of the pipeline.

        + *

        A timestamp that shows when the notebook instance was created.

        */ CreationTime?: Date; /** - *

        The time that the pipeline was last modified.

        + *

        A timestamp that shows when the notebook instance was last modified.

        */ LastModifiedTime?: Date; /** - *

        The last time that a pipeline execution began.

        - */ - LastExecutionTime?: Date; -} - -export namespace PipelineSummary { - /** - * @internal + *

        The name of a notebook instance lifecycle configuration associated with this notebook + * instance.

        + *

        For information about notebook instance lifestyle configurations, see Step + * 2.1: (Optional) Customize a Notebook Instance.

        */ - export const filterSensitiveLog = (obj: PipelineSummary): any => ({ - ...obj, - }); -} + NotebookInstanceLifecycleConfigName?: string; -export interface ListPipelinesResponse { /** - *

        Contains a sorted list of PipelineSummary objects matching the specified - * filters. Each PipelineSummary consists of PipelineArn, PipelineName, - * ExperimentName, PipelineDescription, CreationTime, LastModifiedTime, LastRunTime, and - * RoleArn. This list can be empty.

        + *

        The Git repository associated with the notebook instance as its default code + * repository. This can be either the name of a Git repository stored as a resource in your + * account, or the URL of a Git repository in Amazon Web Services CodeCommit or in any + * other Git repository. When you open a notebook instance, it opens in the directory that + * contains this repository. For more information, see Associating Git Repositories with SageMaker + * Notebook Instances.

        */ - PipelineSummaries?: PipelineSummary[]; + DefaultCodeRepository?: string; /** - *

        If the result of the previous ListPipelines request was truncated, - * the response includes a NextToken. To retrieve the next set of pipelines, use the token in the next request.

        + *

        An array of up to three Git repositories associated with the notebook instance. These + * can be either the names of Git repositories stored as resources in your account, or the + * URL of Git repositories in Amazon Web Services CodeCommit or in any + * other Git repository. These repositories are cloned at the same level as the default + * repository of your notebook instance. For more information, see Associating Git + * Repositories with SageMaker Notebook Instances.

        */ - NextToken?: string; + AdditionalCodeRepositories?: string[]; } -export namespace ListPipelinesResponse { +export namespace NotebookInstanceSummary { /** * @internal */ - export const filterSensitiveLog = (obj: ListPipelinesResponse): any => ({ + export const filterSensitiveLog = (obj: NotebookInstanceSummary): any => ({ ...obj, }); } -export interface ListProcessingJobsRequest { +export interface ListNotebookInstancesOutput { /** - *

        A filter that returns only processing jobs created after the specified time.

        + *

        If the response to the previous ListNotebookInstances request was + * truncated, SageMaker returns this token. To retrieve the next set of notebook instances, use + * the token in the next request.

        */ - CreationTimeAfter?: Date; + NextToken?: string; /** - *

        A filter that returns only processing jobs created after the specified time.

        + *

        An array of NotebookInstanceSummary objects, one for each notebook + * instance.

        */ - CreationTimeBefore?: Date; + NotebookInstances?: NotebookInstanceSummary[]; +} +export namespace ListNotebookInstancesOutput { /** - *

        A filter that returns only processing jobs modified after the specified time.

        + * @internal */ - LastModifiedTimeAfter?: Date; + export const filterSensitiveLog = (obj: ListNotebookInstancesOutput): any => ({ + ...obj, + }); +} + +export enum SortPipelineExecutionsBy { + CREATION_TIME = "CreationTime", + PIPELINE_EXECUTION_ARN = "PipelineExecutionArn", +} +export interface ListPipelineExecutionsRequest { /** - *

        A filter that returns only processing jobs modified before the specified time.

        + *

        The name of the pipeline.

        */ - LastModifiedTimeBefore?: Date; + PipelineName: string | undefined; /** - *

        A string in the processing job name. This filter returns only processing jobs whose - * name contains the specified string.

        + *

        A filter that returns the pipeline executions that were created after a specified + * time.

        */ - NameContains?: string; + CreatedAfter?: Date; /** - *

        A filter that retrieves only processing jobs with a specific status.

        + *

        A filter that returns the pipeline executions that were created before a specified + * time.

        */ - StatusEquals?: ProcessingJobStatus | string; + CreatedBefore?: Date; /** - *

        The field to sort results by. The default is CreationTime.

        + *

        The field by which to sort results. The default is CreatedTime.

        */ - SortBy?: SortBy | string; + SortBy?: SortPipelineExecutionsBy | string; /** - *

        The sort order for results. The default is Ascending.

        + *

        The sort order for results.

        */ SortOrder?: SortOrder | string; /** - *

        If the result of the previous ListProcessingJobs request was truncated, - * the response includes a NextToken. To retrieve the next set of processing - * jobs, use the token in the next request.

        + *

        If the result of the previous ListPipelineExecutions request was truncated, + * the response includes a NextToken. To retrieve the next set of pipeline executions, use the token in the next request.

        */ NextToken?: string; /** - *

        The maximum number of processing jobs to return in the response.

        + *

        The maximum number of pipeline executions to return in the response.

        */ MaxResults?: number; } -export namespace ListProcessingJobsRequest { +export namespace ListPipelineExecutionsRequest { /** * @internal */ - export const filterSensitiveLog = (obj: ListProcessingJobsRequest): any => ({ + export const filterSensitiveLog = (obj: ListPipelineExecutionsRequest): any => ({ ...obj, }); } /** - *

        Summary of information about a processing job.

        + *

        A pipeline execution summary.

        */ -export interface ProcessingJobSummary { - /** - *

        The name of the processing job.

        - */ - ProcessingJobName: string | undefined; - - /** - *

        The Amazon Resource Name (ARN) of the processing job..

        - */ - ProcessingJobArn: string | undefined; - +export interface PipelineExecutionSummary { /** - *

        The time at which the processing job was created.

        + *

        The Amazon Resource Name (ARN) of the pipeline execution.

        */ - CreationTime: Date | undefined; + PipelineExecutionArn?: string; /** - *

        The time at which the processing job completed.

        + *

        The start time of the pipeline execution.

        */ - ProcessingEndTime?: Date; + StartTime?: Date; /** - *

        A timestamp that indicates the last time the processing job was modified.

        + *

        The status of the pipeline execution.

        */ - LastModifiedTime?: Date; + PipelineExecutionStatus?: PipelineExecutionStatus | string; /** - *

        The status of the processing job.

        + *

        The description of the pipeline execution.

        */ - ProcessingJobStatus: ProcessingJobStatus | string | undefined; + PipelineExecutionDescription?: string; /** - *

        A string, up to one KB in size, that contains the reason a processing job failed, if - * it failed.

        + *

        The display name of the pipeline execution.

        */ - FailureReason?: string; + PipelineExecutionDisplayName?: string; /** - *

        An optional string, up to one KB in size, that contains metadata from the processing - * container when the processing job exits.

        + *

        A message generated by SageMaker Pipelines describing why the pipeline execution failed.

        */ - ExitMessage?: string; + PipelineExecutionFailureReason?: string; } -export namespace ProcessingJobSummary { +export namespace PipelineExecutionSummary { /** * @internal */ - export const filterSensitiveLog = (obj: ProcessingJobSummary): any => ({ + export const filterSensitiveLog = (obj: PipelineExecutionSummary): any => ({ ...obj, }); } -export interface ListProcessingJobsResponse { +export interface ListPipelineExecutionsResponse { /** - *

        An array of ProcessingJobSummary objects, each listing a processing - * job.

        + *

        Contains a sorted list of pipeline execution summary objects matching the specified + * filters. Each run summary includes the Amazon Resource Name (ARN) of the pipeline execution, the run date, + * and the status. This list can be empty.

        */ - ProcessingJobSummaries: ProcessingJobSummary[] | undefined; + PipelineExecutionSummaries?: PipelineExecutionSummary[]; /** - *

        If the response is truncated, Amazon SageMaker returns this token. To retrieve the next set of - * processing jobs, use it in the subsequent request.

        + *

        If the result of the previous ListPipelineExecutions request was truncated, + * the response includes a NextToken. To retrieve the next set of pipeline executions, use the token in the next request.

        */ NextToken?: string; } -export namespace ListProcessingJobsResponse { +export namespace ListPipelineExecutionsResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ListProcessingJobsResponse): any => ({ + export const filterSensitiveLog = (obj: ListPipelineExecutionsResponse): any => ({ ...obj, }); } diff --git a/clients/client-sagemaker/src/models/models_3.ts b/clients/client-sagemaker/src/models/models_3.ts index 43b8b2a47d69e..1755b2c7621b3 100644 --- a/clients/client-sagemaker/src/models/models_3.ts +++ b/clients/client-sagemaker/src/models/models_3.ts @@ -6,8 +6,12 @@ import { AppSpecification, BatchStrategy, BooleanOperator, + CacheHitResult, + CallbackStepMetadata, Channel, CheckpointConfig, + ClarifyCheckStepMetadata, + ConditionStepMetadata, DeploymentConfig, EdgeOutputConfig, FeatureDefinition, @@ -19,7 +23,6 @@ import { OutputDataConfig, OutputParameter, ResourceConfig, - RetryStrategy, StoppingCondition, Tag, TransformInput, @@ -39,7 +42,6 @@ import { DriftCheckBaselines, ExperimentConfig, FeatureParameter, - HyperParameterTrainingJobSummary, InstanceMetadataServiceConfiguration, MemberDefinition, ModelArtifacts, @@ -60,13 +62,13 @@ import { ProcessingStoppingCondition, ProfilerRuleConfiguration, ProvisioningParameter, + RetryStrategy, RootAccess, ServiceCatalogProvisioningDetails, SourceAlgorithmSpecification, SourceIpConfig, StudioLifecycleConfigAppType, TensorBoardOutputConfig, - TrainingJobStatus, TrialComponentArtifact, TrialComponentParameterValue, TrialComponentStatus, @@ -75,20 +77,24 @@ import { import { DesiredWeightAndCapacity, Device, + DeviceDeploymentSummary, Direction, DomainSettingsForUpdate, Edge, + EMRStepMetadata, Endpoint, Experiment, + FailStepMetadata, FeatureGroup, FeatureMetadata, Filter, GitConfigForUpdate, + HyperParameterTrainingJobSummary, + LambdaStepMetadata, LineageType, MetricData, ModelPackageGroupStatus, ModelPackageStatusDetails, - Parameter, PipelineExecutionStatus, 
PipelineExperimentConfig, PipelineStatus, @@ -101,6 +107,7 @@ import { SortBy, SortOrder, SubscribedWorkteam, + TrainingJobStatus, TransformJobStatus, TrialComponentMetricSummary, TrialComponentSource, @@ -110,6 +117,808 @@ import { Workteam, } from "./models_2"; +export interface ListPipelineExecutionStepsRequest { + /** + *

        The Amazon Resource Name (ARN) of the pipeline execution.

        + */ + PipelineExecutionArn?: string; + + /** + *

        If the result of the previous ListPipelineExecutionSteps request was truncated, + * the response includes a NextToken. To retrieve the next set of pipeline execution steps, use the token in the next request.

        + */ + NextToken?: string; + + /** + *

        The maximum number of pipeline execution steps to return in the response.

        + */ + MaxResults?: number; + + /** + *

        The field by which to sort results. The default is CreatedTime.

        + */ + SortOrder?: SortOrder | string; +} + +export namespace ListPipelineExecutionStepsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListPipelineExecutionStepsRequest): any => ({ + ...obj, + }); +} + +/** + *

        Metadata for Model steps.

        + */ +export interface ModelStepMetadata { + /** + *

        The Amazon Resource Name (ARN) of the created model.

        + */ + Arn?: string; +} + +export namespace ModelStepMetadata { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ModelStepMetadata): any => ({ + ...obj, + }); +} + +/** + *

        Metadata for a processing job step.

        + */ +export interface ProcessingJobStepMetadata { + /** + *

        The Amazon Resource Name (ARN) of the processing job.

        + */ + Arn?: string; +} + +export namespace ProcessingJobStepMetadata { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ProcessingJobStepMetadata): any => ({ + ...obj, + }); +} + +/** + *

        Container for the metadata for a Quality check step. For more information, see + * the topic on QualityCheck step in the Amazon SageMaker Developer Guide. + *

        + */ +export interface QualityCheckStepMetadata { + /** + *

        The type of the Quality check step.

        + */ + CheckType?: string; + + /** + *

        The Amazon S3 URI of the baseline statistics file used for the drift check.

        + */ + BaselineUsedForDriftCheckStatistics?: string; + + /** + *

        The Amazon S3 URI of the baseline constraints file used for the drift check.

        + */ + BaselineUsedForDriftCheckConstraints?: string; + + /** + *

        The Amazon S3 URI of the newly calculated baseline statistics file.

        + */ + CalculatedBaselineStatistics?: string; + + /** + *

        The Amazon S3 URI of the newly calculated baseline constraints file.

        + */ + CalculatedBaselineConstraints?: string; + + /** + *

        The model package group name.

        + */ + ModelPackageGroupName?: string; + + /** + *

        The Amazon S3 URI of violation report if violations are detected.

        + */ + ViolationReport?: string; + + /** + *

        The Amazon Resource Name (ARN) of the Quality check processing job that was run by this step execution.

        + */ + CheckJobArn?: string; + + /** + *

        This flag indicates if the drift check against the previous baseline will be skipped or not. + * If it is set to False, the previous baseline of the configured check type must be available.

        + */ + SkipCheck?: boolean; + + /** + *

        This flag indicates if a newly calculated baseline can be accessed through step properties + * BaselineUsedForDriftCheckConstraints and BaselineUsedForDriftCheckStatistics. + * If it is set to False, the previous baseline of the configured check type must also be available. + * These can be accessed through the BaselineUsedForDriftCheckConstraints and + * BaselineUsedForDriftCheckStatistics properties.

        + */ + RegisterNewBaseline?: boolean; +} + +export namespace QualityCheckStepMetadata { + /** + * @internal + */ + export const filterSensitiveLog = (obj: QualityCheckStepMetadata): any => ({ + ...obj, + }); +} + +/** + *

        Metadata for a register model job step.

        + */ +export interface RegisterModelStepMetadata { + /** + *

        The Amazon Resource Name (ARN) of the model package.

        + */ + Arn?: string; +} + +export namespace RegisterModelStepMetadata { + /** + * @internal + */ + export const filterSensitiveLog = (obj: RegisterModelStepMetadata): any => ({ + ...obj, + }); +} + +/** + *

        Metadata for a training job step.

        + */ +export interface TrainingJobStepMetadata { + /** + *

        The Amazon Resource Name (ARN) of the training job that was run by this step execution.

        + */ + Arn?: string; +} + +export namespace TrainingJobStepMetadata { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TrainingJobStepMetadata): any => ({ + ...obj, + }); +} + +/** + *

        Metadata for a transform job step.

        + */ +export interface TransformJobStepMetadata { + /** + *

        The Amazon Resource Name (ARN) of the transform job that was run by this step execution.

        + */ + Arn?: string; +} + +export namespace TransformJobStepMetadata { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TransformJobStepMetadata): any => ({ + ...obj, + }); +} + +/** + *

        Metadata for a tuning step.

        + */ +export interface TuningJobStepMetaData { + /** + *

        The Amazon Resource Name (ARN) of the tuning job that was run by this step execution.

        + */ + Arn?: string; +} + +export namespace TuningJobStepMetaData { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TuningJobStepMetaData): any => ({ + ...obj, + }); +} + +/** + *

        Metadata for a step execution.

        + */ +export interface PipelineExecutionStepMetadata { + /** + *

        The Amazon Resource Name (ARN) of the training job that was run by this step execution.

        + */ + TrainingJob?: TrainingJobStepMetadata; + + /** + *

        The Amazon Resource Name (ARN) of the processing job that was run by this step execution.

        + */ + ProcessingJob?: ProcessingJobStepMetadata; + + /** + *

        The Amazon Resource Name (ARN) of the transform job that was run by this step execution.

        + */ + TransformJob?: TransformJobStepMetadata; + + /** + *

        The Amazon Resource Name (ARN) of the tuning job that was run by this step execution.

        + */ + TuningJob?: TuningJobStepMetaData; + + /** + *

        The Amazon Resource Name (ARN) of the model that was created by this step execution.

        + */ + Model?: ModelStepMetadata; + + /** + *

        The Amazon Resource Name (ARN) of the model package the model was registered to by this step execution.

        + */ + RegisterModel?: RegisterModelStepMetadata; + + /** + *

        The outcome of the condition evaluation that was run by this step execution.

        + */ + Condition?: ConditionStepMetadata; + + /** + *

        The URL of the Amazon SQS queue used by this step execution, the pipeline generated token, + * and a list of output parameters.

        + */ + Callback?: CallbackStepMetadata; + + /** + *

        The Amazon Resource Name (ARN) of the Lambda function that was run by this step execution and a list of + * output parameters.

        + */ + Lambda?: LambdaStepMetadata; + + /** + *

        The configurations and outcomes of the check step execution. This includes:

        + *
          + *
        • + *

          The type of the check conducted,

          + *
        • + *
        • + *

          The Amazon S3 URIs of baseline constraints and statistics files to be used for the drift check.

          + *
        • + *
        • + *

          The Amazon S3 URIs of newly calculated baseline constraints and statistics.

          + *
        • + *
        • + *

          The model package group name provided.

          + *
        • + *
        • + *

          The Amazon S3 URI of the violation report if violations detected.

          + *
        • + *
        • + *

          The Amazon Resource Name (ARN) of check processing job initiated by the step execution.

          + *
        • + *
        • + *

          The boolean flags indicating if the drift check is skipped.

          + *
        • + *
        • + *

          If step property BaselineUsedForDriftCheck is set the same as + * CalculatedBaseline.

          + *
        • + *
        + */ + QualityCheck?: QualityCheckStepMetadata; + + /** + *

        Container for the metadata for a Clarify check step. The configurations + * and outcomes of the check step execution. This includes:

        + *
          + *
        • + *

          The type of the check conducted,

          + *
        • + *
        • + *

          The Amazon S3 URIs of baseline constraints and statistics files to be used for the drift check.

          + *
        • + *
        • + *

          The Amazon S3 URIs of newly calculated baseline constraints and statistics.

          + *
        • + *
        • + *

          The model package group name provided.

          + *
        • + *
        • + *

          The Amazon S3 URI of the violation report if violations detected.

          + *
        • + *
        • + *

          The Amazon Resource Name (ARN) of check processing job initiated by the step execution.

          + *
        • + *
        • + *

          The boolean flags indicating if the drift check is skipped.

          + *
        • + *
        • + *

          If step property BaselineUsedForDriftCheck is set the same as + * CalculatedBaseline.

          + *
        • + *
        + */ + ClarifyCheck?: ClarifyCheckStepMetadata; + + /** + *

        The configurations and outcomes of an EMR step execution.

        + */ + EMR?: EMRStepMetadata; + + /** + *

        The configurations and outcomes of a Fail step execution.

        + */ + Fail?: FailStepMetadata; +} + +export namespace PipelineExecutionStepMetadata { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PipelineExecutionStepMetadata): any => ({ + ...obj, + }); +} + +export enum StepStatus { + EXECUTING = "Executing", + FAILED = "Failed", + STARTING = "Starting", + STOPPED = "Stopped", + STOPPING = "Stopping", + SUCCEEDED = "Succeeded", +} + +/** + *

        An execution of a step in a pipeline.

        + */ +export interface PipelineExecutionStep { + /** + *

        The name of the step that is executed.

        + */ + StepName?: string; + + /** + *

        The display name of the step.

        + */ + StepDisplayName?: string; + + /** + *

        The description of the step.

        + */ + StepDescription?: string; + + /** + *

        The time that the step started executing.

        + */ + StartTime?: Date; + + /** + *

        The time that the step stopped executing.

        + */ + EndTime?: Date; + + /** + *

        The status of the step execution.

        + */ + StepStatus?: StepStatus | string; + + /** + *

        If this pipeline execution step was cached, details on the cache hit.

        + */ + CacheHitResult?: CacheHitResult; + + /** + *

        The current attempt of the execution step. For more information, see Retry Policy for SageMaker Pipelines steps.

        + */ + AttemptCount?: number; + + /** + *

        The reason why the step failed execution. This is only returned if the step failed its execution.

        + */ + FailureReason?: string; + + /** + *

        Metadata for the step execution.

        + */ + Metadata?: PipelineExecutionStepMetadata; +} + +export namespace PipelineExecutionStep { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PipelineExecutionStep): any => ({ + ...obj, + }); +} + +export interface ListPipelineExecutionStepsResponse { + /** + *

        A list of PipeLineExecutionStep objects. Each + * PipeLineExecutionStep consists of StepName, StartTime, EndTime, StepStatus, + * and Metadata. Metadata is an object with properties for each job that contains relevant + * information about the job created by the step.

        + */ + PipelineExecutionSteps?: PipelineExecutionStep[]; + + /** + *

        If the result of the previous ListPipelineExecutionSteps request was truncated, + * the response includes a NextToken. To retrieve the next set of pipeline execution steps, use the token in the next request.

        + */ + NextToken?: string; +} + +export namespace ListPipelineExecutionStepsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListPipelineExecutionStepsResponse): any => ({ + ...obj, + }); +} + +export interface ListPipelineParametersForExecutionRequest { + /** + *

        The Amazon Resource Name (ARN) of the pipeline execution.

        + */ + PipelineExecutionArn: string | undefined; + + /** + *

        If the result of the previous ListPipelineParametersForExecution request was truncated, + * the response includes a NextToken. To retrieve the next set of parameters, use the token in the next request.

        + */ + NextToken?: string; + + /** + *

        The maximum number of parameters to return in the response.

        + */ + MaxResults?: number; +} + +export namespace ListPipelineParametersForExecutionRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListPipelineParametersForExecutionRequest): any => ({ + ...obj, + }); +} + +/** + *

        Assigns a value to a named Pipeline parameter.

        + */ +export interface Parameter { + /** + *

        The name of the parameter to assign a value to. This + * parameter name must match a named parameter in the + * pipeline definition.

        + */ + Name: string | undefined; + + /** + *

        The literal value for the parameter.

        + */ + Value: string | undefined; +} + +export namespace Parameter { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Parameter): any => ({ + ...obj, + }); +} + +export interface ListPipelineParametersForExecutionResponse { + /** + *

        Contains a list of pipeline parameters. This list can be empty.

        + */ + PipelineParameters?: Parameter[]; + + /** + *

        If the result of the previous ListPipelineParametersForExecution request was truncated, + * the response includes a NextToken. To retrieve the next set of parameters, use the token in the next request.

        + */ + NextToken?: string; +} + +export namespace ListPipelineParametersForExecutionResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListPipelineParametersForExecutionResponse): any => ({ + ...obj, + }); +} + +export enum SortPipelinesBy { + CREATION_TIME = "CreationTime", + NAME = "Name", +} + +export interface ListPipelinesRequest { + /** + *

        The prefix of the pipeline name.

        + */ + PipelineNamePrefix?: string; + + /** + *

        A filter that returns the pipelines that were created after a specified + * time.

        + */ + CreatedAfter?: Date; + + /** + *

        A filter that returns the pipelines that were created before a specified + * time.

        + */ + CreatedBefore?: Date; + + /** + *

        The field by which to sort results. The default is CreatedTime.

        + */ + SortBy?: SortPipelinesBy | string; + + /** + *

        The sort order for results.

        + */ + SortOrder?: SortOrder | string; + + /** + *

        If the result of the previous ListPipelines request was truncated, + * the response includes a NextToken. To retrieve the next set of pipelines, use the token in the next request.

        + */ + NextToken?: string; + + /** + *

        The maximum number of pipelines to return in the response.

        + */ + MaxResults?: number; +} + +export namespace ListPipelinesRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListPipelinesRequest): any => ({ + ...obj, + }); +} + +/** + *

        A summary of a pipeline.

        + */ +export interface PipelineSummary { + /** + *

        The Amazon Resource Name (ARN) of the pipeline.

        + */ + PipelineArn?: string; + + /** + *

        The name of the pipeline.

        + */ + PipelineName?: string; + + /** + *

        The display name of the pipeline.

        + */ + PipelineDisplayName?: string; + + /** + *

        The description of the pipeline.

        + */ + PipelineDescription?: string; + + /** + *

        The Amazon Resource Name (ARN) that the pipeline used to execute.

        + */ + RoleArn?: string; + + /** + *

        The creation time of the pipeline.

        + */ + CreationTime?: Date; + + /** + *

        The time that the pipeline was last modified.

        + */ + LastModifiedTime?: Date; + + /** + *

        The last time that a pipeline execution began.

        + */ + LastExecutionTime?: Date; +} + +export namespace PipelineSummary { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PipelineSummary): any => ({ + ...obj, + }); +} + +export interface ListPipelinesResponse { + /** + *

        Contains a sorted list of PipelineSummary objects matching the specified + * filters. Each PipelineSummary consists of PipelineArn, PipelineName, + * ExperimentName, PipelineDescription, CreationTime, LastModifiedTime, LastRunTime, and + * RoleArn. This list can be empty.

        + */ + PipelineSummaries?: PipelineSummary[]; + + /** + *

        If the result of the previous ListPipelines request was truncated, + * the response includes a NextToken. To retrieve the next set of pipelines, use the token in the next request.

        + */ + NextToken?: string; +} + +export namespace ListPipelinesResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListPipelinesResponse): any => ({ + ...obj, + }); +} + +export interface ListProcessingJobsRequest { + /** + *

        A filter that returns only processing jobs created after the specified time.

        + */ + CreationTimeAfter?: Date; + + /** + *

          A filter that returns only processing jobs created before the specified time.

        + */ + CreationTimeBefore?: Date; + + /** + *

        A filter that returns only processing jobs modified after the specified time.

        + */ + LastModifiedTimeAfter?: Date; + + /** + *

        A filter that returns only processing jobs modified before the specified time.

        + */ + LastModifiedTimeBefore?: Date; + + /** + *

        A string in the processing job name. This filter returns only processing jobs whose + * name contains the specified string.

        + */ + NameContains?: string; + + /** + *

        A filter that retrieves only processing jobs with a specific status.

        + */ + StatusEquals?: ProcessingJobStatus | string; + + /** + *

        The field to sort results by. The default is CreationTime.

        + */ + SortBy?: SortBy | string; + + /** + *

        The sort order for results. The default is Ascending.

        + */ + SortOrder?: SortOrder | string; + + /** + *

        If the result of the previous ListProcessingJobs request was truncated, + * the response includes a NextToken. To retrieve the next set of processing + * jobs, use the token in the next request.

        + */ + NextToken?: string; + + /** + *

        The maximum number of processing jobs to return in the response.

        + */ + MaxResults?: number; +} + +export namespace ListProcessingJobsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListProcessingJobsRequest): any => ({ + ...obj, + }); +} + +/** + *

        Summary of information about a processing job.

        + */ +export interface ProcessingJobSummary { + /** + *

        The name of the processing job.

        + */ + ProcessingJobName: string | undefined; + + /** + *

          The Amazon Resource Name (ARN) of the processing job.

        + */ + ProcessingJobArn: string | undefined; + + /** + *

        The time at which the processing job was created.

        + */ + CreationTime: Date | undefined; + + /** + *

        The time at which the processing job completed.

        + */ + ProcessingEndTime?: Date; + + /** + *

        A timestamp that indicates the last time the processing job was modified.

        + */ + LastModifiedTime?: Date; + + /** + *

        The status of the processing job.

        + */ + ProcessingJobStatus: ProcessingJobStatus | string | undefined; + + /** + *

        A string, up to one KB in size, that contains the reason a processing job failed, if + * it failed.

        + */ + FailureReason?: string; + + /** + *

        An optional string, up to one KB in size, that contains metadata from the processing + * container when the processing job exits.

        + */ + ExitMessage?: string; +} + +export namespace ProcessingJobSummary { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ProcessingJobSummary): any => ({ + ...obj, + }); +} + +export interface ListProcessingJobsResponse { + /** + *

        An array of ProcessingJobSummary objects, each listing a processing + * job.

        + */ + ProcessingJobSummaries: ProcessingJobSummary[] | undefined; + + /** + *

        If the response is truncated, Amazon SageMaker returns this token. To retrieve the next set of + * processing jobs, use it in the subsequent request.

        + */ + NextToken?: string; +} + +export namespace ListProcessingJobsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListProcessingJobsResponse): any => ({ + ...obj, + }); +} + export enum ProjectSortBy { CREATION_TIME = "CreationTime", NAME = "Name", @@ -237,6 +1046,63 @@ export namespace ListProjectsOutput { }); } +export interface ListStageDevicesRequest { + /** + *

          The response from the last list when returning a list large enough to need a pagination token.

        + */ + NextToken?: string; + + /** + *

        The maximum number of requests to select.

        + */ + MaxResults?: number; + + /** + *

        The name of the edge deployment plan.

        + */ + EdgeDeploymentPlanName: string | undefined; + + /** + *

        Toggle for excluding devices deployed in other stages.

        + */ + ExcludeDevicesDeployedInOtherStage?: boolean; + + /** + *

        The name of the stage in the deployment.

        + */ + StageName: string | undefined; +} + +export namespace ListStageDevicesRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListStageDevicesRequest): any => ({ + ...obj, + }); +} + +export interface ListStageDevicesResponse { + /** + *

        List of summaries of devices allocated to the stage.

        + */ + DeviceDeploymentSummaries: DeviceDeploymentSummary[] | undefined; + + /** + *

        The token to use when calling the next page of results.

        + */ + NextToken?: string; +} + +export namespace ListStageDevicesResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListStageDevicesResponse): any => ({ + ...obj, + }); +} + export enum StudioLifecycleConfigSortKey { CreationTime = "CreationTime", LastModifiedTime = "LastModifiedTime", @@ -3563,6 +4429,27 @@ export namespace SendPipelineExecutionStepSuccessResponse { }); } +export interface StartEdgeDeploymentStageRequest { + /** + *

        The name of the edge deployment plan to start.

        + */ + EdgeDeploymentPlanName: string | undefined; + + /** + *

        The name of the stage to start.

        + */ + StageName: string | undefined; +} + +export namespace StartEdgeDeploymentStageRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: StartEdgeDeploymentStageRequest): any => ({ + ...obj, + }); +} + export interface StartMonitoringScheduleRequest { /** *

        The name of the schedule to start.

        @@ -3686,6 +4573,27 @@ export namespace StopCompilationJobRequest { }); } +export interface StopEdgeDeploymentStageRequest { + /** + *

        The name of the edge deployment plan to stop.

        + */ + EdgeDeploymentPlanName: string | undefined; + + /** + *

        The name of the stage to stop.

        + */ + StageName: string | undefined; +} + +export namespace StopEdgeDeploymentStageRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: StopEdgeDeploymentStageRequest): any => ({ + ...obj, + }); +} + export interface StopEdgePackagingJobRequest { /** *

        The name of the edge packaging job.

        diff --git a/clients/client-sagemaker/src/pagination/ListEdgeDeploymentPlansPaginator.ts b/clients/client-sagemaker/src/pagination/ListEdgeDeploymentPlansPaginator.ts new file mode 100644 index 0000000000000..6a4c3e0efb5bf --- /dev/null +++ b/clients/client-sagemaker/src/pagination/ListEdgeDeploymentPlansPaginator.ts @@ -0,0 +1,61 @@ +// smithy-typescript generated code +import { Paginator } from "@aws-sdk/types"; + +import { + ListEdgeDeploymentPlansCommand, + ListEdgeDeploymentPlansCommandInput, + ListEdgeDeploymentPlansCommandOutput, +} from "../commands/ListEdgeDeploymentPlansCommand"; +import { SageMaker } from "../SageMaker"; +import { SageMakerClient } from "../SageMakerClient"; +import { SageMakerPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: SageMakerClient, + input: ListEdgeDeploymentPlansCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListEdgeDeploymentPlansCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: SageMaker, + input: ListEdgeDeploymentPlansCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listEdgeDeploymentPlans(input, ...args); +}; +export async function* paginateListEdgeDeploymentPlans( + config: SageMakerPaginationConfiguration, + input: ListEdgeDeploymentPlansCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.NextToken + let token: typeof input.NextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListEdgeDeploymentPlansCommandOutput; + while (hasNext) { + input.NextToken = token; + input["MaxResults"] = config.pageSize; + if (config.client instanceof SageMaker) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof SageMakerClient) { + page = await 
makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected SageMaker | SageMakerClient"); + } + yield page; + const prevToken = token; + token = page.NextToken; + hasNext = !!(token && (!config.stopOnSameToken || token !== prevToken)); + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-sagemaker/src/pagination/ListStageDevicesPaginator.ts b/clients/client-sagemaker/src/pagination/ListStageDevicesPaginator.ts new file mode 100644 index 0000000000000..90dbb178f4f43 --- /dev/null +++ b/clients/client-sagemaker/src/pagination/ListStageDevicesPaginator.ts @@ -0,0 +1,61 @@ +// smithy-typescript generated code +import { Paginator } from "@aws-sdk/types"; + +import { + ListStageDevicesCommand, + ListStageDevicesCommandInput, + ListStageDevicesCommandOutput, +} from "../commands/ListStageDevicesCommand"; +import { SageMaker } from "../SageMaker"; +import { SageMakerClient } from "../SageMakerClient"; +import { SageMakerPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: SageMakerClient, + input: ListStageDevicesCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListStageDevicesCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: SageMaker, + input: ListStageDevicesCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listStageDevices(input, ...args); +}; +export async function* paginateListStageDevices( + config: SageMakerPaginationConfiguration, + input: ListStageDevicesCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.NextToken + let token: typeof input.NextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListStageDevicesCommandOutput; + while (hasNext) { + input.NextToken = token; + 
input["MaxResults"] = config.pageSize; + if (config.client instanceof SageMaker) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof SageMakerClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected SageMaker | SageMakerClient"); + } + yield page; + const prevToken = token; + token = page.NextToken; + hasNext = !!(token && (!config.stopOnSameToken || token !== prevToken)); + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-sagemaker/src/pagination/index.ts b/clients/client-sagemaker/src/pagination/index.ts index 31c719ebcf548..f8dad0136d937 100644 --- a/clients/client-sagemaker/src/pagination/index.ts +++ b/clients/client-sagemaker/src/pagination/index.ts @@ -15,6 +15,7 @@ export * from "./ListDataQualityJobDefinitionsPaginator"; export * from "./ListDeviceFleetsPaginator"; export * from "./ListDevicesPaginator"; export * from "./ListDomainsPaginator"; +export * from "./ListEdgeDeploymentPlansPaginator"; export * from "./ListEdgePackagingJobsPaginator"; export * from "./ListEndpointConfigsPaginator"; export * from "./ListEndpointsPaginator"; @@ -45,6 +46,7 @@ export * from "./ListPipelineParametersForExecutionPaginator"; export * from "./ListPipelinesPaginator"; export * from "./ListProcessingJobsPaginator"; export * from "./ListProjectsPaginator"; +export * from "./ListStageDevicesPaginator"; export * from "./ListStudioLifecycleConfigsPaginator"; export * from "./ListSubscribedWorkteamsPaginator"; export * from "./ListTagsPaginator"; diff --git a/clients/client-sagemaker/src/protocols/Aws_json1_1.ts b/clients/client-sagemaker/src/protocols/Aws_json1_1.ts index 01150d33f17b0..391424e0ccbbb 100644 --- a/clients/client-sagemaker/src/protocols/Aws_json1_1.ts +++ b/clients/client-sagemaker/src/protocols/Aws_json1_1.ts @@ -57,6 +57,14 @@ import { } from 
"../commands/CreateDataQualityJobDefinitionCommand"; import { CreateDeviceFleetCommandInput, CreateDeviceFleetCommandOutput } from "../commands/CreateDeviceFleetCommand"; import { CreateDomainCommandInput, CreateDomainCommandOutput } from "../commands/CreateDomainCommand"; +import { + CreateEdgeDeploymentPlanCommandInput, + CreateEdgeDeploymentPlanCommandOutput, +} from "../commands/CreateEdgeDeploymentPlanCommand"; +import { + CreateEdgeDeploymentStageCommandInput, + CreateEdgeDeploymentStageCommandOutput, +} from "../commands/CreateEdgeDeploymentStageCommand"; import { CreateEdgePackagingJobCommandInput, CreateEdgePackagingJobCommandOutput, @@ -162,6 +170,14 @@ import { } from "../commands/DeleteDataQualityJobDefinitionCommand"; import { DeleteDeviceFleetCommandInput, DeleteDeviceFleetCommandOutput } from "../commands/DeleteDeviceFleetCommand"; import { DeleteDomainCommandInput, DeleteDomainCommandOutput } from "../commands/DeleteDomainCommand"; +import { + DeleteEdgeDeploymentPlanCommandInput, + DeleteEdgeDeploymentPlanCommandOutput, +} from "../commands/DeleteEdgeDeploymentPlanCommand"; +import { + DeleteEdgeDeploymentStageCommandInput, + DeleteEdgeDeploymentStageCommandOutput, +} from "../commands/DeleteEdgeDeploymentStageCommand"; import { DeleteEndpointCommandInput, DeleteEndpointCommandOutput } from "../commands/DeleteEndpointCommand"; import { DeleteEndpointConfigCommandInput, @@ -254,6 +270,10 @@ import { DescribeDeviceFleetCommandOutput, } from "../commands/DescribeDeviceFleetCommand"; import { DescribeDomainCommandInput, DescribeDomainCommandOutput } from "../commands/DescribeDomainCommand"; +import { + DescribeEdgeDeploymentPlanCommandInput, + DescribeEdgeDeploymentPlanCommandOutput, +} from "../commands/DescribeEdgeDeploymentPlanCommand"; import { DescribeEdgePackagingJobCommandInput, DescribeEdgePackagingJobCommandOutput, @@ -437,6 +457,10 @@ import { import { ListDeviceFleetsCommandInput, ListDeviceFleetsCommandOutput } from 
"../commands/ListDeviceFleetsCommand"; import { ListDevicesCommandInput, ListDevicesCommandOutput } from "../commands/ListDevicesCommand"; import { ListDomainsCommandInput, ListDomainsCommandOutput } from "../commands/ListDomainsCommand"; +import { + ListEdgeDeploymentPlansCommandInput, + ListEdgeDeploymentPlansCommandOutput, +} from "../commands/ListEdgeDeploymentPlansCommand"; import { ListEdgePackagingJobsCommandInput, ListEdgePackagingJobsCommandOutput, @@ -519,6 +543,7 @@ import { import { ListPipelinesCommandInput, ListPipelinesCommandOutput } from "../commands/ListPipelinesCommand"; import { ListProcessingJobsCommandInput, ListProcessingJobsCommandOutput } from "../commands/ListProcessingJobsCommand"; import { ListProjectsCommandInput, ListProjectsCommandOutput } from "../commands/ListProjectsCommand"; +import { ListStageDevicesCommandInput, ListStageDevicesCommandOutput } from "../commands/ListStageDevicesCommand"; import { ListStudioLifecycleConfigsCommandInput, ListStudioLifecycleConfigsCommandOutput, @@ -562,6 +587,10 @@ import { SendPipelineExecutionStepSuccessCommandInput, SendPipelineExecutionStepSuccessCommandOutput, } from "../commands/SendPipelineExecutionStepSuccessCommand"; +import { + StartEdgeDeploymentStageCommandInput, + StartEdgeDeploymentStageCommandOutput, +} from "../commands/StartEdgeDeploymentStageCommand"; import { StartMonitoringScheduleCommandInput, StartMonitoringScheduleCommandOutput, @@ -576,6 +605,10 @@ import { } from "../commands/StartPipelineExecutionCommand"; import { StopAutoMLJobCommandInput, StopAutoMLJobCommandOutput } from "../commands/StopAutoMLJobCommand"; import { StopCompilationJobCommandInput, StopCompilationJobCommandOutput } from "../commands/StopCompilationJobCommand"; +import { + StopEdgeDeploymentStageCommandInput, + StopEdgeDeploymentStageCommandOutput, +} from "../commands/StopEdgeDeploymentStageCommand"; import { StopEdgePackagingJobCommandInput, StopEdgePackagingJobCommandOutput, @@ -763,6 +796,9 @@ import 
{ CreateDeviceFleetRequest, CreateDomainRequest, CreateDomainResponse, + CreateEdgeDeploymentPlanRequest, + CreateEdgeDeploymentPlanResponse, + CreateEdgeDeploymentStageRequest, CreateEdgePackagingJobRequest, CreateEndpointConfigInput, CreateEndpointConfigOutput, @@ -776,8 +812,6 @@ import { CreateFlowDefinitionResponse, CreateHumanTaskUiRequest, CreateHumanTaskUiResponse, - CreateHyperParameterTuningJobRequest, - CreateHyperParameterTuningJobResponse, CustomImage, DataCaptureConfig, DataCatalogConfig, @@ -786,7 +820,11 @@ import { DataQualityJobInput, DataSource, DeploymentConfig, + DeploymentStage, + DeviceSelectionConfig, DomainSettings, + EdgeDeploymentConfig, + EdgeDeploymentModelConfig, EdgeOutputConfig, EndpointInput, FeatureDefinition, @@ -799,12 +837,8 @@ import { HumanLoopActivationConfig, HumanLoopConfig, HumanLoopRequestSource, - HyperParameterAlgorithmSpecification, HyperParameterSpecification, - HyperParameterTrainingJobDefinition, - HyperParameterTuningJobConfig, HyperParameterTuningJobObjective, - HyperParameterTuningJobWarmStartConfig, ImageConfig, InferenceSpecification, InputConfig, @@ -841,7 +875,6 @@ import { OutputParameter, ParameterRange, ParameterRanges, - ParentHyperParameterTuningJob, ProductionVariant, ProductionVariantCoreDumpConfig, ProductionVariantInstanceType, @@ -854,7 +887,6 @@ import { ResourceLimits, ResourceNotFound, ResourceSpec, - RetryStrategy, RSessionAppSettings, RStudioServerProAppSettings, RStudioServerProDomainSettings, @@ -886,6 +918,8 @@ import { VpcConfig, } from "../models/models_0"; import { + CreateHyperParameterTuningJobRequest, + CreateHyperParameterTuningJobResponse, CreateImageRequest, CreateImageResponse, CreateImageVersionRequest, @@ -959,6 +993,8 @@ import { DeleteDataQualityJobDefinitionRequest, DeleteDeviceFleetRequest, DeleteDomainRequest, + DeleteEdgeDeploymentPlanRequest, + DeleteEdgeDeploymentStageRequest, DeleteEndpointConfigInput, DeleteEndpointInput, DeleteExperimentRequest, @@ -998,6 +1034,7 @@ 
import { DeleteWorkteamRequest, DeleteWorkteamResponse, DeployedImage, + DeploymentStageStatusSummary, DeregisterDevicesRequest, DescribeActionRequest, DescribeActionResponse, @@ -1025,6 +1062,8 @@ import { DescribeDeviceResponse, DescribeDomainRequest, DescribeDomainResponse, + DescribeEdgeDeploymentPlanRequest, + DescribeEdgeDeploymentPlanResponse, DescribeEdgePackagingJobRequest, DescribeEdgePackagingJobResponse, DescribeEndpointConfigInput, @@ -1041,31 +1080,26 @@ import { DescribeFlowDefinitionResponse, DescribeHumanTaskUiRequest, DescribeHumanTaskUiResponse, - DescribeHyperParameterTuningJobRequest, - DescribeHyperParameterTuningJobResponse, - DescribeImageRequest, - DescribeImageResponse, - DescribeImageVersionRequest, - DescribeImageVersionResponse, - DescribeInferenceRecommendationsJobRequest, DriftCheckBaselines, DriftCheckBias, DriftCheckExplainability, DriftCheckModelDataQuality, DriftCheckModelQuality, + EdgeDeploymentStatus, EdgeModel, EdgePresetDeploymentOutput, EndpointInputConfiguration, - EndpointOutputConfiguration, EnvironmentParameterRanges, ExperimentConfig, ExperimentSource, Explainability, FeatureParameter, FileSource, - FinalHyperParameterTuningJobObjectiveMetric, HumanTaskConfig, - HyperParameterTrainingJobSummary, + HyperParameterAlgorithmSpecification, + HyperParameterTrainingJobDefinition, + HyperParameterTuningJobConfig, + HyperParameterTuningJobWarmStartConfig, InferenceExecutionConfig, InstanceMetadataServiceConfiguration, LabelingJobAlgorithmsConfig, @@ -1108,11 +1142,11 @@ import { NotebookInstanceAcceleratorType, NotebookInstanceLifecycleHook, NotificationConfiguration, - ObjectiveStatusCounters, OfflineStoreStatus, OidcConfig, OidcMemberDefinition, ParallelismConfiguration, + ParentHyperParameterTuningJob, PendingDeploymentSummary, PendingProductionVariantSummary, Phase, @@ -1139,6 +1173,7 @@ import { RedshiftDatasetDefinition, ResolvedAttributes, RetentionPolicy, + RetryStrategy, ScheduleConfig, 
ServiceCatalogProvisioningDetails, SourceAlgorithm, @@ -1146,7 +1181,6 @@ import { SourceIpConfig, TensorBoardOutputConfig, TrafficPattern, - TrainingJobStatusCounters, TrialComponentArtifact, TrialComponentParameterValue, TrialComponentStatus, @@ -1155,6 +1189,13 @@ import { WorkforceVpcConfigRequest, } from "../models/models_1"; import { + DescribeHyperParameterTuningJobRequest, + DescribeHyperParameterTuningJobResponse, + DescribeImageRequest, + DescribeImageResponse, + DescribeImageVersionRequest, + DescribeImageVersionResponse, + DescribeInferenceRecommendationsJobRequest, DescribeInferenceRecommendationsJobResponse, DescribeLabelingJobRequest, DescribeLabelingJobResponse, @@ -1208,6 +1249,7 @@ import { DescribeWorkteamResponse, DesiredWeightAndCapacity, Device, + DeviceDeploymentSummary, DeviceFleetSummary, DeviceStats, DeviceSummary, @@ -1218,6 +1260,7 @@ import { DomainDetails, DomainSettingsForUpdate, Edge, + EdgeDeploymentPlanSummary, EdgeModelStat, EdgeModelSummary, EdgePackagingJobSummary, @@ -1226,6 +1269,7 @@ import { EnableSagemakerServicecatalogPortfolioOutput, Endpoint, EndpointConfigSummary, + EndpointOutputConfiguration, EndpointSummary, EnvironmentParameter, Experiment, @@ -1235,6 +1279,7 @@ import { FeatureGroupSummary, FeatureMetadata, Filter, + FinalHyperParameterTuningJobObjectiveMetric, FlowDefinitionSummary, GetDeviceFleetReportRequest, GetDeviceFleetReportResponse, @@ -1248,6 +1293,7 @@ import { GetSearchSuggestionsResponse, GitConfigForUpdate, HumanTaskUiSummary, + HyperParameterTrainingJobSummary, HyperParameterTuningJobSummary, Image, ImageVersion, @@ -1291,6 +1337,8 @@ import { ListDevicesResponse, ListDomainsRequest, ListDomainsResponse, + ListEdgeDeploymentPlansRequest, + ListEdgeDeploymentPlansResponse, ListEdgePackagingJobsRequest, ListEdgePackagingJobsResponse, ListEndpointConfigsInput, @@ -1343,14 +1391,6 @@ import { ListNotebookInstancesOutput, ListPipelineExecutionsRequest, ListPipelineExecutionsResponse, - 
ListPipelineExecutionStepsRequest, - ListPipelineExecutionStepsResponse, - ListPipelineParametersForExecutionRequest, - ListPipelineParametersForExecutionResponse, - ListPipelinesRequest, - ListPipelinesResponse, - ListProcessingJobsRequest, - ListProcessingJobsResponse, MetricData, ModelConfiguration, ModelMetadataFilter, @@ -1360,7 +1400,6 @@ import { ModelPackageStatusDetails, ModelPackageStatusItem, ModelPackageSummary, - ModelStepMetadata, ModelSummary, MonitoringExecutionSummary, MonitoringJobDefinitionSummary, @@ -1368,39 +1407,40 @@ import { MonitoringScheduleSummary, NotebookInstanceLifecycleConfigSummary, NotebookInstanceSummary, + ObjectiveStatusCounters, OidcConfigForResponse, - Parameter, - PipelineExecutionStep, - PipelineExecutionStepMetadata, PipelineExecutionSummary, PipelineExperimentConfig, - PipelineSummary, - ProcessingJobStepMetadata, - ProcessingJobSummary, ProfilerRuleEvaluationStatus, PropertyNameQuery, PropertyNameSuggestion, - QualityCheckStepMetadata, RecommendationMetrics, - RegisterModelStepMetadata, RStudioServerProDomainSettingsForUpdate, SecondaryStatusTransition, ServiceCatalogProvisionedProductDetails, SubscribedWorkteam, SuggestionQuery, - TrainingJobStepMetadata, - TransformJobStepMetadata, + TrainingJobStatusCounters, TrialComponentMetricSummary, TrialComponentSource, TrialSource, - TuningJobStepMetaData, Workforce, WorkforceVpcConfigResponse, Workteam, } from "../models/models_2"; import { + ListPipelineExecutionStepsRequest, + ListPipelineExecutionStepsResponse, + ListPipelineParametersForExecutionRequest, + ListPipelineParametersForExecutionResponse, + ListPipelinesRequest, + ListPipelinesResponse, + ListProcessingJobsRequest, + ListProcessingJobsResponse, ListProjectsInput, ListProjectsOutput, + ListStageDevicesRequest, + ListStageDevicesResponse, ListStudioLifecycleConfigsRequest, ListStudioLifecycleConfigsResponse, ListSubscribedWorkteamsRequest, @@ -1425,20 +1465,29 @@ import { ListWorkteamsResponse, ModelPackage, 
ModelPackageGroup, + ModelStepMetadata, NestedFilters, + Parameter, Parent, Pipeline, PipelineExecution, + PipelineExecutionStep, + PipelineExecutionStepMetadata, + PipelineSummary, ProcessingJob, + ProcessingJobStepMetadata, + ProcessingJobSummary, ProfilerConfigForUpdate, Project, ProjectSummary, PutModelPackageGroupPolicyInput, PutModelPackageGroupPolicyOutput, + QualityCheckStepMetadata, QueryFilters, QueryLineageRequest, QueryLineageResponse, RegisterDevicesRequest, + RegisterModelStepMetadata, RenderableTask, RenderingError, RenderUiTemplateRequest, @@ -1454,12 +1503,14 @@ import { SendPipelineExecutionStepSuccessRequest, SendPipelineExecutionStepSuccessResponse, ServiceCatalogProvisioningUpdateDetails, + StartEdgeDeploymentStageRequest, StartMonitoringScheduleRequest, StartNotebookInstanceInput, StartPipelineExecutionRequest, StartPipelineExecutionResponse, StopAutoMLJobRequest, StopCompilationJobRequest, + StopEdgeDeploymentStageRequest, StopEdgePackagingJobRequest, StopHyperParameterTuningJobRequest, StopInferenceRecommendationsJobRequest, @@ -1473,8 +1524,10 @@ import { StopTransformJobRequest, StudioLifecycleConfigDetails, TrainingJob, + TrainingJobStepMetadata, TrainingJobSummary, TransformJob, + TransformJobStepMetadata, TransformJobSummary, Trial, TrialComponent, @@ -1482,6 +1535,7 @@ import { TrialComponentSourceDetail, TrialComponentSummary, TrialSummary, + TuningJobStepMetaData, UpdateActionRequest, UpdateActionResponse, UpdateAppImageConfigRequest, @@ -1747,6 +1801,32 @@ export const serializeAws_json1_1CreateDomainCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1CreateEdgeDeploymentPlanCommand = async ( + input: CreateEdgeDeploymentPlanCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "SageMaker.CreateEdgeDeploymentPlan", + }; + let body: any; + body = 
JSON.stringify(serializeAws_json1_1CreateEdgeDeploymentPlanRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + +export const serializeAws_json1_1CreateEdgeDeploymentStageCommand = async ( + input: CreateEdgeDeploymentStageCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "SageMaker.CreateEdgeDeploymentStage", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1CreateEdgeDeploymentStageRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1CreateEdgePackagingJobCommand = async ( input: CreateEdgePackagingJobCommandInput, context: __SerdeContext @@ -2332,6 +2412,32 @@ export const serializeAws_json1_1DeleteDomainCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1DeleteEdgeDeploymentPlanCommand = async ( + input: DeleteEdgeDeploymentPlanCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "SageMaker.DeleteEdgeDeploymentPlan", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1DeleteEdgeDeploymentPlanRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + +export const serializeAws_json1_1DeleteEdgeDeploymentStageCommand = async ( + input: DeleteEdgeDeploymentStageCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "SageMaker.DeleteEdgeDeploymentStage", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1DeleteEdgeDeploymentStageRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + 
export const serializeAws_json1_1DeleteEndpointCommand = async ( input: DeleteEndpointCommandInput, context: __SerdeContext @@ -2865,6 +2971,19 @@ export const serializeAws_json1_1DescribeDomainCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1DescribeEdgeDeploymentPlanCommand = async ( + input: DescribeEdgeDeploymentPlanCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "SageMaker.DescribeEdgeDeploymentPlan", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1DescribeEdgeDeploymentPlanRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1DescribeEdgePackagingJobCommand = async ( input: DescribeEdgePackagingJobCommandInput, context: __SerdeContext @@ -3645,6 +3764,19 @@ export const serializeAws_json1_1ListDomainsCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1ListEdgeDeploymentPlansCommand = async ( + input: ListEdgeDeploymentPlansCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "SageMaker.ListEdgeDeploymentPlans", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1ListEdgeDeploymentPlansRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1ListEdgePackagingJobsCommand = async ( input: ListEdgePackagingJobsCommandInput, context: __SerdeContext @@ -4048,6 +4180,19 @@ export const serializeAws_json1_1ListProjectsCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1ListStageDevicesCommand = async ( + input: 
ListStageDevicesCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "SageMaker.ListStageDevices", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1ListStageDevicesRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1ListStudioLifecycleConfigsCommand = async ( input: ListStudioLifecycleConfigsCommandInput, context: __SerdeContext @@ -4295,6 +4440,19 @@ export const serializeAws_json1_1SendPipelineExecutionStepSuccessCommand = async return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1StartEdgeDeploymentStageCommand = async ( + input: StartEdgeDeploymentStageCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "SageMaker.StartEdgeDeploymentStage", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1StartEdgeDeploymentStageRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1StartMonitoringScheduleCommand = async ( input: StartMonitoringScheduleCommandInput, context: __SerdeContext @@ -4360,6 +4518,19 @@ export const serializeAws_json1_1StopCompilationJobCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1StopEdgeDeploymentStageCommand = async ( + input: StopEdgeDeploymentStageCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "SageMaker.StopEdgeDeploymentStage", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1StopEdgeDeploymentStageRequest(input, context)); + return 
buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1StopEdgePackagingJobCommand = async ( input: StopEdgePackagingJobCommandInput, context: __SerdeContext @@ -5554,6 +5725,91 @@ const deserializeAws_json1_1CreateDomainCommandError = async ( } }; +export const deserializeAws_json1_1CreateEdgeDeploymentPlanCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1CreateEdgeDeploymentPlanCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_1CreateEdgeDeploymentPlanResponse(data, context); + const response: CreateEdgeDeploymentPlanCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1CreateEdgeDeploymentPlanCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + const errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "ResourceLimitExceeded": + case "com.amazonaws.sagemaker#ResourceLimitExceeded": + throw await deserializeAws_json1_1ResourceLimitExceededResponse(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + const $metadata = deserializeMetadata(output); + const statusCode = $metadata.httpStatusCode ? 
$metadata.httpStatusCode + "" : undefined; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode || statusCode || "UnknowError", + $fault: "client", + $metadata, + }); + throw __decorateServiceException(response, parsedBody); + } +}; + +export const deserializeAws_json1_1CreateEdgeDeploymentStageCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1CreateEdgeDeploymentStageCommandError(output, context); + } + await collectBody(output.body, context); + const response: CreateEdgeDeploymentStageCommandOutput = { + $metadata: deserializeMetadata(output), + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1CreateEdgeDeploymentStageCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + const errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "ResourceLimitExceeded": + case "com.amazonaws.sagemaker#ResourceLimitExceeded": + throw await deserializeAws_json1_1ResourceLimitExceededResponse(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + const $metadata = deserializeMetadata(output); + const statusCode = $metadata.httpStatusCode ? 
$metadata.httpStatusCode + "" : undefined; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode || statusCode || "UnknowError", + $fault: "client", + $metadata, + }); + throw __decorateServiceException(response, parsedBody); + } +}; + export const deserializeAws_json1_1CreateEdgePackagingJobCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -7576,6 +7832,88 @@ const deserializeAws_json1_1DeleteDomainCommandError = async ( } }; +export const deserializeAws_json1_1DeleteEdgeDeploymentPlanCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1DeleteEdgeDeploymentPlanCommandError(output, context); + } + await collectBody(output.body, context); + const response: DeleteEdgeDeploymentPlanCommandOutput = { + $metadata: deserializeMetadata(output), + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1DeleteEdgeDeploymentPlanCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + const errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "ResourceInUse": + case "com.amazonaws.sagemaker#ResourceInUse": + throw await deserializeAws_json1_1ResourceInUseResponse(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + const $metadata = deserializeMetadata(output); + const statusCode = $metadata.httpStatusCode ? 
$metadata.httpStatusCode + "" : undefined; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode || statusCode || "UnknowError", + $fault: "client", + $metadata, + }); + throw __decorateServiceException(response, parsedBody); + } +}; + +export const deserializeAws_json1_1DeleteEdgeDeploymentStageCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1DeleteEdgeDeploymentStageCommandError(output, context); + } + await collectBody(output.body, context); + const response: DeleteEdgeDeploymentStageCommandOutput = { + $metadata: deserializeMetadata(output), + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1DeleteEdgeDeploymentStageCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + const errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "ResourceInUse": + case "com.amazonaws.sagemaker#ResourceInUse": + throw await deserializeAws_json1_1ResourceInUseResponse(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + const $metadata = deserializeMetadata(output); + const statusCode = $metadata.httpStatusCode ? 
$metadata.httpStatusCode + "" : undefined; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode || statusCode || "UnknowError", + $fault: "client", + $metadata, + }); + throw __decorateServiceException(response, parsedBody); + } +}; + export const deserializeAws_json1_1DeleteEndpointCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -9311,6 +9649,50 @@ const deserializeAws_json1_1DescribeDomainCommandError = async ( } }; +export const deserializeAws_json1_1DescribeEdgeDeploymentPlanCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1DescribeEdgeDeploymentPlanCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_1DescribeEdgeDeploymentPlanResponse(data, context); + const response: DescribeEdgeDeploymentPlanCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1DescribeEdgeDeploymentPlanCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + const errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "ResourceNotFound": + case "com.amazonaws.sagemaker#ResourceNotFound": + throw await deserializeAws_json1_1ResourceNotFoundResponse(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + const $metadata = deserializeMetadata(output); + const statusCode = $metadata.httpStatusCode ? 
$metadata.httpStatusCode + "" : undefined; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode || statusCode || "UnknowError", + $fault: "client", + $metadata, + }); + throw __decorateServiceException(response, parsedBody); + } +}; + export const deserializeAws_json1_1DescribeEdgePackagingJobCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -11870,6 +12252,47 @@ const deserializeAws_json1_1ListDomainsCommandError = async ( } }; +export const deserializeAws_json1_1ListEdgeDeploymentPlansCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1ListEdgeDeploymentPlansCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_1ListEdgeDeploymentPlansResponse(data, context); + const response: ListEdgeDeploymentPlansCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1ListEdgeDeploymentPlansCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + const errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + default: + const parsedBody = parsedOutput.body; + const $metadata = deserializeMetadata(output); + const statusCode = $metadata.httpStatusCode ? 
$metadata.httpStatusCode + "" : undefined; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode || statusCode || "UnknowError", + $fault: "client", + $metadata, + }); + throw __decorateServiceException(response, parsedBody); + } +}; + export const deserializeAws_json1_1ListEdgePackagingJobsCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -13156,6 +13579,47 @@ const deserializeAws_json1_1ListProjectsCommandError = async ( } }; +export const deserializeAws_json1_1ListStageDevicesCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1ListStageDevicesCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_1ListStageDevicesResponse(data, context); + const response: ListStageDevicesCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1ListStageDevicesCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + const errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + default: + const parsedBody = parsedOutput.body; + const $metadata = deserializeMetadata(output); + const statusCode = $metadata.httpStatusCode ? 
$metadata.httpStatusCode + "" : undefined; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode || statusCode || "UnknowError", + $fault: "client", + $metadata, + }); + throw __decorateServiceException(response, parsedBody); + } +}; + export const deserializeAws_json1_1ListStudioLifecycleConfigsCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -13974,6 +14438,44 @@ const deserializeAws_json1_1SendPipelineExecutionStepSuccessCommandError = async } }; +export const deserializeAws_json1_1StartEdgeDeploymentStageCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1StartEdgeDeploymentStageCommandError(output, context); + } + await collectBody(output.body, context); + const response: StartEdgeDeploymentStageCommandOutput = { + $metadata: deserializeMetadata(output), + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1StartEdgeDeploymentStageCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + const errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + default: + const parsedBody = parsedOutput.body; + const $metadata = deserializeMetadata(output); + const statusCode = $metadata.httpStatusCode ? 
$metadata.httpStatusCode + "" : undefined; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode || statusCode || "UnknowError", + $fault: "client", + $metadata, + }); + throw __decorateServiceException(response, parsedBody); + } +}; + export const deserializeAws_json1_1StartMonitoringScheduleCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -14185,6 +14687,44 @@ const deserializeAws_json1_1StopCompilationJobCommandError = async ( } }; +export const deserializeAws_json1_1StopEdgeDeploymentStageCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1StopEdgeDeploymentStageCommandError(output, context); + } + await collectBody(output.body, context); + const response: StopEdgeDeploymentStageCommandOutput = { + $metadata: deserializeMetadata(output), + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1StopEdgeDeploymentStageCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + const errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + default: + const parsedBody = parsedOutput.body; + const $metadata = deserializeMetadata(output); + const statusCode = $metadata.httpStatusCode ? 
$metadata.httpStatusCode + "" : undefined; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode || statusCode || "UnknowError", + $fault: "client", + $metadata, + }); + throw __decorateServiceException(response, parsedBody); + } +}; + export const deserializeAws_json1_1StopEdgePackagingJobCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -16836,6 +17376,31 @@ const serializeAws_json1_1CreateDomainRequest = (input: CreateDomainRequest, con }; }; +const serializeAws_json1_1CreateEdgeDeploymentPlanRequest = ( + input: CreateEdgeDeploymentPlanRequest, + context: __SerdeContext +): any => { + return { + ...(input.DeviceFleetName != null && { DeviceFleetName: input.DeviceFleetName }), + ...(input.EdgeDeploymentPlanName != null && { EdgeDeploymentPlanName: input.EdgeDeploymentPlanName }), + ...(input.ModelConfigs != null && { + ModelConfigs: serializeAws_json1_1EdgeDeploymentModelConfigs(input.ModelConfigs, context), + }), + ...(input.Stages != null && { Stages: serializeAws_json1_1DeploymentStages(input.Stages, context) }), + ...(input.Tags != null && { Tags: serializeAws_json1_1TagList(input.Tags, context) }), + }; +}; + +const serializeAws_json1_1CreateEdgeDeploymentStageRequest = ( + input: CreateEdgeDeploymentStageRequest, + context: __SerdeContext +): any => { + return { + ...(input.EdgeDeploymentPlanName != null && { EdgeDeploymentPlanName: input.EdgeDeploymentPlanName }), + ...(input.Stages != null && { Stages: serializeAws_json1_1DeploymentStages(input.Stages, context) }), + }; +}; + const serializeAws_json1_1CreateEdgePackagingJobRequest = ( input: CreateEdgePackagingJobRequest, context: __SerdeContext @@ -17912,6 +18477,25 @@ const serializeAws_json1_1DeleteDomainRequest = (input: DeleteDomainRequest, con }; }; +const serializeAws_json1_1DeleteEdgeDeploymentPlanRequest = ( + input: DeleteEdgeDeploymentPlanRequest, + context: __SerdeContext +): any => { + return { + ...(input.EdgeDeploymentPlanName != null 
&& { EdgeDeploymentPlanName: input.EdgeDeploymentPlanName }), + }; +}; + +const serializeAws_json1_1DeleteEdgeDeploymentStageRequest = ( + input: DeleteEdgeDeploymentStageRequest, + context: __SerdeContext +): any => { + return { + ...(input.EdgeDeploymentPlanName != null && { EdgeDeploymentPlanName: input.EdgeDeploymentPlanName }), + ...(input.StageName != null && { StageName: input.StageName }), + }; +}; + const serializeAws_json1_1DeleteEndpointConfigInput = ( input: DeleteEndpointConfigInput, context: __SerdeContext @@ -18139,6 +18723,29 @@ const serializeAws_json1_1DeploymentConfig = (input: DeploymentConfig, context: }; }; +const serializeAws_json1_1DeploymentStage = (input: DeploymentStage, context: __SerdeContext): any => { + return { + ...(input.DeploymentConfig != null && { + DeploymentConfig: serializeAws_json1_1EdgeDeploymentConfig(input.DeploymentConfig, context), + }), + ...(input.DeviceSelectionConfig != null && { + DeviceSelectionConfig: serializeAws_json1_1DeviceSelectionConfig(input.DeviceSelectionConfig, context), + }), + ...(input.StageName != null && { StageName: input.StageName }), + }; +}; + +const serializeAws_json1_1DeploymentStages = (input: DeploymentStage[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_json1_1DeploymentStage(entry, context); + }); +}; + const serializeAws_json1_1DeregisterDevicesRequest = ( input: DeregisterDevicesRequest, context: __SerdeContext @@ -18250,6 +18857,17 @@ const serializeAws_json1_1DescribeDomainRequest = (input: DescribeDomainRequest, }; }; +const serializeAws_json1_1DescribeEdgeDeploymentPlanRequest = ( + input: DescribeEdgeDeploymentPlanRequest, + context: __SerdeContext +): any => { + return { + ...(input.EdgeDeploymentPlanName != null && { EdgeDeploymentPlanName: input.EdgeDeploymentPlanName }), + ...(input.MaxResults != null && { MaxResults: input.MaxResults }), + 
...(input.NextToken != null && { NextToken: input.NextToken }), + }; +}; + const serializeAws_json1_1DescribeEdgePackagingJobRequest = ( input: DescribeEdgePackagingJobRequest, context: __SerdeContext @@ -18625,6 +19243,15 @@ const serializeAws_json1_1Devices = (input: Device[], context: __SerdeContext): }); }; +const serializeAws_json1_1DeviceSelectionConfig = (input: DeviceSelectionConfig, context: __SerdeContext): any => { + return { + ...(input.DeviceNameContains != null && { DeviceNameContains: input.DeviceNameContains }), + ...(input.DeviceNames != null && { DeviceNames: serializeAws_json1_1DeviceNames(input.DeviceNames, context) }), + ...(input.DeviceSubsetType != null && { DeviceSubsetType: input.DeviceSubsetType }), + ...(input.Percentage != null && { Percentage: input.Percentage }), + }; +}; + const serializeAws_json1_1DisableSagemakerServicecatalogPortfolioInput = ( input: DisableSagemakerServicecatalogPortfolioInput, context: __SerdeContext @@ -18732,6 +19359,36 @@ const serializeAws_json1_1DriftCheckModelQuality = (input: DriftCheckModelQualit }; }; +const serializeAws_json1_1EdgeDeploymentConfig = (input: EdgeDeploymentConfig, context: __SerdeContext): any => { + return { + ...(input.FailureHandlingPolicy != null && { FailureHandlingPolicy: input.FailureHandlingPolicy }), + }; +}; + +const serializeAws_json1_1EdgeDeploymentModelConfig = ( + input: EdgeDeploymentModelConfig, + context: __SerdeContext +): any => { + return { + ...(input.EdgePackagingJobName != null && { EdgePackagingJobName: input.EdgePackagingJobName }), + ...(input.ModelHandle != null && { ModelHandle: input.ModelHandle }), + }; +}; + +const serializeAws_json1_1EdgeDeploymentModelConfigs = ( + input: EdgeDeploymentModelConfig[], + context: __SerdeContext +): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_json1_1EdgeDeploymentModelConfig(entry, context); + }); +}; + const 
serializeAws_json1_1EdgeOutputConfig = (input: EdgeOutputConfig, context: __SerdeContext): any => { return { ...(input.KmsKeyId != null && { KmsKeyId: input.KmsKeyId }), @@ -19895,6 +20552,30 @@ const serializeAws_json1_1ListDomainsRequest = (input: ListDomainsRequest, conte }; }; +const serializeAws_json1_1ListEdgeDeploymentPlansRequest = ( + input: ListEdgeDeploymentPlansRequest, + context: __SerdeContext +): any => { + return { + ...(input.CreationTimeAfter != null && { CreationTimeAfter: Math.round(input.CreationTimeAfter.getTime() / 1000) }), + ...(input.CreationTimeBefore != null && { + CreationTimeBefore: Math.round(input.CreationTimeBefore.getTime() / 1000), + }), + ...(input.DeviceFleetNameContains != null && { DeviceFleetNameContains: input.DeviceFleetNameContains }), + ...(input.LastModifiedTimeAfter != null && { + LastModifiedTimeAfter: Math.round(input.LastModifiedTimeAfter.getTime() / 1000), + }), + ...(input.LastModifiedTimeBefore != null && { + LastModifiedTimeBefore: Math.round(input.LastModifiedTimeBefore.getTime() / 1000), + }), + ...(input.MaxResults != null && { MaxResults: input.MaxResults }), + ...(input.NameContains != null && { NameContains: input.NameContains }), + ...(input.NextToken != null && { NextToken: input.NextToken }), + ...(input.SortBy != null && { SortBy: input.SortBy }), + ...(input.SortOrder != null && { SortOrder: input.SortOrder }), + }; +}; + const serializeAws_json1_1ListEdgePackagingJobsRequest = ( input: ListEdgePackagingJobsRequest, context: __SerdeContext @@ -20493,6 +21174,18 @@ const serializeAws_json1_1ListProjectsInput = (input: ListProjectsInput, context }; }; +const serializeAws_json1_1ListStageDevicesRequest = (input: ListStageDevicesRequest, context: __SerdeContext): any => { + return { + ...(input.EdgeDeploymentPlanName != null && { EdgeDeploymentPlanName: input.EdgeDeploymentPlanName }), + ...(input.ExcludeDevicesDeployedInOtherStage != null && { + ExcludeDevicesDeployedInOtherStage: 
input.ExcludeDevicesDeployedInOtherStage, + }), + ...(input.MaxResults != null && { MaxResults: input.MaxResults }), + ...(input.NextToken != null && { NextToken: input.NextToken }), + ...(input.StageName != null && { StageName: input.StageName }), + }; +}; + const serializeAws_json1_1ListStudioLifecycleConfigsRequest = ( input: ListStudioLifecycleConfigsRequest, context: __SerdeContext @@ -22346,6 +23039,16 @@ const serializeAws_json1_1SourceIpConfig = (input: SourceIpConfig, context: __Se }; }; +const serializeAws_json1_1StartEdgeDeploymentStageRequest = ( + input: StartEdgeDeploymentStageRequest, + context: __SerdeContext +): any => { + return { + ...(input.EdgeDeploymentPlanName != null && { EdgeDeploymentPlanName: input.EdgeDeploymentPlanName }), + ...(input.StageName != null && { StageName: input.StageName }), + }; +}; + const serializeAws_json1_1StartMonitoringScheduleRequest = ( input: StartMonitoringScheduleRequest, context: __SerdeContext @@ -22401,6 +23104,16 @@ const serializeAws_json1_1StopCompilationJobRequest = ( }; }; +const serializeAws_json1_1StopEdgeDeploymentStageRequest = ( + input: StopEdgeDeploymentStageRequest, + context: __SerdeContext +): any => { + return { + ...(input.EdgeDeploymentPlanName != null && { EdgeDeploymentPlanName: input.EdgeDeploymentPlanName }), + ...(input.StageName != null && { StageName: input.StageName }), + }; +}; + const serializeAws_json1_1StopEdgePackagingJobRequest = ( input: StopEdgePackagingJobRequest, context: __SerdeContext @@ -24860,6 +25573,15 @@ const deserializeAws_json1_1CreateDomainResponse = (output: any, context: __Serd } as any; }; +const deserializeAws_json1_1CreateEdgeDeploymentPlanResponse = ( + output: any, + context: __SerdeContext +): CreateEdgeDeploymentPlanResponse => { + return { + EdgeDeploymentPlanArn: __expectString(output.EdgeDeploymentPlanArn), + } as any; +}; + const deserializeAws_json1_1CreateEndpointConfigOutput = ( output: any, context: __SerdeContext @@ -25509,6 +26231,42 @@ const 
deserializeAws_json1_1DeploymentConfig = (output: any, context: __SerdeCon } as any; }; +const deserializeAws_json1_1DeploymentStageStatusSummaries = ( + output: any, + context: __SerdeContext +): DeploymentStageStatusSummary[] => { + const retVal = (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_1DeploymentStageStatusSummary(entry, context); + }); + return retVal; +}; + +const deserializeAws_json1_1DeploymentStageStatusSummary = ( + output: any, + context: __SerdeContext +): DeploymentStageStatusSummary => { + return { + DeploymentConfig: + output.DeploymentConfig != null + ? deserializeAws_json1_1EdgeDeploymentConfig(output.DeploymentConfig, context) + : undefined, + DeploymentStatus: + output.DeploymentStatus != null + ? deserializeAws_json1_1EdgeDeploymentStatus(output.DeploymentStatus, context) + : undefined, + DeviceSelectionConfig: + output.DeviceSelectionConfig != null + ? deserializeAws_json1_1DeviceSelectionConfig(output.DeviceSelectionConfig, context) + : undefined, + StageName: __expectString(output.StageName), + } as any; +}; + const deserializeAws_json1_1DescribeActionResponse = (output: any, context: __SerdeContext): DescribeActionResponse => { return { ActionArn: __expectString(output.ActionArn), @@ -25925,6 +26683,35 @@ const deserializeAws_json1_1DescribeDomainResponse = (output: any, context: __Se } as any; }; +const deserializeAws_json1_1DescribeEdgeDeploymentPlanResponse = ( + output: any, + context: __SerdeContext +): DescribeEdgeDeploymentPlanResponse => { + return { + CreationTime: + output.CreationTime != null + ? 
__expectNonNull(__parseEpochTimestamp(__expectNumber(output.CreationTime))) + : undefined, + DeviceFleetName: __expectString(output.DeviceFleetName), + EdgeDeploymentFailed: __expectInt32(output.EdgeDeploymentFailed), + EdgeDeploymentPending: __expectInt32(output.EdgeDeploymentPending), + EdgeDeploymentPlanArn: __expectString(output.EdgeDeploymentPlanArn), + EdgeDeploymentPlanName: __expectString(output.EdgeDeploymentPlanName), + EdgeDeploymentSuccess: __expectInt32(output.EdgeDeploymentSuccess), + LastModifiedTime: + output.LastModifiedTime != null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.LastModifiedTime))) + : undefined, + ModelConfigs: + output.ModelConfigs != null + ? deserializeAws_json1_1EdgeDeploymentModelConfigs(output.ModelConfigs, context) + : undefined, + NextToken: __expectString(output.NextToken), + Stages: + output.Stages != null ? deserializeAws_json1_1DeploymentStageStatusSummaries(output.Stages, context) : undefined, + } as any; +}; + const deserializeAws_json1_1DescribeEdgePackagingJobResponse = ( output: any, context: __SerdeContext @@ -27203,6 +27990,43 @@ const deserializeAws_json1_1DescribeWorkteamResponse = ( } as any; }; +const deserializeAws_json1_1DeviceDeploymentSummaries = ( + output: any, + context: __SerdeContext +): DeviceDeploymentSummary[] => { + const retVal = (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_1DeviceDeploymentSummary(entry, context); + }); + return retVal; +}; + +const deserializeAws_json1_1DeviceDeploymentSummary = ( + output: any, + context: __SerdeContext +): DeviceDeploymentSummary => { + return { + DeployedStageName: __expectString(output.DeployedStageName), + DeploymentStartTime: + output.DeploymentStartTime != null + ? 
__expectNonNull(__parseEpochTimestamp(__expectNumber(output.DeploymentStartTime))) + : undefined, + Description: __expectString(output.Description), + DeviceArn: __expectString(output.DeviceArn), + DeviceDeploymentStatus: __expectString(output.DeviceDeploymentStatus), + DeviceDeploymentStatusMessage: __expectString(output.DeviceDeploymentStatusMessage), + DeviceFleetName: __expectString(output.DeviceFleetName), + DeviceName: __expectString(output.DeviceName), + EdgeDeploymentPlanArn: __expectString(output.EdgeDeploymentPlanArn), + EdgeDeploymentPlanName: __expectString(output.EdgeDeploymentPlanName), + StageName: __expectString(output.StageName), + } as any; +}; + const deserializeAws_json1_1DeviceFleetSummaries = (output: any, context: __SerdeContext): DeviceFleetSummary[] => { const retVal = (output || []) .filter((e: any) => e != null) @@ -27230,6 +28054,28 @@ const deserializeAws_json1_1DeviceFleetSummary = (output: any, context: __SerdeC } as any; }; +const deserializeAws_json1_1DeviceNames = (output: any, context: __SerdeContext): string[] => { + const retVal = (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return __expectString(entry) as any; + }); + return retVal; +}; + +const deserializeAws_json1_1DeviceSelectionConfig = (output: any, context: __SerdeContext): DeviceSelectionConfig => { + return { + DeviceNameContains: __expectString(output.DeviceNameContains), + DeviceNames: + output.DeviceNames != null ? 
deserializeAws_json1_1DeviceNames(output.DeviceNames, context) : undefined, + DeviceSubsetType: __expectString(output.DeviceSubsetType), + Percentage: __expectInt32(output.Percentage), + } as any; +}; + const deserializeAws_json1_1DeviceStats = (output: any, context: __SerdeContext): DeviceStats => { return { ConnectedDeviceCount: __expectLong(output.ConnectedDeviceCount), @@ -27411,6 +28257,88 @@ const deserializeAws_json1_1Edge = (output: any, context: __SerdeContext): Edge } as any; }; +const deserializeAws_json1_1EdgeDeploymentConfig = (output: any, context: __SerdeContext): EdgeDeploymentConfig => { + return { + FailureHandlingPolicy: __expectString(output.FailureHandlingPolicy), + } as any; +}; + +const deserializeAws_json1_1EdgeDeploymentModelConfig = ( + output: any, + context: __SerdeContext +): EdgeDeploymentModelConfig => { + return { + EdgePackagingJobName: __expectString(output.EdgePackagingJobName), + ModelHandle: __expectString(output.ModelHandle), + } as any; +}; + +const deserializeAws_json1_1EdgeDeploymentModelConfigs = ( + output: any, + context: __SerdeContext +): EdgeDeploymentModelConfig[] => { + const retVal = (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_1EdgeDeploymentModelConfig(entry, context); + }); + return retVal; +}; + +const deserializeAws_json1_1EdgeDeploymentPlanSummaries = ( + output: any, + context: __SerdeContext +): EdgeDeploymentPlanSummary[] => { + const retVal = (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_1EdgeDeploymentPlanSummary(entry, context); + }); + return retVal; +}; + +const deserializeAws_json1_1EdgeDeploymentPlanSummary = ( + output: any, + context: __SerdeContext +): EdgeDeploymentPlanSummary => { + return { + CreationTime: + output.CreationTime != null + ? 
__expectNonNull(__parseEpochTimestamp(__expectNumber(output.CreationTime))) + : undefined, + DeviceFleetName: __expectString(output.DeviceFleetName), + EdgeDeploymentFailed: __expectInt32(output.EdgeDeploymentFailed), + EdgeDeploymentPending: __expectInt32(output.EdgeDeploymentPending), + EdgeDeploymentPlanArn: __expectString(output.EdgeDeploymentPlanArn), + EdgeDeploymentPlanName: __expectString(output.EdgeDeploymentPlanName), + EdgeDeploymentSuccess: __expectInt32(output.EdgeDeploymentSuccess), + LastModifiedTime: + output.LastModifiedTime != null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.LastModifiedTime))) + : undefined, + } as any; +}; + +const deserializeAws_json1_1EdgeDeploymentStatus = (output: any, context: __SerdeContext): EdgeDeploymentStatus => { + return { + EdgeDeploymentFailedInStage: __expectInt32(output.EdgeDeploymentFailedInStage), + EdgeDeploymentPendingInStage: __expectInt32(output.EdgeDeploymentPendingInStage), + EdgeDeploymentStageStartTime: + output.EdgeDeploymentStageStartTime != null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.EdgeDeploymentStageStartTime))) + : undefined, + EdgeDeploymentStatusMessage: __expectString(output.EdgeDeploymentStatusMessage), + EdgeDeploymentSuccessInStage: __expectInt32(output.EdgeDeploymentSuccessInStage), + StageStatus: __expectString(output.StageStatus), + } as any; +}; + const deserializeAws_json1_1EdgeModel = (output: any, context: __SerdeContext): EdgeModel => { return { LatestInference: @@ -29352,6 +30280,19 @@ const deserializeAws_json1_1ListDomainsResponse = (output: any, context: __Serde } as any; }; +const deserializeAws_json1_1ListEdgeDeploymentPlansResponse = ( + output: any, + context: __SerdeContext +): ListEdgeDeploymentPlansResponse => { + return { + EdgeDeploymentPlanSummaries: + output.EdgeDeploymentPlanSummaries != null + ? 
deserializeAws_json1_1EdgeDeploymentPlanSummaries(output.EdgeDeploymentPlanSummaries, context) + : undefined, + NextToken: __expectString(output.NextToken), + } as any; +}; + const deserializeAws_json1_1ListEdgePackagingJobsResponse = ( output: any, context: __SerdeContext @@ -29733,6 +30674,19 @@ const deserializeAws_json1_1ListProjectsOutput = (output: any, context: __SerdeC } as any; }; +const deserializeAws_json1_1ListStageDevicesResponse = ( + output: any, + context: __SerdeContext +): ListStageDevicesResponse => { + return { + DeviceDeploymentSummaries: + output.DeviceDeploymentSummaries != null + ? deserializeAws_json1_1DeviceDeploymentSummaries(output.DeviceDeploymentSummaries, context) + : undefined, + NextToken: __expectString(output.NextToken), + } as any; +}; + const deserializeAws_json1_1ListStudioLifecycleConfigsResponse = ( output: any, context: __SerdeContext diff --git a/codegen/sdk-codegen/aws-models/sagemaker.json b/codegen/sdk-codegen/aws-models/sagemaker.json index 0091baa492e9f..0786d0856619c 100644 --- a/codegen/sdk-codegen/aws-models/sagemaker.json +++ b/codegen/sdk-codegen/aws-models/sagemaker.json @@ -5360,6 +5360,109 @@ } } }, + "com.amazonaws.sagemaker#CreateEdgeDeploymentPlan": { + "type": "operation", + "input": { + "target": "com.amazonaws.sagemaker#CreateEdgeDeploymentPlanRequest" + }, + "output": { + "target": "com.amazonaws.sagemaker#CreateEdgeDeploymentPlanResponse" + }, + "errors": [ + { + "target": "com.amazonaws.sagemaker#ResourceLimitExceeded" + } + ], + "traits": { + "smithy.api#documentation": "

        Creates an edge deployment plan, consisting of multiple stages. Each stage may have a different deployment configuration and devices.

        " + } + }, + "com.amazonaws.sagemaker#CreateEdgeDeploymentPlanRequest": { + "type": "structure", + "members": { + "EdgeDeploymentPlanName": { + "target": "com.amazonaws.sagemaker#EntityName", + "traits": { + "smithy.api#documentation": "

        The name of the edge deployment plan.

        ", + "smithy.api#required": {} + } + }, + "ModelConfigs": { + "target": "com.amazonaws.sagemaker#EdgeDeploymentModelConfigs", + "traits": { + "smithy.api#documentation": "

        List of models associated with the edge deployment plan.

        ", + "smithy.api#required": {} + } + }, + "DeviceFleetName": { + "target": "com.amazonaws.sagemaker#EntityName", + "traits": { + "smithy.api#documentation": "

        The device fleet used for this edge deployment plan.

        ", + "smithy.api#required": {} + } + }, + "Stages": { + "target": "com.amazonaws.sagemaker#DeploymentStages", + "traits": { + "smithy.api#documentation": "

        List of stages of the edge deployment plan. The number of stages is limited to 10 per deployment.

        " + } + }, + "Tags": { + "target": "com.amazonaws.sagemaker#TagList", + "traits": { + "smithy.api#documentation": "

        List of tags with which to tag the edge deployment plan.

        " + } + } + } + }, + "com.amazonaws.sagemaker#CreateEdgeDeploymentPlanResponse": { + "type": "structure", + "members": { + "EdgeDeploymentPlanArn": { + "target": "com.amazonaws.sagemaker#EdgeDeploymentPlanArn", + "traits": { + "smithy.api#documentation": "

        The ARN of the edge deployment plan.

        ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.sagemaker#CreateEdgeDeploymentStage": { + "type": "operation", + "input": { + "target": "com.amazonaws.sagemaker#CreateEdgeDeploymentStageRequest" + }, + "output": { + "target": "smithy.api#Unit" + }, + "errors": [ + { + "target": "com.amazonaws.sagemaker#ResourceLimitExceeded" + } + ], + "traits": { + "smithy.api#documentation": "

        Creates a new stage in an existing edge deployment plan.

        " + } + }, + "com.amazonaws.sagemaker#CreateEdgeDeploymentStageRequest": { + "type": "structure", + "members": { + "EdgeDeploymentPlanName": { + "target": "com.amazonaws.sagemaker#EntityName", + "traits": { + "smithy.api#documentation": "

        The name of the edge deployment plan.

        ", + "smithy.api#required": {} + } + }, + "Stages": { + "target": "com.amazonaws.sagemaker#DeploymentStages", + "traits": { + "smithy.api#documentation": "

        List of stages to be added to the edge deployment plan.

        ", + "smithy.api#required": {} + } + } + } + }, "com.amazonaws.sagemaker#CreateEdgePackagingJob": { "type": "operation", "input": { @@ -9142,6 +9245,71 @@ } } }, + "com.amazonaws.sagemaker#DeleteEdgeDeploymentPlan": { + "type": "operation", + "input": { + "target": "com.amazonaws.sagemaker#DeleteEdgeDeploymentPlanRequest" + }, + "output": { + "target": "smithy.api#Unit" + }, + "errors": [ + { + "target": "com.amazonaws.sagemaker#ResourceInUse" + } + ], + "traits": { + "smithy.api#documentation": "

        Deletes an edge deployment plan if (and only if) all the stages in the plan are inactive or there are no stages in the plan.

        " + } + }, + "com.amazonaws.sagemaker#DeleteEdgeDeploymentPlanRequest": { + "type": "structure", + "members": { + "EdgeDeploymentPlanName": { + "target": "com.amazonaws.sagemaker#EntityName", + "traits": { + "smithy.api#documentation": "

        The name of the edge deployment plan to delete.

        ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.sagemaker#DeleteEdgeDeploymentStage": { + "type": "operation", + "input": { + "target": "com.amazonaws.sagemaker#DeleteEdgeDeploymentStageRequest" + }, + "output": { + "target": "smithy.api#Unit" + }, + "errors": [ + { + "target": "com.amazonaws.sagemaker#ResourceInUse" + } + ], + "traits": { + "smithy.api#documentation": "

        Delete a stage in an edge deployment plan if (and only if) the stage is inactive.

        " + } + }, + "com.amazonaws.sagemaker#DeleteEdgeDeploymentStageRequest": { + "type": "structure", + "members": { + "EdgeDeploymentPlanName": { + "target": "com.amazonaws.sagemaker#EntityName", + "traits": { + "smithy.api#documentation": "

        The name of the edge deployment plan from which the stage will be deleted.

        ", + "smithy.api#required": {} + } + }, + "StageName": { + "target": "com.amazonaws.sagemaker#EntityName", + "traits": { + "smithy.api#documentation": "

        The name of the stage.

        ", + "smithy.api#required": {} + } + } + } + }, "com.amazonaws.sagemaker#DeleteEndpoint": { "type": "operation", "input": { @@ -10062,6 +10230,90 @@ "smithy.api#documentation": "

        The deployment configuration for an endpoint, which contains the desired deployment\n strategy and rollback configurations.

        " } }, + "com.amazonaws.sagemaker#DeploymentStage": { + "type": "structure", + "members": { + "StageName": { + "target": "com.amazonaws.sagemaker#EntityName", + "traits": { + "smithy.api#documentation": "

        The name of the stage.

        ", + "smithy.api#required": {} + } + }, + "DeviceSelectionConfig": { + "target": "com.amazonaws.sagemaker#DeviceSelectionConfig", + "traits": { + "smithy.api#documentation": "

        Configuration of the devices in the stage.

        ", + "smithy.api#required": {} + } + }, + "DeploymentConfig": { + "target": "com.amazonaws.sagemaker#EdgeDeploymentConfig", + "traits": { + "smithy.api#documentation": "

        Configuration of the deployment details.

        " + } + } + }, + "traits": { + "smithy.api#documentation": "

        Contains information about a stage in an edge deployment plan.

        " + } + }, + "com.amazonaws.sagemaker#DeploymentStageMaxResults": { + "type": "integer", + "traits": { + "smithy.api#range": { + "max": 10 + } + } + }, + "com.amazonaws.sagemaker#DeploymentStageStatusSummaries": { + "type": "list", + "member": { + "target": "com.amazonaws.sagemaker#DeploymentStageStatusSummary" + } + }, + "com.amazonaws.sagemaker#DeploymentStageStatusSummary": { + "type": "structure", + "members": { + "StageName": { + "target": "com.amazonaws.sagemaker#EntityName", + "traits": { + "smithy.api#documentation": "

        The name of the stage.

        ", + "smithy.api#required": {} + } + }, + "DeviceSelectionConfig": { + "target": "com.amazonaws.sagemaker#DeviceSelectionConfig", + "traits": { + "smithy.api#documentation": "

        Configuration of the devices in the stage.

        ", + "smithy.api#required": {} + } + }, + "DeploymentConfig": { + "target": "com.amazonaws.sagemaker#EdgeDeploymentConfig", + "traits": { + "smithy.api#documentation": "

        Configuration of the deployment details.

        ", + "smithy.api#required": {} + } + }, + "DeploymentStatus": { + "target": "com.amazonaws.sagemaker#EdgeDeploymentStatus", + "traits": { + "smithy.api#documentation": "

        General status of the current state.

        ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

        Contains information summarizing the deployment stage results.

        " + } + }, + "com.amazonaws.sagemaker#DeploymentStages": { + "type": "list", + "member": { + "target": "com.amazonaws.sagemaker#DeploymentStage" + } + }, "com.amazonaws.sagemaker#DeregisterDevices": { "type": "operation", "input": { @@ -11509,6 +11761,123 @@ } } }, + "com.amazonaws.sagemaker#DescribeEdgeDeploymentPlan": { + "type": "operation", + "input": { + "target": "com.amazonaws.sagemaker#DescribeEdgeDeploymentPlanRequest" + }, + "output": { + "target": "com.amazonaws.sagemaker#DescribeEdgeDeploymentPlanResponse" + }, + "errors": [ + { + "target": "com.amazonaws.sagemaker#ResourceNotFound" + } + ], + "traits": { + "smithy.api#documentation": "

        Describes an edge deployment plan with deployment status per stage.

        " + } + }, + "com.amazonaws.sagemaker#DescribeEdgeDeploymentPlanRequest": { + "type": "structure", + "members": { + "EdgeDeploymentPlanName": { + "target": "com.amazonaws.sagemaker#EntityName", + "traits": { + "smithy.api#documentation": "

        The name of the deployment plan to describe.

        ", + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "com.amazonaws.sagemaker#NextToken", + "traits": { + "smithy.api#documentation": "

        If the edge deployment plan has enough stages to require tokening, then this is the response from the last list of stages returned.

        " + } + }, + "MaxResults": { + "target": "com.amazonaws.sagemaker#DeploymentStageMaxResults", + "traits": { + "smithy.api#documentation": "

        The maximum number of results to select (50 by default).

        " + } + } + } + }, + "com.amazonaws.sagemaker#DescribeEdgeDeploymentPlanResponse": { + "type": "structure", + "members": { + "EdgeDeploymentPlanArn": { + "target": "com.amazonaws.sagemaker#EdgeDeploymentPlanArn", + "traits": { + "smithy.api#documentation": "

        The ARN of edge deployment plan.

        ", + "smithy.api#required": {} + } + }, + "EdgeDeploymentPlanName": { + "target": "com.amazonaws.sagemaker#EntityName", + "traits": { + "smithy.api#documentation": "

        The name of the edge deployment plan.

        ", + "smithy.api#required": {} + } + }, + "ModelConfigs": { + "target": "com.amazonaws.sagemaker#EdgeDeploymentModelConfigs", + "traits": { + "smithy.api#documentation": "

        List of models associated with the edge deployment plan.

        ", + "smithy.api#required": {} + } + }, + "DeviceFleetName": { + "target": "com.amazonaws.sagemaker#EntityName", + "traits": { + "smithy.api#documentation": "

        The device fleet used for this edge deployment plan.

        ", + "smithy.api#required": {} + } + }, + "EdgeDeploymentSuccess": { + "target": "com.amazonaws.sagemaker#Integer", + "traits": { + "smithy.api#documentation": "

        The number of edge devices with the successful deployment.

        " + } + }, + "EdgeDeploymentPending": { + "target": "com.amazonaws.sagemaker#Integer", + "traits": { + "smithy.api#documentation": "

        The number of edge devices yet to pick up deployment, or in progress.

        " + } + }, + "EdgeDeploymentFailed": { + "target": "com.amazonaws.sagemaker#Integer", + "traits": { + "smithy.api#documentation": "

        The number of edge devices that failed the deployment.

        " + } + }, + "Stages": { + "target": "com.amazonaws.sagemaker#DeploymentStageStatusSummaries", + "traits": { + "smithy.api#documentation": "

        List of stages in the edge deployment plan.

        ", + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "com.amazonaws.sagemaker#NextToken", + "traits": { + "smithy.api#documentation": "

        Token to use when calling the next set of stages in the edge deployment plan.

        " + } + }, + "CreationTime": { + "target": "com.amazonaws.sagemaker#Timestamp", + "traits": { + "smithy.api#documentation": "

        The time when the edge deployment plan was created.

        " + } + }, + "LastModifiedTime": { + "target": "com.amazonaws.sagemaker#Timestamp", + "traits": { + "smithy.api#documentation": "

        The time when the edge deployment plan was last updated.

        " + } + } + } + }, "com.amazonaws.sagemaker#DescribeEdgePackagingJob": { "type": "operation", "input": { @@ -15936,6 +16305,122 @@ "smithy.api#pattern": "^arn:aws[a-z\\-]*:[a-z\\-]*:[a-z\\-]*:\\d{12}:[a-z\\-]*/?[a-zA-Z_0-9+=,.@\\-_/]+$" } }, + "com.amazonaws.sagemaker#DeviceDeploymentStatus": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "READYTODEPLOY", + "name": "ReadyToDeploy" + }, + { + "value": "INPROGRESS", + "name": "InProgress" + }, + { + "value": "DEPLOYED", + "name": "Deployed" + }, + { + "value": "FAILED", + "name": "Failed" + }, + { + "value": "STOPPING", + "name": "Stopping" + }, + { + "value": "STOPPED", + "name": "Stopped" + } + ] + } + }, + "com.amazonaws.sagemaker#DeviceDeploymentSummaries": { + "type": "list", + "member": { + "target": "com.amazonaws.sagemaker#DeviceDeploymentSummary" + } + }, + "com.amazonaws.sagemaker#DeviceDeploymentSummary": { + "type": "structure", + "members": { + "EdgeDeploymentPlanArn": { + "target": "com.amazonaws.sagemaker#EdgeDeploymentPlanArn", + "traits": { + "smithy.api#documentation": "

        The ARN of the edge deployment plan.

        ", + "smithy.api#required": {} + } + }, + "EdgeDeploymentPlanName": { + "target": "com.amazonaws.sagemaker#EntityName", + "traits": { + "smithy.api#documentation": "

        The name of the edge deployment plan.

        ", + "smithy.api#required": {} + } + }, + "StageName": { + "target": "com.amazonaws.sagemaker#EntityName", + "traits": { + "smithy.api#documentation": "

        The name of the stage in the edge deployment plan.

        ", + "smithy.api#required": {} + } + }, + "DeployedStageName": { + "target": "com.amazonaws.sagemaker#EntityName", + "traits": { + "smithy.api#documentation": "

        The name of the deployed stage.

        " + } + }, + "DeviceFleetName": { + "target": "com.amazonaws.sagemaker#EntityName", + "traits": { + "smithy.api#documentation": "

        The name of the fleet to which the device belongs.

        " + } + }, + "DeviceName": { + "target": "com.amazonaws.sagemaker#DeviceName", + "traits": { + "smithy.api#documentation": "

        The name of the device.

        ", + "smithy.api#required": {} + } + }, + "DeviceArn": { + "target": "com.amazonaws.sagemaker#DeviceArn", + "traits": { + "smithy.api#documentation": "

        The ARN of the device.

        ", + "smithy.api#required": {} + } + }, + "DeviceDeploymentStatus": { + "target": "com.amazonaws.sagemaker#DeviceDeploymentStatus", + "traits": { + "smithy.api#documentation": "

        The deployment status of the device.

        " + } + }, + "DeviceDeploymentStatusMessage": { + "target": "com.amazonaws.sagemaker#String", + "traits": { + "smithy.api#documentation": "

        The detailed error message for the deployment status result.

        " + } + }, + "Description": { + "target": "com.amazonaws.sagemaker#DeviceDescription", + "traits": { + "smithy.api#documentation": "

        The description of the device.

        " + } + }, + "DeploymentStartTime": { + "target": "com.amazonaws.sagemaker#Timestamp", + "traits": { + "smithy.api#documentation": "

        The time when the deployment on the device started.

        " + } + } + }, + "traits": { + "smithy.api#documentation": "

        Contains information summarizing device details and deployment status.

        " + } + }, "com.amazonaws.sagemaker#DeviceDescription": { "type": "string", "traits": { @@ -16018,6 +16503,39 @@ "target": "com.amazonaws.sagemaker#DeviceName" } }, + "com.amazonaws.sagemaker#DeviceSelectionConfig": { + "type": "structure", + "members": { + "DeviceSubsetType": { + "target": "com.amazonaws.sagemaker#DeviceSubsetType", + "traits": { + "smithy.api#documentation": "

        Type of device subsets to deploy to the current stage.

        ", + "smithy.api#required": {} + } + }, + "Percentage": { + "target": "com.amazonaws.sagemaker#Percentage", + "traits": { + "smithy.api#documentation": "

        Percentage of devices in the fleet to deploy to the current stage.

        " + } + }, + "DeviceNames": { + "target": "com.amazonaws.sagemaker#DeviceNames", + "traits": { + "smithy.api#documentation": "

        List of devices chosen to deploy.

        " + } + }, + "DeviceNameContains": { + "target": "com.amazonaws.sagemaker#DeviceName", + "traits": { + "smithy.api#documentation": "

        A filter to select devices with names containing this name.

        " + } + } + }, + "traits": { + "smithy.api#documentation": "

        Contains information about the configurations of selected devices.

        " + } + }, "com.amazonaws.sagemaker#DeviceStats": { "type": "structure", "members": { @@ -16040,6 +16558,25 @@ "smithy.api#documentation": "

        Status of devices.

        " } }, + "com.amazonaws.sagemaker#DeviceSubsetType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "PERCENTAGE", + "name": "Percentage" + }, + { + "value": "SELECTION", + "name": "Selection" + }, + { + "value": "NAMECONTAINS", + "name": "NameContains" + } + ] + } + }, "com.amazonaws.sagemaker#DeviceSummaries": { "type": "list", "member": { @@ -16605,6 +17142,175 @@ "smithy.api#documentation": "

        A directed edge connecting two lineage entities.

        " } }, + "com.amazonaws.sagemaker#EdgeDeploymentConfig": { + "type": "structure", + "members": { + "FailureHandlingPolicy": { + "target": "com.amazonaws.sagemaker#FailureHandlingPolicy", + "traits": { + "smithy.api#documentation": "

        Toggle that determines whether to roll back to the previous configuration if the current deployment fails.\n By default this is turned on. You may turn this off if you want to investigate the errors yourself.

        ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

        Contains information about the configuration of a deployment.

        " + } + }, + "com.amazonaws.sagemaker#EdgeDeploymentModelConfig": { + "type": "structure", + "members": { + "ModelHandle": { + "target": "com.amazonaws.sagemaker#EntityName", + "traits": { + "smithy.api#documentation": "

        The name the device application uses to reference this model.

        ", + "smithy.api#required": {} + } + }, + "EdgePackagingJobName": { + "target": "com.amazonaws.sagemaker#EntityName", + "traits": { + "smithy.api#documentation": "

        The edge packaging job associated with this deployment.

        ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

        Contains information about the configuration of a model in a deployment.

        " + } + }, + "com.amazonaws.sagemaker#EdgeDeploymentModelConfigs": { + "type": "list", + "member": { + "target": "com.amazonaws.sagemaker#EdgeDeploymentModelConfig" + } + }, + "com.amazonaws.sagemaker#EdgeDeploymentPlanArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 20, + "max": 2048 + }, + "smithy.api#pattern": "^arn:aws[a-z\\-]*:sagemaker:[a-z\\-]*:\\d{12}:edge-deployment/?[a-zA-Z_0-9+=,.@\\-_/]+$" + } + }, + "com.amazonaws.sagemaker#EdgeDeploymentPlanSummaries": { + "type": "list", + "member": { + "target": "com.amazonaws.sagemaker#EdgeDeploymentPlanSummary" + } + }, + "com.amazonaws.sagemaker#EdgeDeploymentPlanSummary": { + "type": "structure", + "members": { + "EdgeDeploymentPlanArn": { + "target": "com.amazonaws.sagemaker#EdgeDeploymentPlanArn", + "traits": { + "smithy.api#documentation": "

        The ARN of the edge deployment plan.

        ", + "smithy.api#required": {} + } + }, + "EdgeDeploymentPlanName": { + "target": "com.amazonaws.sagemaker#EntityName", + "traits": { + "smithy.api#documentation": "

        The name of the edge deployment plan.

        ", + "smithy.api#required": {} + } + }, + "DeviceFleetName": { + "target": "com.amazonaws.sagemaker#EntityName", + "traits": { + "smithy.api#documentation": "

        The name of the device fleet used for the deployment.

        ", + "smithy.api#required": {} + } + }, + "EdgeDeploymentSuccess": { + "target": "com.amazonaws.sagemaker#Integer", + "traits": { + "smithy.api#documentation": "

        The number of edge devices with the successful deployment.

        ", + "smithy.api#required": {} + } + }, + "EdgeDeploymentPending": { + "target": "com.amazonaws.sagemaker#Integer", + "traits": { + "smithy.api#documentation": "

        The number of edge devices yet to pick up the deployment, or in progress.

        ", + "smithy.api#required": {} + } + }, + "EdgeDeploymentFailed": { + "target": "com.amazonaws.sagemaker#Integer", + "traits": { + "smithy.api#documentation": "

        The number of edge devices that failed the deployment.

        ", + "smithy.api#required": {} + } + }, + "CreationTime": { + "target": "com.amazonaws.sagemaker#Timestamp", + "traits": { + "smithy.api#documentation": "

        The time when the edge deployment plan was created.

        " + } + }, + "LastModifiedTime": { + "target": "com.amazonaws.sagemaker#Timestamp", + "traits": { + "smithy.api#documentation": "

        The time when the edge deployment plan was last updated.

        " + } + } + }, + "traits": { + "smithy.api#documentation": "

        Contains information summarizing an edge deployment plan.

        " + } + }, + "com.amazonaws.sagemaker#EdgeDeploymentStatus": { + "type": "structure", + "members": { + "StageStatus": { + "target": "com.amazonaws.sagemaker#StageStatus", + "traits": { + "smithy.api#documentation": "

        The general status of the current stage.

        ", + "smithy.api#required": {} + } + }, + "EdgeDeploymentSuccessInStage": { + "target": "com.amazonaws.sagemaker#Integer", + "traits": { + "smithy.api#documentation": "

        The number of edge devices with the successful deployment in the current stage.

        ", + "smithy.api#required": {} + } + }, + "EdgeDeploymentPendingInStage": { + "target": "com.amazonaws.sagemaker#Integer", + "traits": { + "smithy.api#documentation": "

        The number of edge devices yet to pick up the deployment in current stage, or in progress.

        ", + "smithy.api#required": {} + } + }, + "EdgeDeploymentFailedInStage": { + "target": "com.amazonaws.sagemaker#Integer", + "traits": { + "smithy.api#documentation": "

        The number of edge devices that failed the deployment in current stage.

        ", + "smithy.api#required": {} + } + }, + "EdgeDeploymentStatusMessage": { + "target": "com.amazonaws.sagemaker#String", + "traits": { + "smithy.api#documentation": "

        A detailed message about deployment status in current stage.

        " + } + }, + "EdgeDeploymentStageStartTime": { + "target": "com.amazonaws.sagemaker#Timestamp", + "traits": { + "smithy.api#documentation": "

        The time when the deployment API started.

        " + } + } + }, + "traits": { + "smithy.api#documentation": "

        Contains information summarizing the deployment stage results.

        " + } + }, "com.amazonaws.sagemaker#EdgeModel": { "type": "structure", "members": { @@ -17837,6 +18543,21 @@ "smithy.api#documentation": "

        The container for the metadata for Fail step.

        " } }, + "com.amazonaws.sagemaker#FailureHandlingPolicy": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "ROLLBACK_ON_FAILURE", + "name": "RollbackOnFailure" + }, + { + "value": "DO_NOTHING", + "name": "DoNothing" + } + ] + } + }, "com.amazonaws.sagemaker#FailureReason": { "type": "string", "traits": { @@ -23477,6 +24198,131 @@ } } }, + "com.amazonaws.sagemaker#ListEdgeDeploymentPlans": { + "type": "operation", + "input": { + "target": "com.amazonaws.sagemaker#ListEdgeDeploymentPlansRequest" + }, + "output": { + "target": "com.amazonaws.sagemaker#ListEdgeDeploymentPlansResponse" + }, + "traits": { + "smithy.api#documentation": "

        Lists all edge deployment plans.

        ", + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "EdgeDeploymentPlanSummaries", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.sagemaker#ListEdgeDeploymentPlansRequest": { + "type": "structure", + "members": { + "NextToken": { + "target": "com.amazonaws.sagemaker#NextToken", + "traits": { + "smithy.api#documentation": "

        The response from the last list when returning a list large enough to need tokening.

        " + } + }, + "MaxResults": { + "target": "com.amazonaws.sagemaker#ListMaxResults", + "traits": { + "smithy.api#box": {}, + "smithy.api#documentation": "

        The maximum number of results to select (50 by default).

        " + } + }, + "CreationTimeAfter": { + "target": "com.amazonaws.sagemaker#Timestamp", + "traits": { + "smithy.api#documentation": "

        Selects edge deployment plans created after this time.

        " + } + }, + "CreationTimeBefore": { + "target": "com.amazonaws.sagemaker#Timestamp", + "traits": { + "smithy.api#documentation": "

        Selects edge deployment plans created before this time.

        " + } + }, + "LastModifiedTimeAfter": { + "target": "com.amazonaws.sagemaker#Timestamp", + "traits": { + "smithy.api#documentation": "

        Selects edge deployment plans that were last updated after this time.

        " + } + }, + "LastModifiedTimeBefore": { + "target": "com.amazonaws.sagemaker#Timestamp", + "traits": { + "smithy.api#documentation": "

        Selects edge deployment plans that were last updated before this time.

        " + } + }, + "NameContains": { + "target": "com.amazonaws.sagemaker#NameContains", + "traits": { + "smithy.api#documentation": "

        Selects edge deployment plans with names containing this name.

        " + } + }, + "DeviceFleetNameContains": { + "target": "com.amazonaws.sagemaker#NameContains", + "traits": { + "smithy.api#documentation": "

        Selects edge deployment plans with a device fleet name containing this name.

        " + } + }, + "SortBy": { + "target": "com.amazonaws.sagemaker#ListEdgeDeploymentPlansSortBy", + "traits": { + "smithy.api#documentation": "

        The column by which to sort the edge deployment plans. Can be one of NAME, DEVICEFLEETNAME, CREATIONTIME, LASTMODIFIEDTIME.

        " + } + }, + "SortOrder": { + "target": "com.amazonaws.sagemaker#SortOrder", + "traits": { + "smithy.api#documentation": "

        The direction of the sorting (ascending or descending).

        " + } + } + } + }, + "com.amazonaws.sagemaker#ListEdgeDeploymentPlansResponse": { + "type": "structure", + "members": { + "EdgeDeploymentPlanSummaries": { + "target": "com.amazonaws.sagemaker#EdgeDeploymentPlanSummaries", + "traits": { + "smithy.api#documentation": "

        List of summaries of edge deployment plans.

        ", + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "com.amazonaws.sagemaker#NextToken", + "traits": { + "smithy.api#documentation": "

        The token to use when calling the next page of results.

        " + } + } + } + }, + "com.amazonaws.sagemaker#ListEdgeDeploymentPlansSortBy": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "NAME", + "name": "Name" + }, + { + "value": "DEVICE_FLEET_NAME", + "name": "DeviceFleetName" + }, + { + "value": "CREATION_TIME", + "name": "CreationTime" + }, + { + "value": "LAST_MODIFIED_TIME", + "name": "LastModifiedTime" + } + ] + } + }, "com.amazonaws.sagemaker#ListEdgePackagingJobs": { "type": "operation", "input": { @@ -26360,6 +27206,80 @@ } } }, + "com.amazonaws.sagemaker#ListStageDevices": { + "type": "operation", + "input": { + "target": "com.amazonaws.sagemaker#ListStageDevicesRequest" + }, + "output": { + "target": "com.amazonaws.sagemaker#ListStageDevicesResponse" + }, + "traits": { + "smithy.api#documentation": "

        Lists devices allocated to the stage, containing detailed device information and deployment status.

        ", + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "DeviceDeploymentSummaries", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.sagemaker#ListStageDevicesRequest": { + "type": "structure", + "members": { + "NextToken": { + "target": "com.amazonaws.sagemaker#NextToken", + "traits": { + "smithy.api#documentation": "

        The response from the last list when returning a list large enough to need tokening.

        " + } + }, + "MaxResults": { + "target": "com.amazonaws.sagemaker#ListMaxResults", + "traits": { + "smithy.api#box": {}, + "smithy.api#documentation": "

        The maximum number of results to select.

        " + } + }, + "EdgeDeploymentPlanName": { + "target": "com.amazonaws.sagemaker#EntityName", + "traits": { + "smithy.api#documentation": "

        The name of the edge deployment plan.

        ", + "smithy.api#required": {} + } + }, + "ExcludeDevicesDeployedInOtherStage": { + "target": "com.amazonaws.sagemaker#Boolean", + "traits": { + "smithy.api#documentation": "

        Toggle for excluding devices deployed in other stages.

        " + } + }, + "StageName": { + "target": "com.amazonaws.sagemaker#EntityName", + "traits": { + "smithy.api#documentation": "

        The name of the stage in the deployment.

        ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.sagemaker#ListStageDevicesResponse": { + "type": "structure", + "members": { + "DeviceDeploymentSummaries": { + "target": "com.amazonaws.sagemaker#DeviceDeploymentSummaries", + "traits": { + "smithy.api#documentation": "

        List of summaries of devices allocated to the stage.

        ", + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "com.amazonaws.sagemaker#NextToken", + "traits": { + "smithy.api#documentation": "

        The token to use when calling the next page of results.

        " + } + } + } + }, "com.amazonaws.sagemaker#ListStudioLifecycleConfigs": { "type": "operation", "input": { @@ -31381,6 +32301,14 @@ } } }, + "com.amazonaws.sagemaker#Percentage": { + "type": "integer", + "traits": { + "smithy.api#range": { + "max": 100 + } + } + }, "com.amazonaws.sagemaker#Phase": { "type": "structure", "members": { @@ -35774,6 +36702,12 @@ { "target": "com.amazonaws.sagemaker#CreateDomain" }, + { + "target": "com.amazonaws.sagemaker#CreateEdgeDeploymentPlan" + }, + { + "target": "com.amazonaws.sagemaker#CreateEdgeDeploymentStage" + }, { "target": "com.amazonaws.sagemaker#CreateEdgePackagingJob" }, @@ -35909,6 +36843,12 @@ { "target": "com.amazonaws.sagemaker#DeleteDomain" }, + { + "target": "com.amazonaws.sagemaker#DeleteEdgeDeploymentPlan" + }, + { + "target": "com.amazonaws.sagemaker#DeleteEdgeDeploymentStage" + }, { "target": "com.amazonaws.sagemaker#DeleteEndpoint" }, @@ -36032,6 +36972,9 @@ { "target": "com.amazonaws.sagemaker#DescribeDomain" }, + { + "target": "com.amazonaws.sagemaker#DescribeEdgeDeploymentPlan" + }, { "target": "com.amazonaws.sagemaker#DescribeEdgePackagingJob" }, @@ -36212,6 +37155,9 @@ { "target": "com.amazonaws.sagemaker#ListDomains" }, + { + "target": "com.amazonaws.sagemaker#ListEdgeDeploymentPlans" + }, { "target": "com.amazonaws.sagemaker#ListEdgePackagingJobs" }, @@ -36305,6 +37251,9 @@ { "target": "com.amazonaws.sagemaker#ListProjects" }, + { + "target": "com.amazonaws.sagemaker#ListStageDevices" + }, { "target": "com.amazonaws.sagemaker#ListStudioLifecycleConfigs" }, @@ -36362,6 +37311,9 @@ { "target": "com.amazonaws.sagemaker#SendPipelineExecutionStepSuccess" }, + { + "target": "com.amazonaws.sagemaker#StartEdgeDeploymentStage" + }, { "target": "com.amazonaws.sagemaker#StartMonitoringSchedule" }, @@ -36377,6 +37329,9 @@ { "target": "com.amazonaws.sagemaker#StopCompilationJob" }, + { + "target": "com.amazonaws.sagemaker#StopEdgeDeploymentStage" + }, { "target": 
"com.amazonaws.sagemaker#StopEdgePackagingJob" }, @@ -37501,6 +38456,76 @@ ] } }, + "com.amazonaws.sagemaker#StageStatus": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "CREATING", + "name": "Creating" + }, + { + "value": "READYTODEPLOY", + "name": "ReadyToDeploy" + }, + { + "value": "STARTING", + "name": "Starting" + }, + { + "value": "INPROGRESS", + "name": "InProgress" + }, + { + "value": "DEPLOYED", + "name": "Deployed" + }, + { + "value": "FAILED", + "name": "Failed" + }, + { + "value": "STOPPING", + "name": "Stopping" + }, + { + "value": "STOPPED", + "name": "Stopped" + } + ] + } + }, + "com.amazonaws.sagemaker#StartEdgeDeploymentStage": { + "type": "operation", + "input": { + "target": "com.amazonaws.sagemaker#StartEdgeDeploymentStageRequest" + }, + "output": { + "target": "smithy.api#Unit" + }, + "traits": { + "smithy.api#documentation": "

        Starts a stage in an edge deployment plan.

        " + } + }, + "com.amazonaws.sagemaker#StartEdgeDeploymentStageRequest": { + "type": "structure", + "members": { + "EdgeDeploymentPlanName": { + "target": "com.amazonaws.sagemaker#EntityName", + "traits": { + "smithy.api#documentation": "

        The name of the edge deployment plan to start.

        ", + "smithy.api#required": {} + } + }, + "StageName": { + "target": "com.amazonaws.sagemaker#EntityName", + "traits": { + "smithy.api#documentation": "

        The name of the stage to start.

        ", + "smithy.api#required": {} + } + } + } + }, "com.amazonaws.sagemaker#StartMonitoringSchedule": { "type": "operation", "input": { @@ -37766,6 +38791,37 @@ } } }, + "com.amazonaws.sagemaker#StopEdgeDeploymentStage": { + "type": "operation", + "input": { + "target": "com.amazonaws.sagemaker#StopEdgeDeploymentStageRequest" + }, + "output": { + "target": "smithy.api#Unit" + }, + "traits": { + "smithy.api#documentation": "

        Stops a stage in an edge deployment plan.

        " + } + }, + "com.amazonaws.sagemaker#StopEdgeDeploymentStageRequest": { + "type": "structure", + "members": { + "EdgeDeploymentPlanName": { + "target": "com.amazonaws.sagemaker#EntityName", + "traits": { + "smithy.api#documentation": "

        The name of the edge deployment plan to stop.

        ", + "smithy.api#required": {} + } + }, + "StageName": { + "target": "com.amazonaws.sagemaker#EntityName", + "traits": { + "smithy.api#documentation": "

        The name of the stage to stop.

        ", + "smithy.api#required": {} + } + } + } + }, "com.amazonaws.sagemaker#StopEdgePackagingJob": { "type": "operation", "input": { From 87e9e2970f5fd24742968462e3944235c5e08380 Mon Sep 17 00:00:00 2001 From: awstools Date: Mon, 18 Jul 2022 18:30:53 +0000 Subject: [PATCH 08/31] feat(clients): update client endpoints as of 2022-07-18 --- clients/client-health/src/endpoints.ts | 26 +++++++++++- .../aws/typescript/codegen/endpoints.json | 40 +++++++++++++++++-- 2 files changed, 60 insertions(+), 6 deletions(-) diff --git a/clients/client-health/src/endpoints.ts b/clients/client-health/src/endpoints.ts index 0aef8959ef55d..4f40723d052a3 100644 --- a/clients/client-health/src/endpoints.ts +++ b/clients/client-health/src/endpoints.ts @@ -2,7 +2,26 @@ import { getRegionInfo, PartitionHash, RegionHash } from "@aws-sdk/config-resolver"; import { RegionInfoProvider, RegionInfoProviderOptions } from "@aws-sdk/types"; -const regionHash: RegionHash = {}; +const regionHash: RegionHash = { + "aws-cn-global": { + variants: [ + { + hostname: "global.health.amazonaws.com.cn", + tags: [], + }, + ], + signingRegion: "cn-northwest-1", + }, + "aws-global": { + variants: [ + { + hostname: "global.health.amazonaws.com", + tags: [], + }, + ], + signingRegion: "us-east-1", + }, +}; const partitionHash: PartitionHash = { aws: { @@ -16,6 +35,7 @@ const partitionHash: PartitionHash = { "ap-southeast-1", "ap-southeast-2", "ap-southeast-3", + "aws-global", "ca-central-1", "eu-central-1", "eu-north-1", @@ -50,9 +70,10 @@ const partitionHash: PartitionHash = { tags: ["dualstack"], }, ], + endpoint: "aws-global", }, "aws-cn": { - regions: ["cn-north-1", "cn-northwest-1"], + regions: ["aws-cn-global", "cn-north-1", "cn-northwest-1"], regionRegex: "^cn\\-\\w+\\-\\d+$", variants: [ { @@ -72,6 +93,7 @@ const partitionHash: PartitionHash = { tags: ["dualstack"], }, ], + endpoint: "aws-cn-global", }, "aws-iso": { regions: ["us-iso-east-1", "us-iso-west-1"], diff --git 
a/codegen/smithy-aws-typescript-codegen/src/main/resources/software/amazon/smithy/aws/typescript/codegen/endpoints.json b/codegen/smithy-aws-typescript-codegen/src/main/resources/software/amazon/smithy/aws/typescript/codegen/endpoints.json index ed74003a92bf5..02dfa5228f61e 100644 --- a/codegen/smithy-aws-typescript-codegen/src/main/resources/software/amazon/smithy/aws/typescript/codegen/endpoints.json +++ b/codegen/smithy-aws-typescript-codegen/src/main/resources/software/amazon/smithy/aws/typescript/codegen/endpoints.json @@ -4693,6 +4693,16 @@ } } }, + "edge.sagemaker": { + "endpoints": { + "ap-northeast-1": {}, + "eu-central-1": {}, + "eu-west-1": {}, + "us-east-1": {}, + "us-east-2": {}, + "us-west-2": {} + } + }, "eks": { "defaults": { "protocols": ["http", "https"], @@ -7023,7 +7033,17 @@ "isRegionalized": true }, "health": { + "defaults": { + "protocols": ["https"], + "sslCommonName": "health.us-east-1.amazonaws.com" + }, "endpoints": { + "aws-global": { + "credentialScope": { + "region": "us-east-1" + }, + "hostname": "global.health.amazonaws.com" + }, "fips-us-east-2": { "credentialScope": { "region": "us-east-2" @@ -7043,7 +7063,9 @@ } ] } - } + }, + "isRegionalized": false, + "partitionEndpoint": "aws-global" }, "healthlake": { "defaults": { @@ -16150,10 +16172,20 @@ "isRegionalized": true }, "health": { + "defaults": { + "protocols": ["https"], + "sslCommonName": "health.cn-northwest-1.amazonaws.com.cn" + }, "endpoints": { - "cn-north-1": {}, - "cn-northwest-1": {} - } + "aws-cn-global": { + "credentialScope": { + "region": "cn-northwest-1" + }, + "hostname": "global.health.amazonaws.com.cn" + } + }, + "isRegionalized": false, + "partitionEndpoint": "aws-cn-global" }, "iam": { "endpoints": { From 97d5f288e94e8d19f9079aed97126df606f4c315 Mon Sep 17 00:00:00 2001 From: awstools Date: Mon, 18 Jul 2022 18:48:53 +0000 Subject: [PATCH 09/31] Publish v3.132.0 --- CHANGELOG.md | 17 +++++++++++++++++ .../CHANGELOG.md | 11 +++++++++++ .../package.json | 2 +- 
clients/client-ec2/CHANGELOG.md | 8 ++++++++ clients/client-ec2/package.json | 2 +- clients/client-elasticache/CHANGELOG.md | 11 +++++++++++ clients/client-elasticache/package.json | 2 +- clients/client-health/CHANGELOG.md | 11 +++++++++++ clients/client-health/package.json | 2 +- clients/client-kms/CHANGELOG.md | 11 +++++++++++ clients/client-kms/package.json | 2 +- clients/client-mediapackage/CHANGELOG.md | 11 +++++++++++ clients/client-mediapackage/package.json | 2 +- clients/client-sagemaker/CHANGELOG.md | 11 +++++++++++ clients/client-sagemaker/package.json | 2 +- clients/client-sso-admin/CHANGELOG.md | 11 +++++++++++ clients/client-sso-admin/package.json | 2 +- lerna.json | 2 +- 18 files changed, 111 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index bd9022a9a2199..6733b77dafe75 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,23 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. 
+# [3.132.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.131.0...v3.132.0) (2022-07-18) + + +### Features + +* **client-application-discovery-service:** Add AWS Agentless Collector details to the GetDiscoverySummary API response ([d790bda](https://github.com/aws/aws-sdk-js-v3/commit/d790bda6e7dffcb5a74c12550d4bbd14bdb4051c)) +* **client-elasticache:** Adding AutoMinorVersionUpgrade in the DescribeReplicationGroups API ([e694134](https://github.com/aws/aws-sdk-js-v3/commit/e694134054db9c206aec4b6e6fa26b8ab807d6eb)) +* **client-kms:** Added support for the SM2 KeySpec in China Partition Regions ([250fd19](https://github.com/aws/aws-sdk-js-v3/commit/250fd19d65a83b3d183ced9863bf6aba8f1fb698)) +* **client-mediapackage:** This release adds "IncludeIframeOnlyStream" for Dash endpoints and increases the number of supported video and audio encryption presets for Speke v2 ([1af0368](https://github.com/aws/aws-sdk-js-v3/commit/1af0368f117fcf8c5bc43659183aec028b2ecc8e)) +* **client-sagemaker:** Amazon SageMaker Edge Manager provides lightweight model deployment feature to deploy machine learning models on requested devices. ([3f89139](https://github.com/aws/aws-sdk-js-v3/commit/3f891393cfe5002f24317a340f88ce0e1df7aa44)) +* **client-sso-admin:** AWS SSO now supports attaching customer managed policies and a permissions boundary to your permission sets. This release adds new API operations to manage and view the customer managed policies and the permissions boundary for a given permission set. 
([e65c9b2](https://github.com/aws/aws-sdk-js-v3/commit/e65c9b2e1203291d68d0fdb11d1b467d83e37cf5)) +* **clients:** update client endpoints as of 2022-07-18 ([87e9e29](https://github.com/aws/aws-sdk-js-v3/commit/87e9e2970f5fd24742968462e3944235c5e08380)) + + + + + # [3.131.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.130.0...v3.131.0) (2022-07-15) diff --git a/clients/client-application-discovery-service/CHANGELOG.md b/clients/client-application-discovery-service/CHANGELOG.md index 09bc2c5ac8f14..5c0b6a1bbd12c 100644 --- a/clients/client-application-discovery-service/CHANGELOG.md +++ b/clients/client-application-discovery-service/CHANGELOG.md @@ -3,6 +3,17 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. +# [3.132.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.131.0...v3.132.0) (2022-07-18) + + +### Features + +* **client-application-discovery-service:** Add AWS Agentless Collector details to the GetDiscoverySummary API response ([d790bda](https://github.com/aws/aws-sdk-js-v3/commit/d790bda6e7dffcb5a74c12550d4bbd14bdb4051c)) + + + + + # [3.131.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.130.0...v3.131.0) (2022-07-15) **Note:** Version bump only for package @aws-sdk/client-application-discovery-service diff --git a/clients/client-application-discovery-service/package.json b/clients/client-application-discovery-service/package.json index 8288e9fe8b97f..b545751037a8f 100644 --- a/clients/client-application-discovery-service/package.json +++ b/clients/client-application-discovery-service/package.json @@ -1,7 +1,7 @@ { "name": "@aws-sdk/client-application-discovery-service", "description": "AWS SDK for JavaScript Application Discovery Service Client for Node.js, Browser and React Native", - "version": "3.131.0", + "version": "3.132.0", "scripts": { "build": "concurrently 'yarn:build:cjs' 'yarn:build:es' 'yarn:build:types'", "build:cjs": "tsc -p 
tsconfig.cjs.json", diff --git a/clients/client-ec2/CHANGELOG.md b/clients/client-ec2/CHANGELOG.md index db4a7383778ef..1d0e8ed87ae0c 100644 --- a/clients/client-ec2/CHANGELOG.md +++ b/clients/client-ec2/CHANGELOG.md @@ -3,6 +3,14 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. +# [3.132.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.131.0...v3.132.0) (2022-07-18) + +**Note:** Version bump only for package @aws-sdk/client-ec2 + + + + + # [3.131.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.130.0...v3.131.0) (2022-07-15) **Note:** Version bump only for package @aws-sdk/client-ec2 diff --git a/clients/client-ec2/package.json b/clients/client-ec2/package.json index 076945459cc37..3929546357be9 100644 --- a/clients/client-ec2/package.json +++ b/clients/client-ec2/package.json @@ -1,7 +1,7 @@ { "name": "@aws-sdk/client-ec2", "description": "AWS SDK for JavaScript Ec2 Client for Node.js, Browser and React Native", - "version": "3.131.0", + "version": "3.132.0", "scripts": { "build": "concurrently 'yarn:build:cjs' 'yarn:build:es' 'yarn:build:types'", "build:cjs": "tsc -p tsconfig.cjs.json", diff --git a/clients/client-elasticache/CHANGELOG.md b/clients/client-elasticache/CHANGELOG.md index be995af834c7f..8757b2e08972b 100644 --- a/clients/client-elasticache/CHANGELOG.md +++ b/clients/client-elasticache/CHANGELOG.md @@ -3,6 +3,17 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. 
+# [3.132.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.131.0...v3.132.0) (2022-07-18) + + +### Features + +* **client-elasticache:** Adding AutoMinorVersionUpgrade in the DescribeReplicationGroups API ([e694134](https://github.com/aws/aws-sdk-js-v3/commit/e694134054db9c206aec4b6e6fa26b8ab807d6eb)) + + + + + # [3.131.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.130.0...v3.131.0) (2022-07-15) **Note:** Version bump only for package @aws-sdk/client-elasticache diff --git a/clients/client-elasticache/package.json b/clients/client-elasticache/package.json index a60daee08e202..d4b236a6c0cd1 100644 --- a/clients/client-elasticache/package.json +++ b/clients/client-elasticache/package.json @@ -1,7 +1,7 @@ { "name": "@aws-sdk/client-elasticache", "description": "AWS SDK for JavaScript Elasticache Client for Node.js, Browser and React Native", - "version": "3.131.0", + "version": "3.132.0", "scripts": { "build": "concurrently 'yarn:build:cjs' 'yarn:build:es' 'yarn:build:types'", "build:cjs": "tsc -p tsconfig.cjs.json", diff --git a/clients/client-health/CHANGELOG.md b/clients/client-health/CHANGELOG.md index 56382fe1cb8a8..8b5c171ca2e94 100644 --- a/clients/client-health/CHANGELOG.md +++ b/clients/client-health/CHANGELOG.md @@ -3,6 +3,17 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. 
+# [3.132.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.131.0...v3.132.0) (2022-07-18) + + +### Features + +* **clients:** update client endpoints as of 2022-07-18 ([87e9e29](https://github.com/aws/aws-sdk-js-v3/commit/87e9e2970f5fd24742968462e3944235c5e08380)) + + + + + # [3.131.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.130.0...v3.131.0) (2022-07-15) **Note:** Version bump only for package @aws-sdk/client-health diff --git a/clients/client-health/package.json b/clients/client-health/package.json index 8da1e7699280a..88c183a09cfa7 100644 --- a/clients/client-health/package.json +++ b/clients/client-health/package.json @@ -1,7 +1,7 @@ { "name": "@aws-sdk/client-health", "description": "AWS SDK for JavaScript Health Client for Node.js, Browser and React Native", - "version": "3.131.0", + "version": "3.132.0", "scripts": { "build": "concurrently 'yarn:build:cjs' 'yarn:build:es' 'yarn:build:types'", "build:cjs": "tsc -p tsconfig.cjs.json", diff --git a/clients/client-kms/CHANGELOG.md b/clients/client-kms/CHANGELOG.md index 962421ed8c8ef..e1b01fe4d205a 100644 --- a/clients/client-kms/CHANGELOG.md +++ b/clients/client-kms/CHANGELOG.md @@ -3,6 +3,17 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. 
+# [3.132.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.131.0...v3.132.0) (2022-07-18) + + +### Features + +* **client-kms:** Added support for the SM2 KeySpec in China Partition Regions ([250fd19](https://github.com/aws/aws-sdk-js-v3/commit/250fd19d65a83b3d183ced9863bf6aba8f1fb698)) + + + + + # [3.131.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.130.0...v3.131.0) (2022-07-15) **Note:** Version bump only for package @aws-sdk/client-kms diff --git a/clients/client-kms/package.json b/clients/client-kms/package.json index 45a1c873a5cb3..338b29a723f06 100644 --- a/clients/client-kms/package.json +++ b/clients/client-kms/package.json @@ -1,7 +1,7 @@ { "name": "@aws-sdk/client-kms", "description": "AWS SDK for JavaScript Kms Client for Node.js, Browser and React Native", - "version": "3.131.0", + "version": "3.132.0", "scripts": { "build": "concurrently 'yarn:build:cjs' 'yarn:build:es' 'yarn:build:types'", "build:cjs": "tsc -p tsconfig.cjs.json", diff --git a/clients/client-mediapackage/CHANGELOG.md b/clients/client-mediapackage/CHANGELOG.md index 09693f6444829..b705a72d112b0 100644 --- a/clients/client-mediapackage/CHANGELOG.md +++ b/clients/client-mediapackage/CHANGELOG.md @@ -3,6 +3,17 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. 
+# [3.132.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.131.0...v3.132.0) (2022-07-18) + + +### Features + +* **client-mediapackage:** This release adds "IncludeIframeOnlyStream" for Dash endpoints and increases the number of supported video and audio encryption presets for Speke v2 ([1af0368](https://github.com/aws/aws-sdk-js-v3/commit/1af0368f117fcf8c5bc43659183aec028b2ecc8e)) + + + + + # [3.131.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.130.0...v3.131.0) (2022-07-15) **Note:** Version bump only for package @aws-sdk/client-mediapackage diff --git a/clients/client-mediapackage/package.json b/clients/client-mediapackage/package.json index 46b277be00521..b5077f388290c 100644 --- a/clients/client-mediapackage/package.json +++ b/clients/client-mediapackage/package.json @@ -1,7 +1,7 @@ { "name": "@aws-sdk/client-mediapackage", "description": "AWS SDK for JavaScript Mediapackage Client for Node.js, Browser and React Native", - "version": "3.131.0", + "version": "3.132.0", "scripts": { "build": "concurrently 'yarn:build:cjs' 'yarn:build:es' 'yarn:build:types'", "build:cjs": "tsc -p tsconfig.cjs.json", diff --git a/clients/client-sagemaker/CHANGELOG.md b/clients/client-sagemaker/CHANGELOG.md index 6cbe9402a98bf..579855cb368aa 100644 --- a/clients/client-sagemaker/CHANGELOG.md +++ b/clients/client-sagemaker/CHANGELOG.md @@ -3,6 +3,17 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. +# [3.132.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.131.0...v3.132.0) (2022-07-18) + + +### Features + +* **client-sagemaker:** Amazon SageMaker Edge Manager provides lightweight model deployment feature to deploy machine learning models on requested devices. 
([3f89139](https://github.com/aws/aws-sdk-js-v3/commit/3f891393cfe5002f24317a340f88ce0e1df7aa44)) + + + + + # [3.131.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.130.0...v3.131.0) (2022-07-15) **Note:** Version bump only for package @aws-sdk/client-sagemaker diff --git a/clients/client-sagemaker/package.json b/clients/client-sagemaker/package.json index 06be980e522b6..ac1285425620b 100644 --- a/clients/client-sagemaker/package.json +++ b/clients/client-sagemaker/package.json @@ -1,7 +1,7 @@ { "name": "@aws-sdk/client-sagemaker", "description": "AWS SDK for JavaScript Sagemaker Client for Node.js, Browser and React Native", - "version": "3.131.0", + "version": "3.132.0", "scripts": { "build": "concurrently 'yarn:build:cjs' 'yarn:build:es' 'yarn:build:types'", "build:cjs": "tsc -p tsconfig.cjs.json", diff --git a/clients/client-sso-admin/CHANGELOG.md b/clients/client-sso-admin/CHANGELOG.md index d7dd2531724c6..79ee9af0f3a22 100644 --- a/clients/client-sso-admin/CHANGELOG.md +++ b/clients/client-sso-admin/CHANGELOG.md @@ -3,6 +3,17 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. +# [3.132.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.131.0...v3.132.0) (2022-07-18) + + +### Features + +* **client-sso-admin:** AWS SSO now supports attaching customer managed policies and a permissions boundary to your permission sets. This release adds new API operations to manage and view the customer managed policies and the permissions boundary for a given permission set. 
([e65c9b2](https://github.com/aws/aws-sdk-js-v3/commit/e65c9b2e1203291d68d0fdb11d1b467d83e37cf5)) + + + + + # [3.131.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.130.0...v3.131.0) (2022-07-15) **Note:** Version bump only for package @aws-sdk/client-sso-admin diff --git a/clients/client-sso-admin/package.json b/clients/client-sso-admin/package.json index 6def89942002d..8ef2e5441695c 100644 --- a/clients/client-sso-admin/package.json +++ b/clients/client-sso-admin/package.json @@ -1,7 +1,7 @@ { "name": "@aws-sdk/client-sso-admin", "description": "AWS SDK for JavaScript Sso Admin Client for Node.js, Browser and React Native", - "version": "3.131.0", + "version": "3.132.0", "scripts": { "build": "concurrently 'yarn:build:cjs' 'yarn:build:es' 'yarn:build:types'", "build:cjs": "tsc -p tsconfig.cjs.json", diff --git a/lerna.json b/lerna.json index e0066cd64a254..02143bc0cfd2a 100644 --- a/lerna.json +++ b/lerna.json @@ -1,5 +1,5 @@ { - "version": "3.131.0", + "version": "3.132.0", "npmClient": "yarn", "useWorkspaces": true, "command": { From ed39bc2c1bd0d2bad021a043b7642fae5ea9263d Mon Sep 17 00:00:00 2001 From: awstools Date: Tue, 19 Jul 2022 18:24:17 +0000 Subject: [PATCH 10/31] feat(client-sagemaker): Fixed an issue with cross account QueryLineage --- clients/client-sagemaker/src/models/models_3.ts | 2 +- codegen/sdk-codegen/aws-models/sagemaker.json | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/clients/client-sagemaker/src/models/models_3.ts b/clients/client-sagemaker/src/models/models_3.ts index 1755b2c7621b3..0683543a7ebc0 100644 --- a/clients/client-sagemaker/src/models/models_3.ts +++ b/clients/client-sagemaker/src/models/models_3.ts @@ -3096,7 +3096,7 @@ export interface QueryLineageRequest { /** *

        A list of resource Amazon Resource Name (ARN) that represent the starting point for your lineage query.

        */ - StartArns: string[] | undefined; + StartArns?: string[]; /** *

        Associations between lineage entities have a direction. This parameter determines the direction from the diff --git a/codegen/sdk-codegen/aws-models/sagemaker.json b/codegen/sdk-codegen/aws-models/sagemaker.json index 0786d0856619c..82a13aeb7279f 100644 --- a/codegen/sdk-codegen/aws-models/sagemaker.json +++ b/codegen/sdk-codegen/aws-models/sagemaker.json @@ -35240,8 +35240,7 @@ "StartArns": { "target": "com.amazonaws.sagemaker#QueryLineageStartArns", "traits": { - "smithy.api#documentation": "

        A list of resource Amazon Resource Name (ARN) that represent the starting point for your lineage query.

        ", - "smithy.api#required": {} + "smithy.api#documentation": "

        A list of resource Amazon Resource Name (ARN) that represent the starting point for your lineage query.

        " } }, "Direction": { @@ -35312,7 +35311,7 @@ }, "traits": { "smithy.api#length": { - "min": 1, + "min": 0, "max": 1 } } From 90ed85b6817d3f035124d62818670b3b4807c492 Mon Sep 17 00:00:00 2001 From: awstools Date: Tue, 19 Jul 2022 18:24:17 +0000 Subject: [PATCH 11/31] feat(client-devops-guru): Added new APIs for log anomaly detection feature. --- clients/client-devops-guru/src/DevOpsGuru.ts | 78 +++ .../src/DevOpsGuruClient.ts | 12 + .../commands/ListAnomalousLogGroupsCommand.ts | 98 +++ .../commands/ListMonitoredResourcesCommand.ts | 98 +++ .../client-devops-guru/src/commands/index.ts | 2 + .../client-devops-guru/src/models/models_0.ts | 403 ++++++++++++- .../ListAnomalousLogGroupsPaginator.ts | 61 ++ .../ListMonitoredResourcesPaginator.ts | 61 ++ .../src/pagination/index.ts | 2 + .../src/protocols/Aws_restJson1.ts | 358 +++++++++++ .../sdk-codegen/aws-models/devops-guru.json | 557 +++++++++++++++++- 11 files changed, 1725 insertions(+), 5 deletions(-) create mode 100644 clients/client-devops-guru/src/commands/ListAnomalousLogGroupsCommand.ts create mode 100644 clients/client-devops-guru/src/commands/ListMonitoredResourcesCommand.ts create mode 100644 clients/client-devops-guru/src/pagination/ListAnomalousLogGroupsPaginator.ts create mode 100644 clients/client-devops-guru/src/pagination/ListMonitoredResourcesPaginator.ts diff --git a/clients/client-devops-guru/src/DevOpsGuru.ts b/clients/client-devops-guru/src/DevOpsGuru.ts index c63bd81c9e26a..9bf86d21f07be 100644 --- a/clients/client-devops-guru/src/DevOpsGuru.ts +++ b/clients/client-devops-guru/src/DevOpsGuru.ts @@ -81,12 +81,22 @@ import { ListAnomaliesForInsightCommandInput, ListAnomaliesForInsightCommandOutput, } from "./commands/ListAnomaliesForInsightCommand"; +import { + ListAnomalousLogGroupsCommand, + ListAnomalousLogGroupsCommandInput, + ListAnomalousLogGroupsCommandOutput, +} from "./commands/ListAnomalousLogGroupsCommand"; import { ListEventsCommand, ListEventsCommandInput, 
ListEventsCommandOutput } from "./commands/ListEventsCommand"; import { ListInsightsCommand, ListInsightsCommandInput, ListInsightsCommandOutput, } from "./commands/ListInsightsCommand"; +import { + ListMonitoredResourcesCommand, + ListMonitoredResourcesCommandInput, + ListMonitoredResourcesCommandOutput, +} from "./commands/ListMonitoredResourcesCommand"; import { ListNotificationChannelsCommand, ListNotificationChannelsCommandInput, @@ -706,6 +716,40 @@ export class DevOpsGuru extends DevOpsGuruClient { } } + /** + *

        + * Returns the list of log groups that contain log anomalies. + *

        + */ + public listAnomalousLogGroups( + args: ListAnomalousLogGroupsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listAnomalousLogGroups( + args: ListAnomalousLogGroupsCommandInput, + cb: (err: any, data?: ListAnomalousLogGroupsCommandOutput) => void + ): void; + public listAnomalousLogGroups( + args: ListAnomalousLogGroupsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListAnomalousLogGroupsCommandOutput) => void + ): void; + public listAnomalousLogGroups( + args: ListAnomalousLogGroupsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListAnomalousLogGroupsCommandOutput) => void), + cb?: (err: any, data?: ListAnomalousLogGroupsCommandOutput) => void + ): Promise | void { + const command = new ListAnomalousLogGroupsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

        Returns a list of the events emitted by the resources that are evaluated by DevOps Guru. * You can use filters to specify which events are returned.

        @@ -764,6 +808,40 @@ export class DevOpsGuru extends DevOpsGuruClient { } } + /** + *

        + * Returns the list of all log groups that are being monitored and tagged by DevOps Guru. + *

        + */ + public listMonitoredResources( + args: ListMonitoredResourcesCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listMonitoredResources( + args: ListMonitoredResourcesCommandInput, + cb: (err: any, data?: ListMonitoredResourcesCommandOutput) => void + ): void; + public listMonitoredResources( + args: ListMonitoredResourcesCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListMonitoredResourcesCommandOutput) => void + ): void; + public listMonitoredResources( + args: ListMonitoredResourcesCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListMonitoredResourcesCommandOutput) => void), + cb?: (err: any, data?: ListMonitoredResourcesCommandOutput) => void + ): Promise | void { + const command = new ListMonitoredResourcesCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

        Returns a list of notification channels configured for DevOps Guru. Each notification * channel is used to notify you when DevOps Guru generates an insight that contains information diff --git a/clients/client-devops-guru/src/DevOpsGuruClient.ts b/clients/client-devops-guru/src/DevOpsGuruClient.ts index fe001b5be4ee2..7ea4e459a2ac6 100644 --- a/clients/client-devops-guru/src/DevOpsGuruClient.ts +++ b/clients/client-devops-guru/src/DevOpsGuruClient.ts @@ -102,8 +102,16 @@ import { ListAnomaliesForInsightCommandInput, ListAnomaliesForInsightCommandOutput, } from "./commands/ListAnomaliesForInsightCommand"; +import { + ListAnomalousLogGroupsCommandInput, + ListAnomalousLogGroupsCommandOutput, +} from "./commands/ListAnomalousLogGroupsCommand"; import { ListEventsCommandInput, ListEventsCommandOutput } from "./commands/ListEventsCommand"; import { ListInsightsCommandInput, ListInsightsCommandOutput } from "./commands/ListInsightsCommand"; +import { + ListMonitoredResourcesCommandInput, + ListMonitoredResourcesCommandOutput, +} from "./commands/ListMonitoredResourcesCommand"; import { ListNotificationChannelsCommandInput, ListNotificationChannelsCommandOutput, @@ -161,8 +169,10 @@ export type ServiceInputTypes = | GetCostEstimationCommandInput | GetResourceCollectionCommandInput | ListAnomaliesForInsightCommandInput + | ListAnomalousLogGroupsCommandInput | ListEventsCommandInput | ListInsightsCommandInput + | ListMonitoredResourcesCommandInput | ListNotificationChannelsCommandInput | ListOrganizationInsightsCommandInput | ListRecommendationsCommandInput @@ -192,8 +202,10 @@ export type ServiceOutputTypes = | GetCostEstimationCommandOutput | GetResourceCollectionCommandOutput | ListAnomaliesForInsightCommandOutput + | ListAnomalousLogGroupsCommandOutput | ListEventsCommandOutput | ListInsightsCommandOutput + | ListMonitoredResourcesCommandOutput | ListNotificationChannelsCommandOutput | ListOrganizationInsightsCommandOutput | ListRecommendationsCommandOutput diff 
--git a/clients/client-devops-guru/src/commands/ListAnomalousLogGroupsCommand.ts b/clients/client-devops-guru/src/commands/ListAnomalousLogGroupsCommand.ts new file mode 100644 index 0000000000000..55fbef7d62b07 --- /dev/null +++ b/clients/client-devops-guru/src/commands/ListAnomalousLogGroupsCommand.ts @@ -0,0 +1,98 @@ +// smithy-typescript generated code +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { DevOpsGuruClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../DevOpsGuruClient"; +import { ListAnomalousLogGroupsRequest, ListAnomalousLogGroupsResponse } from "../models/models_0"; +import { + deserializeAws_restJson1ListAnomalousLogGroupsCommand, + serializeAws_restJson1ListAnomalousLogGroupsCommand, +} from "../protocols/Aws_restJson1"; + +export interface ListAnomalousLogGroupsCommandInput extends ListAnomalousLogGroupsRequest {} +export interface ListAnomalousLogGroupsCommandOutput extends ListAnomalousLogGroupsResponse, __MetadataBearer {} + +/** + *

        + * Returns the list of log groups that contain log anomalies. + *

        + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { DevOpsGuruClient, ListAnomalousLogGroupsCommand } from "@aws-sdk/client-devops-guru"; // ES Modules import + * // const { DevOpsGuruClient, ListAnomalousLogGroupsCommand } = require("@aws-sdk/client-devops-guru"); // CommonJS import + * const client = new DevOpsGuruClient(config); + * const command = new ListAnomalousLogGroupsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListAnomalousLogGroupsCommandInput} for command's `input` shape. + * @see {@link ListAnomalousLogGroupsCommandOutput} for command's `response` shape. + * @see {@link DevOpsGuruClientResolvedConfig | config} for DevOpsGuruClient's `config` shape. + * + */ +export class ListAnomalousLogGroupsCommand extends $Command< + ListAnomalousLogGroupsCommandInput, + ListAnomalousLogGroupsCommandOutput, + DevOpsGuruClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListAnomalousLogGroupsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: DevOpsGuruClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "DevOpsGuruClient"; + const commandName = "ListAnomalousLogGroupsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListAnomalousLogGroupsRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListAnomalousLogGroupsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return 
stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListAnomalousLogGroupsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1ListAnomalousLogGroupsCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1ListAnomalousLogGroupsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-devops-guru/src/commands/ListMonitoredResourcesCommand.ts b/clients/client-devops-guru/src/commands/ListMonitoredResourcesCommand.ts new file mode 100644 index 0000000000000..94750c8694f76 --- /dev/null +++ b/clients/client-devops-guru/src/commands/ListMonitoredResourcesCommand.ts @@ -0,0 +1,98 @@ +// smithy-typescript generated code +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { DevOpsGuruClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../DevOpsGuruClient"; +import { ListMonitoredResourcesRequest, ListMonitoredResourcesResponse } from "../models/models_0"; +import { + deserializeAws_restJson1ListMonitoredResourcesCommand, + serializeAws_restJson1ListMonitoredResourcesCommand, +} from "../protocols/Aws_restJson1"; + +export interface ListMonitoredResourcesCommandInput extends ListMonitoredResourcesRequest {} +export interface ListMonitoredResourcesCommandOutput extends 
ListMonitoredResourcesResponse, __MetadataBearer {} + +/** + *

        + * Returns the list of all log groups that are being monitored and tagged by DevOps Guru. + *

        + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { DevOpsGuruClient, ListMonitoredResourcesCommand } from "@aws-sdk/client-devops-guru"; // ES Modules import + * // const { DevOpsGuruClient, ListMonitoredResourcesCommand } = require("@aws-sdk/client-devops-guru"); // CommonJS import + * const client = new DevOpsGuruClient(config); + * const command = new ListMonitoredResourcesCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListMonitoredResourcesCommandInput} for command's `input` shape. + * @see {@link ListMonitoredResourcesCommandOutput} for command's `response` shape. + * @see {@link DevOpsGuruClientResolvedConfig | config} for DevOpsGuruClient's `config` shape. + * + */ +export class ListMonitoredResourcesCommand extends $Command< + ListMonitoredResourcesCommandInput, + ListMonitoredResourcesCommandOutput, + DevOpsGuruClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListMonitoredResourcesCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: DevOpsGuruClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "DevOpsGuruClient"; + const commandName = "ListMonitoredResourcesCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListMonitoredResourcesRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListMonitoredResourcesResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return 
stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListMonitoredResourcesCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1ListMonitoredResourcesCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1ListMonitoredResourcesCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-devops-guru/src/commands/index.ts b/clients/client-devops-guru/src/commands/index.ts index 2b36ae510136f..b324c60d7c42e 100644 --- a/clients/client-devops-guru/src/commands/index.ts +++ b/clients/client-devops-guru/src/commands/index.ts @@ -15,8 +15,10 @@ export * from "./DescribeServiceIntegrationCommand"; export * from "./GetCostEstimationCommand"; export * from "./GetResourceCollectionCommand"; export * from "./ListAnomaliesForInsightCommand"; +export * from "./ListAnomalousLogGroupsCommand"; export * from "./ListEventsCommand"; export * from "./ListInsightsCommand"; +export * from "./ListMonitoredResourcesCommand"; export * from "./ListNotificationChannelsCommand"; export * from "./ListOrganizationInsightsCommand"; export * from "./ListRecommendationsCommand"; diff --git a/clients/client-devops-guru/src/models/models_0.ts b/clients/client-devops-guru/src/models/models_0.ts index 55417855fcd98..979be134726b9 100644 --- a/clients/client-devops-guru/src/models/models_0.ts +++ b/clients/client-devops-guru/src/models/models_0.ts @@ -422,6 +422,156 @@ export namespace AmazonCodeGuruProfilerIntegration { }); } +export enum LogAnomalyType { + BLOCK_FORMAT = "BLOCK_FORMAT", + FORMAT = "FORMAT", + HTTP_CODE = "HTTP_CODE", + KEYWORD = "KEYWORD", + KEYWORD_TOKEN = "KEYWORD_TOKEN", + NEW_FIELD_NAME = "NEW_FIELD_NAME", + NUMERICAL_NAN = 
"NUMERICAL_NAN", + NUMERICAL_POINT = "NUMERICAL_POINT", +} + +/** + *

        + * Information about an anomalous log event found within a log group. + *

        + */ +export interface LogAnomalyClass { + /** + *

        + * The name of the Amazon CloudWatch log stream that the anomalous log event belongs to. A log stream is a sequence of log events that share the same source. + *

        + */ + LogStreamName?: string; + + /** + *

        + * The type of log anomaly that has been detected. + *

        + */ + LogAnomalyType?: LogAnomalyType | string; + + /** + *

        + * The token where the anomaly was detected. This may refer to an exception or another location, or it may be blank for log anomalies such as format anomalies. + *

        + */ + LogAnomalyToken?: string; + + /** + *

        + * The ID of the log event. + *

        + */ + LogEventId?: string; + + /** + *

        + * The explanation for why the log event is considered an anomaly. + *

        + */ + Explanation?: string; + + /** + *

        + * The number of log lines where this anomalous log event occurs. + *

        + */ + NumberOfLogLinesOccurrences?: number; + + /** + *

        + * The time of the first occurrence of the anomalous log event. + *

        + */ + LogEventTimestamp?: Date; +} + +export namespace LogAnomalyClass { + /** + * @internal + */ + export const filterSensitiveLog = (obj: LogAnomalyClass): any => ({ + ...obj, + }); +} + +/** + *

        + * A cluster of similar anomalous log events found within a log group. + *

        + */ +export interface LogAnomalyShowcase { + /** + *

        + * A list of anomalous log events that may be related. + *

        + */ + LogAnomalyClasses?: LogAnomalyClass[]; +} + +export namespace LogAnomalyShowcase { + /** + * @internal + */ + export const filterSensitiveLog = (obj: LogAnomalyShowcase): any => ({ + ...obj, + }); +} + +/** + *

        + * An Amazon CloudWatch log group that contains log anomalies and is used to generate an insight. + *

        + */ +export interface AnomalousLogGroup { + /** + *

        + * The name of the CloudWatch log group. + *

        + */ + LogGroupName?: string; + + /** + *

        + * The time the anomalous log events began. The impact start time indicates the time of the first log anomaly event that occurs. + *

        + */ + ImpactStartTime?: Date; + + /** + *

        + * The time the anomalous log events stopped. + *

        + */ + ImpactEndTime?: Date; + + /** + *

        + * The number of log lines that were scanned for anomalous log events. + *

        + */ + NumberOfLogLinesScanned?: number; + + /** + *

        + * The log anomalies in the log group. Each log anomaly displayed represents a cluster of similar anomalous log events. + *

        + */ + LogAnomalyShowcases?: LogAnomalyShowcase[]; +} + +export namespace AnomalousLogGroup { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AnomalousLogGroup): any => ({ + ...obj, + }); +} + /** *

        A time range that specifies when DevOps Guru opens and then closes an anomaly. This * is different from AnomalyTimeRange, which specifies the time range when @@ -482,7 +632,7 @@ export enum AnomalySeverity { } /** - *

        The dimension of am Amazon CloudWatch metric that is used when DevOps Guru analyzes the resources in + *

        The dimension of an Amazon CloudWatch metric that is used when DevOps Guru analyzes the resources in * your account for operational problems and anomalous behavior. A dimension is a * name/value pair that is part of the identity of a metric. A metric can have up to 10 * dimensions. For more information, see Dimensions in the Amazon CloudWatch User Guide.

        @@ -2594,6 +2744,27 @@ export enum OptInStatus { ENABLED = "ENABLED", } +/** + *

        + * Information about the integration of DevOps Guru with CloudWatch log groups for log anomaly detection. + *

        + */ +export interface LogsAnomalyDetectionIntegration { + /** + *

        Specifies if DevOps Guru is configured to perform log anomaly detection on CloudWatch log groups.

        + */ + OptInStatus?: OptInStatus | string; +} + +export namespace LogsAnomalyDetectionIntegration { + /** + * @internal + */ + export const filterSensitiveLog = (obj: LogsAnomalyDetectionIntegration): any => ({ + ...obj, + }); +} + /** *

        Information about whether DevOps Guru is configured to create an OpsItem in Amazon Web Services Systems Manager * OpsCenter for each created insight.

        @@ -2625,6 +2796,13 @@ export interface ServiceIntegrationConfig { * OpsCenter for each created insight.

        */ OpsCenter?: OpsCenterIntegration; + + /** + *

        + * Information about whether DevOps Guru is configured to perform log anomaly detection on Amazon CloudWatch log groups. + *

        + */ + LogsAnomalyDetection?: LogsAnomalyDetectionIntegration; } export namespace ServiceIntegrationConfig { @@ -3405,6 +3583,67 @@ export namespace ListAnomaliesForInsightResponse { }); } +export interface ListAnomalousLogGroupsRequest { + /** + *

        + * The ID of the insight containing the log groups. + *

        + */ + InsightId: string | undefined; + + /** + *

        The maximum number of results to return with a single call. + * To retrieve the remaining results, make another call with the returned nextToken value.

        + */ + MaxResults?: number; + + /** + *

        The pagination token to use to retrieve + * the next page of results for this operation. If this value is null, it retrieves the first page.

        + */ + NextToken?: string; +} + +export namespace ListAnomalousLogGroupsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListAnomalousLogGroupsRequest): any => ({ + ...obj, + }); +} + +export interface ListAnomalousLogGroupsResponse { + /** + *

        + * The ID of the insight containing the log groups. + *

        + */ + InsightId: string | undefined; + + /** + *

        + * The list of Amazon CloudWatch log groups that are related to an insight. + *

        + */ + AnomalousLogGroups: AnomalousLogGroup[] | undefined; + + /** + *

        The pagination token to use to retrieve + * the next page of results for this operation. If there are no more pages, this value is null.

        + */ + NextToken?: string; +} + +export namespace ListAnomalousLogGroupsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListAnomalousLogGroupsResponse): any => ({ + ...obj, + }); +} + export enum EventDataSource { AWS_CLOUD_TRAIL = "AWS_CLOUD_TRAIL", AWS_CODE_DEPLOY = "AWS_CODE_DEPLOY", @@ -3985,6 +4224,136 @@ export namespace ListInsightsResponse { }); } +export enum ResourcePermission { + FULL_PERMISSION = "FULL_PERMISSION", + MISSING_PERMISSION = "MISSING_PERMISSION", +} + +export enum ResourceTypeFilter { + LOG_GROUPS = "LOG_GROUPS", +} + +/** + *

        + * Filters to determine which monitored resources you want to retrieve. You can filter by resource type or resource permission status. + *

        + */ +export interface ListMonitoredResourcesFilters { + /** + *

        + * The permission status of a resource. + *

        + */ + ResourcePermission: ResourcePermission | string | undefined; + + /** + *

        + * The type of resource that you wish to retrieve, such as log groups. + *

        + */ + ResourceTypeFilters: (ResourceTypeFilter | string)[] | undefined; +} + +export namespace ListMonitoredResourcesFilters { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListMonitoredResourcesFilters): any => ({ + ...obj, + }); +} + +export interface ListMonitoredResourcesRequest { + /** + *

        + * Filters to determine which monitored resources you want to retrieve. You can filter by resource type or resource permission status. + *

        + */ + Filters: ListMonitoredResourcesFilters | undefined; + + /** + *

        The maximum number of results to return with a single call. + * To retrieve the remaining results, make another call with the returned nextToken value.

        + */ + MaxResults?: number; + + /** + *

        The pagination token to use to retrieve + * the next page of results for this operation. If this value is null, it retrieves the first page.

        + */ + NextToken?: string; +} + +export namespace ListMonitoredResourcesRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListMonitoredResourcesRequest): any => ({ + ...obj, + }); +} + +/** + *

        + * Information about the resource that is being monitored, including the name of the resource, the type of resource, and whether or not permission is given to DevOps Guru to access that resource. + *

        + */ +export interface MonitoredResourceIdentifier { + /** + *

        + * The name of the resource being monitored. + *

        + */ + MonitoredResourceName?: string; + + /** + *

        + * The type of resource being monitored. + *

        + */ + Type?: string; + + /** + *

        + * The permission status of a resource. + *

        + */ + ResourcePermission?: ResourcePermission | string; +} + +export namespace MonitoredResourceIdentifier { + /** + * @internal + */ + export const filterSensitiveLog = (obj: MonitoredResourceIdentifier): any => ({ + ...obj, + }); +} + +export interface ListMonitoredResourcesResponse { + /** + *

        + * Information about the resource that is being monitored, including the name of the resource, the type of resource, and whether or not permission is given to DevOps Guru to access that resource. + *

        + */ + MonitoredResourceIdentifiers: MonitoredResourceIdentifier[] | undefined; + + /** + *

        The pagination token to use to retrieve + * the next page of results for this operation. If there are no more pages, this value is null.

        + */ + NextToken?: string; +} + +export namespace ListMonitoredResourcesResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListMonitoredResourcesResponse): any => ({ + ...obj, + }); +} + export interface ListNotificationChannelsRequest { /** *

        The pagination token to use to retrieve @@ -5038,9 +5407,30 @@ export namespace UpdateResourceCollectionResponse { }); } +/** + *

        + * Information about the integration of DevOps Guru with CloudWatch log groups for log anomaly detection. You can use this to update the configuration. + *

        + */ +export interface LogsAnomalyDetectionIntegrationConfig { + /** + *

        Specifies if DevOps Guru is configured to perform log anomaly detection on CloudWatch log groups.

        + */ + OptInStatus?: OptInStatus | string; +} + +export namespace LogsAnomalyDetectionIntegrationConfig { + /** + * @internal + */ + export const filterSensitiveLog = (obj: LogsAnomalyDetectionIntegrationConfig): any => ({ + ...obj, + }); +} + /** *

        Information about whether DevOps Guru is configured to create an OpsItem in Amazon Web Services Systems Manager - * OpsCenter for each created insight.

        + * OpsCenter for each created insight. You can use this to update the configuration.

        */ export interface OpsCenterIntegrationConfig { /** @@ -5066,9 +5456,16 @@ export namespace OpsCenterIntegrationConfig { export interface UpdateServiceIntegrationConfig { /** *

        Information about whether DevOps Guru is configured to create an OpsItem in Amazon Web Services Systems Manager - * OpsCenter for each created insight.

        + * OpsCenter for each created insight. You can use this to update the configuration.

        */ OpsCenter?: OpsCenterIntegrationConfig; + + /** + *

        + * Information about whether DevOps Guru is configured to perform log anomaly detection on Amazon CloudWatch log groups. + *

        + */ + LogsAnomalyDetection?: LogsAnomalyDetectionIntegrationConfig; } export namespace UpdateServiceIntegrationConfig { diff --git a/clients/client-devops-guru/src/pagination/ListAnomalousLogGroupsPaginator.ts b/clients/client-devops-guru/src/pagination/ListAnomalousLogGroupsPaginator.ts new file mode 100644 index 0000000000000..c3828d32fb4b9 --- /dev/null +++ b/clients/client-devops-guru/src/pagination/ListAnomalousLogGroupsPaginator.ts @@ -0,0 +1,61 @@ +// smithy-typescript generated code +import { Paginator } from "@aws-sdk/types"; + +import { + ListAnomalousLogGroupsCommand, + ListAnomalousLogGroupsCommandInput, + ListAnomalousLogGroupsCommandOutput, +} from "../commands/ListAnomalousLogGroupsCommand"; +import { DevOpsGuru } from "../DevOpsGuru"; +import { DevOpsGuruClient } from "../DevOpsGuruClient"; +import { DevOpsGuruPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: DevOpsGuruClient, + input: ListAnomalousLogGroupsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListAnomalousLogGroupsCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: DevOpsGuru, + input: ListAnomalousLogGroupsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listAnomalousLogGroups(input, ...args); +}; +export async function* paginateListAnomalousLogGroups( + config: DevOpsGuruPaginationConfiguration, + input: ListAnomalousLogGroupsCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.NextToken + let token: typeof input.NextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListAnomalousLogGroupsCommandOutput; + while (hasNext) { + input.NextToken = token; + input["MaxResults"] = config.pageSize; + if (config.client instanceof DevOpsGuru) { + page = await makePagedRequest(config.client, 
input, ...additionalArguments); + } else if (config.client instanceof DevOpsGuruClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected DevOpsGuru | DevOpsGuruClient"); + } + yield page; + const prevToken = token; + token = page.NextToken; + hasNext = !!(token && (!config.stopOnSameToken || token !== prevToken)); + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-devops-guru/src/pagination/ListMonitoredResourcesPaginator.ts b/clients/client-devops-guru/src/pagination/ListMonitoredResourcesPaginator.ts new file mode 100644 index 0000000000000..60ee3785e2f7a --- /dev/null +++ b/clients/client-devops-guru/src/pagination/ListMonitoredResourcesPaginator.ts @@ -0,0 +1,61 @@ +// smithy-typescript generated code +import { Paginator } from "@aws-sdk/types"; + +import { + ListMonitoredResourcesCommand, + ListMonitoredResourcesCommandInput, + ListMonitoredResourcesCommandOutput, +} from "../commands/ListMonitoredResourcesCommand"; +import { DevOpsGuru } from "../DevOpsGuru"; +import { DevOpsGuruClient } from "../DevOpsGuruClient"; +import { DevOpsGuruPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: DevOpsGuruClient, + input: ListMonitoredResourcesCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListMonitoredResourcesCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: DevOpsGuru, + input: ListMonitoredResourcesCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listMonitoredResources(input, ...args); +}; +export async function* paginateListMonitoredResources( + config: DevOpsGuruPaginationConfiguration, + input: ListMonitoredResourcesCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.NextToken + let token: 
typeof input.NextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListMonitoredResourcesCommandOutput; + while (hasNext) { + input.NextToken = token; + input["MaxResults"] = config.pageSize; + if (config.client instanceof DevOpsGuru) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof DevOpsGuruClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected DevOpsGuru | DevOpsGuruClient"); + } + yield page; + const prevToken = token; + token = page.NextToken; + hasNext = !!(token && (!config.stopOnSameToken || token !== prevToken)); + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-devops-guru/src/pagination/index.ts b/clients/client-devops-guru/src/pagination/index.ts index 32c3e15520573..c526165ffe396 100644 --- a/clients/client-devops-guru/src/pagination/index.ts +++ b/clients/client-devops-guru/src/pagination/index.ts @@ -8,8 +8,10 @@ export * from "./GetResourceCollectionPaginator"; // smithy-typescript generated code export * from "./Interfaces"; export * from "./ListAnomaliesForInsightPaginator"; +export * from "./ListAnomalousLogGroupsPaginator"; export * from "./ListEventsPaginator"; export * from "./ListInsightsPaginator"; +export * from "./ListMonitoredResourcesPaginator"; export * from "./ListNotificationChannelsPaginator"; export * from "./ListOrganizationInsightsPaginator"; export * from "./ListRecommendationsPaginator"; diff --git a/clients/client-devops-guru/src/protocols/Aws_restJson1.ts b/clients/client-devops-guru/src/protocols/Aws_restJson1.ts index 1d94de98f5a61..db8d6cb109d0a 100644 --- a/clients/client-devops-guru/src/protocols/Aws_restJson1.ts +++ b/clients/client-devops-guru/src/protocols/Aws_restJson1.ts @@ -69,8 +69,16 @@ import { ListAnomaliesForInsightCommandInput, ListAnomaliesForInsightCommandOutput, } from 
"../commands/ListAnomaliesForInsightCommand"; +import { + ListAnomalousLogGroupsCommandInput, + ListAnomalousLogGroupsCommandOutput, +} from "../commands/ListAnomalousLogGroupsCommand"; import { ListEventsCommandInput, ListEventsCommandOutput } from "../commands/ListEventsCommand"; import { ListInsightsCommandInput, ListInsightsCommandOutput } from "../commands/ListInsightsCommand"; +import { + ListMonitoredResourcesCommandInput, + ListMonitoredResourcesCommandOutput, +} from "../commands/ListMonitoredResourcesCommand"; import { ListNotificationChannelsCommandInput, ListNotificationChannelsCommandOutput, @@ -115,6 +123,7 @@ import { AccountHealth, AccountInsightHealth, AmazonCodeGuruProfilerIntegration, + AnomalousLogGroup, AnomalyReportedTimeRange, AnomalyResource, AnomalySourceDetails, @@ -146,6 +155,12 @@ import { ListInsightsClosedStatusFilter, ListInsightsOngoingStatusFilter, ListInsightsStatusFilter, + ListMonitoredResourcesFilters, + LogAnomalyClass, + LogAnomalyShowcase, + LogsAnomalyDetectionIntegration, + LogsAnomalyDetectionIntegrationConfig, + MonitoredResourceIdentifier, NotificationChannel, NotificationChannelConfig, OpsCenterIntegration, @@ -179,6 +194,7 @@ import { ResourceCollection, ResourceCollectionFilter, ResourceNotFoundException, + ResourceTypeFilter, SearchInsightsFilters, SearchOrganizationInsightsFilters, ServiceCollection, @@ -654,6 +670,32 @@ export const serializeAws_restJson1ListAnomaliesForInsightCommand = async ( }); }; +export const serializeAws_restJson1ListAnomalousLogGroupsCommand = async ( + input: ListAnomalousLogGroupsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/list-log-anomalies"; + let body: any; + body = JSON.stringify({ + ...(input.InsightId != null && { InsightId: input.InsightId }), + ...(input.MaxResults != null && { MaxResults: input.MaxResults }), + ...(input.NextToken != null && { NextToken: input.NextToken }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + export const serializeAws_restJson1ListEventsCommand = async ( input: ListEventsCommandInput, context: __SerdeContext @@ -709,6 +751,34 @@ export const serializeAws_restJson1ListInsightsCommand = async ( }); }; +export const serializeAws_restJson1ListMonitoredResourcesCommand = async ( + input: ListMonitoredResourcesCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/monitoredResources"; + let body: any; + body = JSON.stringify({ + ...(input.Filters != null && { + Filters: serializeAws_restJson1ListMonitoredResourcesFilters(input.Filters, context), + }), + ...(input.MaxResults != null && { MaxResults: input.MaxResults }), + ...(input.NextToken != null && { NextToken: input.NextToken }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + export const serializeAws_restJson1ListNotificationChannelsCommand = async ( input: ListNotificationChannelsCommandInput, context: __SerdeContext @@ -1801,6 +1871,9 @@ const deserializeAws_restJson1DescribeServiceIntegrationCommandError = async ( case "InternalServerException": case "com.amazonaws.devopsguru#InternalServerException": throw await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context); + case "ResourceNotFoundException": + case "com.amazonaws.devopsguru#ResourceNotFoundException": + throw await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context); case "ThrottlingException": case "com.amazonaws.devopsguru#ThrottlingException": throw await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context); @@ -2026,6 +2099,71 @@ const deserializeAws_restJson1ListAnomaliesForInsightCommandError = async ( } }; +export const deserializeAws_restJson1ListAnomalousLogGroupsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1ListAnomalousLogGroupsCommandError(output, context); + } + const contents: ListAnomalousLogGroupsCommandOutput = { + $metadata: deserializeMetadata(output), + AnomalousLogGroups: undefined, + InsightId: undefined, + NextToken: undefined, + }; + const data: Record = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if 
(data.AnomalousLogGroups !== undefined && data.AnomalousLogGroups !== null) { + contents.AnomalousLogGroups = deserializeAws_restJson1AnomalousLogGroups(data.AnomalousLogGroups, context); + } + if (data.InsightId !== undefined && data.InsightId !== null) { + contents.InsightId = __expectString(data.InsightId); + } + if (data.NextToken !== undefined && data.NextToken !== null) { + contents.NextToken = __expectString(data.NextToken); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1ListAnomalousLogGroupsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + const errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.devopsguru#AccessDeniedException": + throw await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context); + case "InternalServerException": + case "com.amazonaws.devopsguru#InternalServerException": + throw await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context); + case "ResourceNotFoundException": + case "com.amazonaws.devopsguru#ResourceNotFoundException": + throw await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context); + case "ThrottlingException": + case "com.amazonaws.devopsguru#ThrottlingException": + throw await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context); + case "ValidationException": + case "com.amazonaws.devopsguru#ValidationException": + throw await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + const $metadata = deserializeMetadata(output); + const statusCode = $metadata.httpStatusCode ? 
$metadata.httpStatusCode + "" : undefined; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode || statusCode || "UnknowError", + $fault: "client", + $metadata, + }); + throw __decorateServiceException(response, parsedBody); + } +}; + export const deserializeAws_restJson1ListEventsCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -2149,6 +2287,67 @@ const deserializeAws_restJson1ListInsightsCommandError = async ( } }; +export const deserializeAws_restJson1ListMonitoredResourcesCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1ListMonitoredResourcesCommandError(output, context); + } + const contents: ListMonitoredResourcesCommandOutput = { + $metadata: deserializeMetadata(output), + MonitoredResourceIdentifiers: undefined, + NextToken: undefined, + }; + const data: Record = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.MonitoredResourceIdentifiers !== undefined && data.MonitoredResourceIdentifiers !== null) { + contents.MonitoredResourceIdentifiers = deserializeAws_restJson1MonitoredResourceIdentifiers( + data.MonitoredResourceIdentifiers, + context + ); + } + if (data.NextToken !== undefined && data.NextToken !== null) { + contents.NextToken = __expectString(data.NextToken); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1ListMonitoredResourcesCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + const errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServerException": + case "com.amazonaws.devopsguru#InternalServerException": + throw await 
deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context); + case "ResourceNotFoundException": + case "com.amazonaws.devopsguru#ResourceNotFoundException": + throw await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context); + case "ThrottlingException": + case "com.amazonaws.devopsguru#ThrottlingException": + throw await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context); + case "ValidationException": + case "com.amazonaws.devopsguru#ValidationException": + throw await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + const $metadata = deserializeMetadata(output); + const statusCode = $metadata.httpStatusCode ? $metadata.httpStatusCode + "" : undefined; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode || statusCode || "UnknowError", + $fault: "client", + $metadata, + }); + throw __decorateServiceException(response, parsedBody); + } +}; + export const deserializeAws_restJson1ListNotificationChannelsCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -3138,6 +3337,27 @@ const serializeAws_restJson1ListInsightsStatusFilter = ( }; }; +const serializeAws_restJson1ListMonitoredResourcesFilters = ( + input: ListMonitoredResourcesFilters, + context: __SerdeContext +): any => { + return { + ...(input.ResourcePermission != null && { ResourcePermission: input.ResourcePermission }), + ...(input.ResourceTypeFilters != null && { + ResourceTypeFilters: serializeAws_restJson1ResourceTypeFilters(input.ResourceTypeFilters, context), + }), + }; +}; + +const serializeAws_restJson1LogsAnomalyDetectionIntegrationConfig = ( + input: LogsAnomalyDetectionIntegrationConfig, + context: __SerdeContext +): any => { + return { + ...(input.OptInStatus != null && { OptInStatus: input.OptInStatus }), + }; +}; + const serializeAws_restJson1NotificationChannelConfig = ( input: 
NotificationChannelConfig, context: __SerdeContext @@ -3176,6 +3396,20 @@ const serializeAws_restJson1ResourceCollection = (input: ResourceCollection, con }; }; +const serializeAws_restJson1ResourceTypeFilters = ( + input: (ResourceTypeFilter | string)[], + context: __SerdeContext +): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + const serializeAws_restJson1SearchInsightsAccountIdList = (input: string[], context: __SerdeContext): any => { return input .filter((e: any) => e != null) @@ -3340,6 +3574,12 @@ const serializeAws_restJson1UpdateServiceIntegrationConfig = ( context: __SerdeContext ): any => { return { + ...(input.LogsAnomalyDetection != null && { + LogsAnomalyDetection: serializeAws_restJson1LogsAnomalyDetectionIntegrationConfig( + input.LogsAnomalyDetection, + context + ), + }), ...(input.OpsCenter != null && { OpsCenter: serializeAws_restJson1OpsCenterIntegrationConfig(input.OpsCenter, context), }), @@ -3427,6 +3667,37 @@ const deserializeAws_restJson1AmazonCodeGuruProfilerIntegration = ( } as any; }; +const deserializeAws_restJson1AnomalousLogGroup = (output: any, context: __SerdeContext): AnomalousLogGroup => { + return { + ImpactEndTime: + output.ImpactEndTime != null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.ImpactEndTime))) + : undefined, + ImpactStartTime: + output.ImpactStartTime != null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.ImpactStartTime))) + : undefined, + LogAnomalyShowcases: + output.LogAnomalyShowcases != null + ? 
deserializeAws_restJson1LogAnomalyShowcases(output.LogAnomalyShowcases, context) + : undefined, + LogGroupName: __expectString(output.LogGroupName), + NumberOfLogLinesScanned: __expectInt32(output.NumberOfLogLinesScanned), + } as any; +}; + +const deserializeAws_restJson1AnomalousLogGroups = (output: any, context: __SerdeContext): AnomalousLogGroup[] => { + const retVal = (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1AnomalousLogGroup(entry, context); + }); + return retVal; +}; + const deserializeAws_restJson1AnomalyReportedTimeRange = ( output: any, context: __SerdeContext @@ -3771,6 +4042,89 @@ const deserializeAws_restJson1InsightTimeRange = (output: any, context: __SerdeC } as any; }; +const deserializeAws_restJson1LogAnomalyClass = (output: any, context: __SerdeContext): LogAnomalyClass => { + return { + Explanation: __expectString(output.Explanation), + LogAnomalyToken: __expectString(output.LogAnomalyToken), + LogAnomalyType: __expectString(output.LogAnomalyType), + LogEventId: __expectString(output.LogEventId), + LogEventTimestamp: + output.LogEventTimestamp != null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.LogEventTimestamp))) + : undefined, + LogStreamName: __expectString(output.LogStreamName), + NumberOfLogLinesOccurrences: __expectInt32(output.NumberOfLogLinesOccurrences), + } as any; +}; + +const deserializeAws_restJson1LogAnomalyClasses = (output: any, context: __SerdeContext): LogAnomalyClass[] => { + const retVal = (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1LogAnomalyClass(entry, context); + }); + return retVal; +}; + +const deserializeAws_restJson1LogAnomalyShowcase = (output: any, context: __SerdeContext): LogAnomalyShowcase => { + return { + LogAnomalyClasses: + output.LogAnomalyClasses != null + ? 
deserializeAws_restJson1LogAnomalyClasses(output.LogAnomalyClasses, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1LogAnomalyShowcases = (output: any, context: __SerdeContext): LogAnomalyShowcase[] => { + const retVal = (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1LogAnomalyShowcase(entry, context); + }); + return retVal; +}; + +const deserializeAws_restJson1LogsAnomalyDetectionIntegration = ( + output: any, + context: __SerdeContext +): LogsAnomalyDetectionIntegration => { + return { + OptInStatus: __expectString(output.OptInStatus), + } as any; +}; + +const deserializeAws_restJson1MonitoredResourceIdentifier = ( + output: any, + context: __SerdeContext +): MonitoredResourceIdentifier => { + return { + MonitoredResourceName: __expectString(output.MonitoredResourceName), + ResourcePermission: __expectString(output.ResourcePermission), + Type: __expectString(output.Type), + } as any; +}; + +const deserializeAws_restJson1MonitoredResourceIdentifiers = ( + output: any, + context: __SerdeContext +): MonitoredResourceIdentifier[] => { + const retVal = (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1MonitoredResourceIdentifier(entry, context); + }); + return retVal; +}; + const deserializeAws_restJson1NotificationChannel = (output: any, context: __SerdeContext): NotificationChannel => { return { Config: @@ -4639,6 +4993,10 @@ const deserializeAws_restJson1ServiceIntegrationConfig = ( context: __SerdeContext ): ServiceIntegrationConfig => { return { + LogsAnomalyDetection: + output.LogsAnomalyDetection != null + ? deserializeAws_restJson1LogsAnomalyDetectionIntegration(output.LogsAnomalyDetection, context) + : undefined, OpsCenter: output.OpsCenter != null ? 
deserializeAws_restJson1OpsCenterIntegration(output.OpsCenter, context) : undefined, } as any; diff --git a/codegen/sdk-codegen/aws-models/devops-guru.json b/codegen/sdk-codegen/aws-models/devops-guru.json index 83f395b5524f9..fb994db2a95df 100644 --- a/codegen/sdk-codegen/aws-models/devops-guru.json +++ b/codegen/sdk-codegen/aws-models/devops-guru.json @@ -181,6 +181,50 @@ "smithy.api#documentation": "

        Information about your account's integration with Amazon CodeGuru Profiler. This\n\t\t\treturns whether DevOps Guru is configured to consume recommendations generated from Amazon\n\t\t\tCodeGuru Profiler.

        " } }, + "com.amazonaws.devopsguru#AnomalousLogGroup": { + "type": "structure", + "members": { + "LogGroupName": { + "target": "com.amazonaws.devopsguru#LogGroupName", + "traits": { + "smithy.api#documentation": "

        \n\t\t\tThe name of the CloudWatch log group.\n\t\t

        " + } + }, + "ImpactStartTime": { + "target": "com.amazonaws.devopsguru#Timestamp", + "traits": { + "smithy.api#documentation": "

        \n\t\t\tThe time the anomalous log events began. The impact start time indicates the time of the first log anomaly event that occurs.\n\t\t

        " + } + }, + "ImpactEndTime": { + "target": "com.amazonaws.devopsguru#Timestamp", + "traits": { + "smithy.api#documentation": "

        \n\t\t\tThe time the anomalous log events stopped.\n\t\t

        " + } + }, + "NumberOfLogLinesScanned": { + "target": "com.amazonaws.devopsguru#NumberOfLogLinesScanned", + "traits": { + "smithy.api#documentation": "

        \n\t\t\tThe number of log lines that were scanned for anomalous log events.\n\t\t

        " + } + }, + "LogAnomalyShowcases": { + "target": "com.amazonaws.devopsguru#LogAnomalyShowcases", + "traits": { + "smithy.api#documentation": "

        \n\t\t\tThe log anomalies in the log group. Each log anomaly displayed represents a cluster of similar anomalous log events.\n\t\t

        " + } + } + }, + "traits": { + "smithy.api#documentation": "

        \n\t\t\tAn Amazon CloudWatch log group that contains log anomalies and is used to generate an insight.\n\t\t

        " + } + }, + "com.amazonaws.devopsguru#AnomalousLogGroups": { + "type": "list", + "member": { + "target": "com.amazonaws.devopsguru#AnomalousLogGroup" + } + }, "com.amazonaws.devopsguru#AnomalyDescription": { "type": "string" }, @@ -462,12 +506,18 @@ { "target": "com.amazonaws.devopsguru#ListAnomaliesForInsight" }, + { + "target": "com.amazonaws.devopsguru#ListAnomalousLogGroups" + }, { "target": "com.amazonaws.devopsguru#ListEvents" }, { "target": "com.amazonaws.devopsguru#ListInsights" }, + { + "target": "com.amazonaws.devopsguru#ListMonitoredResources" + }, { "target": "com.amazonaws.devopsguru#ListNotificationChannels" }, @@ -699,7 +749,7 @@ } }, "traits": { - "smithy.api#documentation": "

        The dimension of am Amazon CloudWatch metric that is used when DevOps Guru analyzes the resources in\n\t\t\tyour account for operational problems and anomalous behavior. A dimension is a\n\t\t\tname/value pair that is part of the identity of a metric. A metric can have up to 10\n\t\t\tdimensions. For more information, see Dimensions in the Amazon CloudWatch User Guide.

        " + "smithy.api#documentation": "

        The dimension of an Amazon CloudWatch metric that is used when DevOps Guru analyzes the resources in\n\t\t\tyour account for operational problems and anomalous behavior. A dimension is a\n\t\t\tname/value pair that is part of the identity of a metric. A metric can have up to 10\n\t\t\tdimensions. For more information, see Dimensions in the Amazon CloudWatch User Guide.

        " } }, "com.amazonaws.devopsguru#CloudWatchMetricsDimensionName": { @@ -1699,6 +1749,9 @@ { "target": "com.amazonaws.devopsguru#InternalServerException" }, + { + "target": "com.amazonaws.devopsguru#ResourceNotFoundException" + }, { "target": "com.amazonaws.devopsguru#ThrottlingException" }, @@ -1995,6 +2048,15 @@ "target": "com.amazonaws.devopsguru#Event" } }, + "com.amazonaws.devopsguru#Explanation": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2048 + } + } + }, "com.amazonaws.devopsguru#GetCostEstimation": { "type": "operation", "input": { @@ -2483,6 +2545,104 @@ } } }, + "com.amazonaws.devopsguru#ListAnomalousLogGroups": { + "type": "operation", + "input": { + "target": "com.amazonaws.devopsguru#ListAnomalousLogGroupsRequest" + }, + "output": { + "target": "com.amazonaws.devopsguru#ListAnomalousLogGroupsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.devopsguru#AccessDeniedException" + }, + { + "target": "com.amazonaws.devopsguru#InternalServerException" + }, + { + "target": "com.amazonaws.devopsguru#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.devopsguru#ThrottlingException" + }, + { + "target": "com.amazonaws.devopsguru#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

        \n\t\t\tReturns the list of log groups that contain log anomalies. \n\t\t

        ", + "smithy.api#http": { + "method": "POST", + "uri": "/list-log-anomalies", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.devopsguru#ListAnomalousLogGroupsMaxResults": { + "type": "integer", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": 1, + "max": 200 + } + } + }, + "com.amazonaws.devopsguru#ListAnomalousLogGroupsRequest": { + "type": "structure", + "members": { + "InsightId": { + "target": "com.amazonaws.devopsguru#InsightId", + "traits": { + "smithy.api#documentation": "

        \n\t\t\tThe ID of the insight containing the log groups.\n\t\t

        ", + "smithy.api#required": {} + } + }, + "MaxResults": { + "target": "com.amazonaws.devopsguru#ListAnomalousLogGroupsMaxResults", + "traits": { + "smithy.api#documentation": "

        The maximum number of results to return with a single call.\n\tTo retrieve the remaining results, make another call with the returned nextToken value.

        " + } + }, + "NextToken": { + "target": "com.amazonaws.devopsguru#UuidNextToken", + "traits": { + "smithy.api#documentation": "

        The pagination token to use to retrieve \n the next page of results for this operation. If this value is null, it retrieves the first page.

        " + } + } + } + }, + "com.amazonaws.devopsguru#ListAnomalousLogGroupsResponse": { + "type": "structure", + "members": { + "InsightId": { + "target": "com.amazonaws.devopsguru#InsightId", + "traits": { + "smithy.api#documentation": "

        \n\t\t\tThe ID of the insight containing the log groups.\n\t\t

        ", + "smithy.api#required": {} + } + }, + "AnomalousLogGroups": { + "target": "com.amazonaws.devopsguru#AnomalousLogGroups", + "traits": { + "smithy.api#documentation": "

        \n\t\t\tThe list of Amazon CloudWatch log groups that are related to an insight.\n\t\t

        ", + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "com.amazonaws.devopsguru#UuidNextToken", + "traits": { + "smithy.api#documentation": "

        The pagination token to use to retrieve \n the next page of results for this operation. If there are no more pages, this value is null.

        " + } + } + } + }, "com.amazonaws.devopsguru#ListEvents": { "type": "operation", "input": { @@ -2824,6 +2984,116 @@ "smithy.api#documentation": "

        A filter used by ListInsights to specify which insights to return.\n\t\t

        " } }, + "com.amazonaws.devopsguru#ListMonitoredResources": { + "type": "operation", + "input": { + "target": "com.amazonaws.devopsguru#ListMonitoredResourcesRequest" + }, + "output": { + "target": "com.amazonaws.devopsguru#ListMonitoredResourcesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.devopsguru#InternalServerException" + }, + { + "target": "com.amazonaws.devopsguru#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.devopsguru#ThrottlingException" + }, + { + "target": "com.amazonaws.devopsguru#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

        \n\t\t\tReturns the list of all log groups that are being monitored and tagged by DevOps Guru.\n\t\t

        ", + "smithy.api#http": { + "method": "POST", + "uri": "/monitoredResources", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.devopsguru#ListMonitoredResourcesFilters": { + "type": "structure", + "members": { + "ResourcePermission": { + "target": "com.amazonaws.devopsguru#ResourcePermission", + "traits": { + "smithy.api#documentation": "

        \n\t\t\tThe permission status of a resource.\n\t\t

        ", + "smithy.api#required": {} + } + }, + "ResourceTypeFilters": { + "target": "com.amazonaws.devopsguru#ResourceTypeFilters", + "traits": { + "smithy.api#documentation": "

        \n\t\t\tThe type of resource that you wish to retrieve, such as log groups. \n\t\t

        ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

        \n\t\t\tFilters to determine which monitored resources you want to retrieve. You can filter by resource type or resource permission status.\n\t\t

        " + } + }, + "com.amazonaws.devopsguru#ListMonitoredResourcesMaxResults": { + "type": "integer", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": 1, + "max": 50 + } + } + }, + "com.amazonaws.devopsguru#ListMonitoredResourcesRequest": { + "type": "structure", + "members": { + "Filters": { + "target": "com.amazonaws.devopsguru#ListMonitoredResourcesFilters", + "traits": { + "smithy.api#documentation": "

        \n\t\t\tFilters to determine which monitored resources you want to retrieve. You can filter by resource type or resource permission status.\n\t\t

        ", + "smithy.api#required": {} + } + }, + "MaxResults": { + "target": "com.amazonaws.devopsguru#ListMonitoredResourcesMaxResults", + "traits": { + "smithy.api#documentation": "

        The maximum number of results to return with a single call.\n\tTo retrieve the remaining results, make another call with the returned nextToken value.

        " + } + }, + "NextToken": { + "target": "com.amazonaws.devopsguru#UuidNextToken", + "traits": { + "smithy.api#documentation": "

        The pagination token to use to retrieve \n the next page of results for this operation. If this value is null, it retrieves the first page.

        " + } + } + } + }, + "com.amazonaws.devopsguru#ListMonitoredResourcesResponse": { + "type": "structure", + "members": { + "MonitoredResourceIdentifiers": { + "target": "com.amazonaws.devopsguru#MonitoredResourceIdentifiers", + "traits": { + "smithy.api#documentation": "

        \n\t\t\tInformation about the resource that is being monitored, including the name of the resource, the type of resource, and whether or not permission is given to DevOps Guru to access that resource.\n\t\t

        ", + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "com.amazonaws.devopsguru#UuidNextToken", + "traits": { + "smithy.api#documentation": "

        The pagination token to use to retrieve \n the next page of results for this operation. If there are no more pages, this value is null.

        " + } + } + } + }, "com.amazonaws.devopsguru#ListNotificationChannels": { "type": "operation", "input": { @@ -3119,6 +3389,197 @@ ] } }, + "com.amazonaws.devopsguru#LogAnomalyClass": { + "type": "structure", + "members": { + "LogStreamName": { + "target": "com.amazonaws.devopsguru#LogStreamName", + "traits": { + "smithy.api#documentation": "

        \n\t\t\tThe name of the Amazon CloudWatch log stream that the anomalous log event belongs to. A log stream is a sequence of log events that share the same source.\n\t\t

        " + } + }, + "LogAnomalyType": { + "target": "com.amazonaws.devopsguru#LogAnomalyType", + "traits": { + "smithy.api#documentation": "

        \n\t\t\tThe type of log anomaly that has been detected. \n\t\t

        " + } + }, + "LogAnomalyToken": { + "target": "com.amazonaws.devopsguru#LogAnomalyToken", + "traits": { + "smithy.api#documentation": "

        \n\t\t\tThe token where the anomaly was detected. This may refer to an exception or another location, or it may be blank for log anomalies such as format anomalies.\n\t\t

        " + } + }, + "LogEventId": { + "target": "com.amazonaws.devopsguru#LogEventId", + "traits": { + "smithy.api#documentation": "

        \n\t\t\tThe ID of the log event.\n\t\t

        " + } + }, + "Explanation": { + "target": "com.amazonaws.devopsguru#Explanation", + "traits": { + "smithy.api#documentation": "

        \n\t\t\tThe explanation for why the log event is considered an anomaly.\n\t\t

        " + } + }, + "NumberOfLogLinesOccurrences": { + "target": "com.amazonaws.devopsguru#NumberOfLogLinesOccurrences", + "traits": { + "smithy.api#documentation": "

        \n\t\t\tThe number of log lines where this anomalous log event occurs.\n\t\t

        " + } + }, + "LogEventTimestamp": { + "target": "com.amazonaws.devopsguru#Timestamp", + "traits": { + "smithy.api#documentation": "

        \n\t\t\tThe time of the first occurrence of the anomalous log event.\n\t\t

        " + } + } + }, + "traits": { + "smithy.api#documentation": "

        \n\t\t\tInformation about an anomalous log event found within a log group.\n\t\t

        " + } + }, + "com.amazonaws.devopsguru#LogAnomalyClasses": { + "type": "list", + "member": { + "target": "com.amazonaws.devopsguru#LogAnomalyClass" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 10 + } + } + }, + "com.amazonaws.devopsguru#LogAnomalyShowcase": { + "type": "structure", + "members": { + "LogAnomalyClasses": { + "target": "com.amazonaws.devopsguru#LogAnomalyClasses", + "traits": { + "smithy.api#documentation": "

        \n\t\t\tA list of anomalous log events that may be related.\n\t\t

        " + } + } + }, + "traits": { + "smithy.api#documentation": "

        \n\t\t\tA cluster of similar anomalous log events found within a log group.\n\t\t

        " + } + }, + "com.amazonaws.devopsguru#LogAnomalyShowcases": { + "type": "list", + "member": { + "target": "com.amazonaws.devopsguru#LogAnomalyShowcase" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 20 + } + } + }, + "com.amazonaws.devopsguru#LogAnomalyToken": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2048 + } + } + }, + "com.amazonaws.devopsguru#LogAnomalyType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "KEYWORD", + "name": "KEYWORD" + }, + { + "value": "KEYWORD_TOKEN", + "name": "KEYWORD_TOKEN" + }, + { + "value": "FORMAT", + "name": "FORMAT" + }, + { + "value": "HTTP_CODE", + "name": "HTTP_CODE" + }, + { + "value": "BLOCK_FORMAT", + "name": "BLOCK_FORMAT" + }, + { + "value": "NUMERICAL_POINT", + "name": "NUMERICAL_POINT" + }, + { + "value": "NUMERICAL_NAN", + "name": "NUMERICAL_NAN" + }, + { + "value": "NEW_FIELD_NAME", + "name": "NEW_FIELD_NAME" + } + ] + } + }, + "com.amazonaws.devopsguru#LogEventId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 512 + } + } + }, + "com.amazonaws.devopsguru#LogGroupName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 512 + } + } + }, + "com.amazonaws.devopsguru#LogStreamName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 512 + } + } + }, + "com.amazonaws.devopsguru#LogsAnomalyDetectionIntegration": { + "type": "structure", + "members": { + "OptInStatus": { + "target": "com.amazonaws.devopsguru#OptInStatus", + "traits": { + "smithy.api#documentation": "

        Specifies if DevOps Guru is configured to perform log anomaly detection on CloudWatch log groups.

        " + } + } + }, + "traits": { + "smithy.api#documentation": "

        \n\t\t\tInformation about the integration of DevOps Guru with CloudWatch log groups for log anomaly detection.\n\t\t

        " + } + }, + "com.amazonaws.devopsguru#LogsAnomalyDetectionIntegrationConfig": { + "type": "structure", + "members": { + "OptInStatus": { + "target": "com.amazonaws.devopsguru#OptInStatus", + "traits": { + "smithy.api#documentation": "

        Specifies if DevOps Guru is configured to perform log anomaly detection on CloudWatch log groups.

        " + } + } + }, + "traits": { + "smithy.api#documentation": "

        \n\t\t\tInformation about the integration of DevOps Guru with CloudWatch log groups for log anomaly detection. You can use this to update the configuration.\n\t\t

        " + } + }, "com.amazonaws.devopsguru#MeanTimeToRecoverInMilliseconds": { "type": "long", "traits": { @@ -3131,6 +3592,48 @@ "smithy.api#box": {} } }, + "com.amazonaws.devopsguru#MonitoredResourceIdentifier": { + "type": "structure", + "members": { + "MonitoredResourceName": { + "target": "com.amazonaws.devopsguru#MonitoredResourceName", + "traits": { + "smithy.api#documentation": "

        \n\t\t\tThe name of the resource being monitored.\n\t\t

        " + } + }, + "Type": { + "target": "com.amazonaws.devopsguru#ResourceType", + "traits": { + "smithy.api#documentation": "

        \n\t\t\tThe type of resource being monitored.\n\t\t

        " + } + }, + "ResourcePermission": { + "target": "com.amazonaws.devopsguru#ResourcePermission", + "traits": { + "smithy.api#documentation": "

        \n\t\t\tThe permission status of a resource.\n\t\t

        " + } + } + }, + "traits": { + "smithy.api#documentation": "

        \n\t\t\tInformation about the resource that is being monitored, including the name of the resource, the type of resource, and whether or not permission is given to DevOps Guru to access that resource.\n\t\t

        " + } + }, + "com.amazonaws.devopsguru#MonitoredResourceIdentifiers": { + "type": "list", + "member": { + "target": "com.amazonaws.devopsguru#MonitoredResourceIdentifier" + } + }, + "com.amazonaws.devopsguru#MonitoredResourceName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 512 + }, + "smithy.api#pattern": "^[\\.\\-_\\/#A-Za-z0-9]+$" + } + }, "com.amazonaws.devopsguru#NotificationChannel": { "type": "structure", "members": { @@ -3191,6 +3694,12 @@ "com.amazonaws.devopsguru#NumReactiveInsights": { "type": "integer" }, + "com.amazonaws.devopsguru#NumberOfLogLinesOccurrences": { + "type": "integer" + }, + "com.amazonaws.devopsguru#NumberOfLogLinesScanned": { + "type": "integer" + }, "com.amazonaws.devopsguru#OpsCenterIntegration": { "type": "structure", "members": { @@ -3216,7 +3725,7 @@ } }, "traits": { - "smithy.api#documentation": "

        Information about whether DevOps Guru is configured to create an OpsItem in Amazon Web Services Systems Manager\n\t\t\tOpsCenter for each created insight.

        " + "smithy.api#documentation": "

        Information about whether DevOps Guru is configured to create an OpsItem in Amazon Web Services Systems Manager\n\t\t\tOpsCenter for each created insight. You can use this to update the configuration.

        " } }, "com.amazonaws.devopsguru#OptInStatus": { @@ -4709,6 +5218,21 @@ "smithy.api#httpError": 404 } }, + "com.amazonaws.devopsguru#ResourcePermission": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "FULL_PERMISSION", + "name": "FULL_PERMISSION" + }, + { + "value": "MISSING_PERMISSION", + "name": "MISSING_PERMISSION" + } + ] + } + }, "com.amazonaws.devopsguru#ResourceType": { "type": "string", "traits": { @@ -4719,6 +5243,23 @@ "smithy.api#pattern": "^[a-zA-Z]+[a-zA-Z0-9-_:]*$" } }, + "com.amazonaws.devopsguru#ResourceTypeFilter": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "LOG_GROUPS", + "name": "LOG_GROUPS" + } + ] + } + }, + "com.amazonaws.devopsguru#ResourceTypeFilters": { + "type": "list", + "member": { + "target": "com.amazonaws.devopsguru#ResourceTypeFilter" + } + }, "com.amazonaws.devopsguru#RetryAfterSeconds": { "type": "integer" }, @@ -5075,6 +5616,12 @@ "traits": { "smithy.api#documentation": "

        Information about whether DevOps Guru is configured to create an OpsItem in Amazon Web Services Systems Manager\n\t\t\tOpsCenter for each created insight.

        " } + }, + "LogsAnomalyDetection": { + "target": "com.amazonaws.devopsguru#LogsAnomalyDetectionIntegration", + "traits": { + "smithy.api#documentation": "

        \n\t\t\tInformation about whether DevOps Guru is configured to perform log anomaly detection on Amazon CloudWatch log groups.\n\t\t

        " + } } }, "traits": { @@ -5767,6 +6314,12 @@ "members": { "OpsCenter": { "target": "com.amazonaws.devopsguru#OpsCenterIntegrationConfig" + }, + "LogsAnomalyDetection": { + "target": "com.amazonaws.devopsguru#LogsAnomalyDetectionIntegrationConfig", + "traits": { + "smithy.api#documentation": "

        \n\t\t\tInformation about whether DevOps Guru is configured to perform log anomaly detection on Amazon CloudWatch log groups.\n\t\t

        " + } } }, "traits": { From cc8711ec0859d60e5f82bd84382fd93b6aefca3a Mon Sep 17 00:00:00 2001 From: awstools Date: Tue, 19 Jul 2022 18:24:17 +0000 Subject: [PATCH 12/31] docs(client-glue): Documentation updates for AWS Glue Job Timeout and Autoscaling --- clients/client-glue/src/models/models_0.ts | 7 ++++--- clients/client-glue/src/models/models_2.ts | 5 +++-- codegen/sdk-codegen/aws-models/glue.json | 6 +++--- 3 files changed, 10 insertions(+), 8 deletions(-) diff --git a/clients/client-glue/src/models/models_0.ts b/clients/client-glue/src/models/models_0.ts index 5b34f5ac68faa..9d6d0dc263920 100644 --- a/clients/client-glue/src/models/models_0.ts +++ b/clients/client-glue/src/models/models_0.ts @@ -5644,8 +5644,9 @@ export interface JobRun { /** *

        The JobRun timeout in minutes. This is the maximum time that a job run can - * consume resources before it is terminated and enters TIMEOUT status. The default - * is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.

        + * consume resources before it is terminated and enters TIMEOUT status. This value overrides the timeout value set in the parent job.

        + * + *

        Streaming jobs do not have a timeout. The default for non-streaming jobs is 2,880 minutes (48 hours).

        */ Timeout?: number; @@ -5726,7 +5727,7 @@ export interface JobRun { GlueVersion?: string; /** - *

        This field populates only when an Auto Scaling job run completes, and represents the total time each executor ran during the lifecycle of a job run in seconds, multiplied by a DPU factor (1 for G.1X and 2 for G.2X workers). This value may be different than the executionEngineRuntime * MaxCapacity as in the case of Auto Scaling jobs, as the number of executors running at a given time may be less than the MaxCapacity. Therefore, it is possible that the value of DPUSeconds is less than executionEngineRuntime * MaxCapacity.

        + *

        This field populates only for Auto Scaling job runs, and represents the total time each executor ran during the lifecycle of a job run in seconds, multiplied by a DPU factor (1 for G.1X, 2 for G.2X, or 0.25 for G.025X workers). This value may be different than the executionEngineRuntime * MaxCapacity as in the case of Auto Scaling jobs, as the number of executors running at a given time may be less than the MaxCapacity. Therefore, it is possible that the value of DPUSeconds is less than executionEngineRuntime * MaxCapacity.

        */ DPUSeconds?: number; } diff --git a/clients/client-glue/src/models/models_2.ts b/clients/client-glue/src/models/models_2.ts index 143e26bea0ef1..1575adbd749c8 100644 --- a/clients/client-glue/src/models/models_2.ts +++ b/clients/client-glue/src/models/models_2.ts @@ -833,8 +833,9 @@ export interface StartJobRunRequest { /** *

        The JobRun timeout in minutes. This is the maximum time that a job run can - * consume resources before it is terminated and enters TIMEOUT status. The default - * is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.

        + * consume resources before it is terminated and enters TIMEOUT status. This value overrides the timeout value set in the parent job.

        + * + *

        Streaming jobs do not have a timeout. The default for non-streaming jobs is 2,880 minutes (48 hours).

        */ Timeout?: number; diff --git a/codegen/sdk-codegen/aws-models/glue.json b/codegen/sdk-codegen/aws-models/glue.json index 91c01e8a649b0..b2432b24918a5 100644 --- a/codegen/sdk-codegen/aws-models/glue.json +++ b/codegen/sdk-codegen/aws-models/glue.json @@ -16018,7 +16018,7 @@ "Timeout": { "target": "com.amazonaws.glue#Timeout", "traits": { - "smithy.api#documentation": "

        The JobRun timeout in minutes. This is the maximum time that a job run can\n consume resources before it is terminated and enters TIMEOUT status. The default\n is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.

        " + "smithy.api#documentation": "

        The JobRun timeout in minutes. This is the maximum time that a job run can\n consume resources before it is terminated and enters TIMEOUT status. This value overrides the timeout value set in the parent job.

        \n \n

        Streaming jobs do not have a timeout. The default for non-streaming jobs is 2,880 minutes (48 hours).

        " } }, "MaxCapacity": { @@ -16066,7 +16066,7 @@ "DPUSeconds": { "target": "com.amazonaws.glue#NullableDouble", "traits": { - "smithy.api#documentation": "

        This field populates only when an Auto Scaling job run completes, and represents the total time each executor ran during the lifecycle of a job run in seconds, multiplied by a DPU factor (1 for G.1X and 2 for G.2X workers). This value may be different than the executionEngineRuntime * MaxCapacity as in the case of Auto Scaling jobs, as the number of executors running at a given time may be less than the MaxCapacity. Therefore, it is possible that the value of DPUSeconds is less than executionEngineRuntime * MaxCapacity.

        " + "smithy.api#documentation": "

        This field populates only for Auto Scaling job runs, and represents the total time each executor ran during the lifecycle of a job run in seconds, multiplied by a DPU factor (1 for G.1X, 2 for G.2X, or 0.25 for G.025X workers). This value may be different than the executionEngineRuntime * MaxCapacity as in the case of Auto Scaling jobs, as the number of executors running at a given time may be less than the MaxCapacity. Therefore, it is possible that the value of DPUSeconds is less than executionEngineRuntime * MaxCapacity.

        " } } }, @@ -23233,7 +23233,7 @@ "Timeout": { "target": "com.amazonaws.glue#Timeout", "traits": { - "smithy.api#documentation": "

        The JobRun timeout in minutes. This is the maximum time that a job run can\n consume resources before it is terminated and enters TIMEOUT status. The default\n is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.

        " + "smithy.api#documentation": "

        The JobRun timeout in minutes. This is the maximum time that a job run can\n consume resources before it is terminated and enters TIMEOUT status. This value overrides the timeout value set in the parent job.

        \n \n

        Streaming jobs do not have a timeout. The default for non-streaming jobs is 2,880 minutes (48 hours).

        " } }, "MaxCapacity": { From 6481e598bf2360dfe18714996867eeed605a6f82 Mon Sep 17 00:00:00 2001 From: awstools Date: Tue, 19 Jul 2022 18:24:17 +0000 Subject: [PATCH 13/31] feat(client-workspaces): Increased the character limit of the login message from 850 to 2000 characters. --- clients/client-workspaces/src/protocols/Aws_json1_1.ts | 3 +++ codegen/sdk-codegen/aws-models/workspaces.json | 5 ++++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/clients/client-workspaces/src/protocols/Aws_json1_1.ts b/clients/client-workspaces/src/protocols/Aws_json1_1.ts index 9b749f8abb36e..812da26737eff 100644 --- a/clients/client-workspaces/src/protocols/Aws_json1_1.ts +++ b/clients/client-workspaces/src/protocols/Aws_json1_1.ts @@ -3119,6 +3119,9 @@ const deserializeAws_json1_1ImportClientBrandingCommandError = async ( case "ResourceLimitExceededException": case "com.amazonaws.workspaces#ResourceLimitExceededException": throw await deserializeAws_json1_1ResourceLimitExceededExceptionResponse(parsedOutput, context); + case "ResourceNotFoundException": + case "com.amazonaws.workspaces#ResourceNotFoundException": + throw await deserializeAws_json1_1ResourceNotFoundExceptionResponse(parsedOutput, context); default: const parsedBody = parsedOutput.body; const $metadata = deserializeMetadata(output); diff --git a/codegen/sdk-codegen/aws-models/workspaces.json b/codegen/sdk-codegen/aws-models/workspaces.json index 7e6304c26a24f..8f53d72ead952 100644 --- a/codegen/sdk-codegen/aws-models/workspaces.json +++ b/codegen/sdk-codegen/aws-models/workspaces.json @@ -475,7 +475,7 @@ "traits": { "smithy.api#length": { "min": 0, - "max": 850 + "max": 2000 }, "smithy.api#pattern": "^.*$" } @@ -3160,6 +3160,9 @@ }, { "target": "com.amazonaws.workspaces#ResourceLimitExceededException" + }, + { + "target": "com.amazonaws.workspaces#ResourceNotFoundException" } ], "traits": { From 316a691491232facc572b8482f9084355702bfbf Mon Sep 17 00:00:00 2001 From: awstools Date: Tue, 19 Jul 
2022 18:24:17 +0000 Subject: [PATCH 14/31] feat(client-sagemaker-edge): Amazon SageMaker Edge Manager provides lightweight model deployment feature to deploy machine learning models on requested devices. --- clients/client-sagemaker-edge/README.md | 14 +- .../src/SagemakerEdge.ts | 37 ++ .../src/SagemakerEdgeClient.ts | 11 +- .../src/commands/GetDeploymentsCommand.ts | 96 +++++ .../src/commands/index.ts | 1 + .../src/models/models_0.ts | 279 ++++++++++++- .../src/protocols/Aws_restJson1.ts | 174 +++++++- .../aws-models/sagemaker-edge.json | 390 +++++++++++++++++- 8 files changed, 968 insertions(+), 34 deletions(-) create mode 100644 clients/client-sagemaker-edge/src/commands/GetDeploymentsCommand.ts diff --git a/clients/client-sagemaker-edge/README.md b/clients/client-sagemaker-edge/README.md index c722eee324e6a..0ba9295e4b767 100644 --- a/clients/client-sagemaker-edge/README.md +++ b/clients/client-sagemaker-edge/README.md @@ -26,16 +26,16 @@ using your favorite package manager: The AWS SDK is modulized by clients and commands. 
To send a request, you only need to import the `SagemakerEdgeClient` and -the commands you need, for example `GetDeviceRegistrationCommand`: +the commands you need, for example `GetDeploymentsCommand`: ```js // ES5 example -const { SagemakerEdgeClient, GetDeviceRegistrationCommand } = require("@aws-sdk/client-sagemaker-edge"); +const { SagemakerEdgeClient, GetDeploymentsCommand } = require("@aws-sdk/client-sagemaker-edge"); ``` ```ts // ES6+ example -import { SagemakerEdgeClient, GetDeviceRegistrationCommand } from "@aws-sdk/client-sagemaker-edge"; +import { SagemakerEdgeClient, GetDeploymentsCommand } from "@aws-sdk/client-sagemaker-edge"; ``` ### Usage @@ -54,7 +54,7 @@ const client = new SagemakerEdgeClient({ region: "REGION" }); const params = { /** input parameters */ }; -const command = new GetDeviceRegistrationCommand(params); +const command = new GetDeploymentsCommand(params); ``` #### Async/await @@ -133,7 +133,7 @@ const client = new AWS.SagemakerEdge({ region: "REGION" }); // async/await. try { - const data = await client.getDeviceRegistration(params); + const data = await client.getDeployments(params); // process data. } catch (error) { // error handling. @@ -141,7 +141,7 @@ try { // Promises. client - .getDeviceRegistration(params) + .getDeployments(params) .then((data) => { // process data. }) @@ -150,7 +150,7 @@ client }); // callbacks. -client.getDeviceRegistration(params, (err, data) => { +client.getDeployments(params, (err, data) => { // process err and data. 
}); ``` diff --git a/clients/client-sagemaker-edge/src/SagemakerEdge.ts b/clients/client-sagemaker-edge/src/SagemakerEdge.ts index 3fe0b07e4ca1d..86596d9205360 100644 --- a/clients/client-sagemaker-edge/src/SagemakerEdge.ts +++ b/clients/client-sagemaker-edge/src/SagemakerEdge.ts @@ -1,6 +1,11 @@ // smithy-typescript generated code import { HttpHandlerOptions as __HttpHandlerOptions } from "@aws-sdk/types"; +import { + GetDeploymentsCommand, + GetDeploymentsCommandInput, + GetDeploymentsCommandOutput, +} from "./commands/GetDeploymentsCommand"; import { GetDeviceRegistrationCommand, GetDeviceRegistrationCommandInput, @@ -17,6 +22,38 @@ import { SagemakerEdgeClient } from "./SagemakerEdgeClient"; *

        SageMaker Edge Manager dataplane service for communicating with active agents.

        */ export class SagemakerEdge extends SagemakerEdgeClient { + /** + *

        Use to get the active deployments from a device.

        + */ + public getDeployments( + args: GetDeploymentsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public getDeployments( + args: GetDeploymentsCommandInput, + cb: (err: any, data?: GetDeploymentsCommandOutput) => void + ): void; + public getDeployments( + args: GetDeploymentsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetDeploymentsCommandOutput) => void + ): void; + public getDeployments( + args: GetDeploymentsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: GetDeploymentsCommandOutput) => void), + cb?: (err: any, data?: GetDeploymentsCommandOutput) => void + ): Promise | void { + const command = new GetDeploymentsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

        Use to check if a device is registered with SageMaker Edge Manager.

        */ diff --git a/clients/client-sagemaker-edge/src/SagemakerEdgeClient.ts b/clients/client-sagemaker-edge/src/SagemakerEdgeClient.ts index cb2bd0a0a254e..74830c6e85e6f 100644 --- a/clients/client-sagemaker-edge/src/SagemakerEdgeClient.ts +++ b/clients/client-sagemaker-edge/src/SagemakerEdgeClient.ts @@ -53,6 +53,7 @@ import { UserAgent as __UserAgent, } from "@aws-sdk/types"; +import { GetDeploymentsCommandInput, GetDeploymentsCommandOutput } from "./commands/GetDeploymentsCommand"; import { GetDeviceRegistrationCommandInput, GetDeviceRegistrationCommandOutput, @@ -60,9 +61,15 @@ import { import { SendHeartbeatCommandInput, SendHeartbeatCommandOutput } from "./commands/SendHeartbeatCommand"; import { getRuntimeConfig as __getRuntimeConfig } from "./runtimeConfig"; -export type ServiceInputTypes = GetDeviceRegistrationCommandInput | SendHeartbeatCommandInput; +export type ServiceInputTypes = + | GetDeploymentsCommandInput + | GetDeviceRegistrationCommandInput + | SendHeartbeatCommandInput; -export type ServiceOutputTypes = GetDeviceRegistrationCommandOutput | SendHeartbeatCommandOutput; +export type ServiceOutputTypes = + | GetDeploymentsCommandOutput + | GetDeviceRegistrationCommandOutput + | SendHeartbeatCommandOutput; export interface ClientDefaults extends Partial<__SmithyResolvedConfiguration<__HttpHandlerOptions>> { /** diff --git a/clients/client-sagemaker-edge/src/commands/GetDeploymentsCommand.ts b/clients/client-sagemaker-edge/src/commands/GetDeploymentsCommand.ts new file mode 100644 index 0000000000000..ee63ab1e58a95 --- /dev/null +++ b/clients/client-sagemaker-edge/src/commands/GetDeploymentsCommand.ts @@ -0,0 +1,96 @@ +// smithy-typescript generated code +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + 
HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { GetDeploymentsRequest, GetDeploymentsResult } from "../models/models_0"; +import { + deserializeAws_restJson1GetDeploymentsCommand, + serializeAws_restJson1GetDeploymentsCommand, +} from "../protocols/Aws_restJson1"; +import { SagemakerEdgeClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../SagemakerEdgeClient"; + +export interface GetDeploymentsCommandInput extends GetDeploymentsRequest {} +export interface GetDeploymentsCommandOutput extends GetDeploymentsResult, __MetadataBearer {} + +/** + *

        Use to get the active deployments from a device.

        + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { SagemakerEdgeClient, GetDeploymentsCommand } from "@aws-sdk/client-sagemaker-edge"; // ES Modules import + * // const { SagemakerEdgeClient, GetDeploymentsCommand } = require("@aws-sdk/client-sagemaker-edge"); // CommonJS import + * const client = new SagemakerEdgeClient(config); + * const command = new GetDeploymentsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetDeploymentsCommandInput} for command's `input` shape. + * @see {@link GetDeploymentsCommandOutput} for command's `response` shape. + * @see {@link SagemakerEdgeClientResolvedConfig | config} for SagemakerEdgeClient's `config` shape. + * + */ +export class GetDeploymentsCommand extends $Command< + GetDeploymentsCommandInput, + GetDeploymentsCommandOutput, + SagemakerEdgeClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetDeploymentsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: SagemakerEdgeClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "SagemakerEdgeClient"; + const commandName = "GetDeploymentsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: GetDeploymentsRequest.filterSensitiveLog, + outputFilterSensitiveLog: GetDeploymentsResult.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + 
requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: GetDeploymentsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1GetDeploymentsCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1GetDeploymentsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-sagemaker-edge/src/commands/index.ts b/clients/client-sagemaker-edge/src/commands/index.ts index ffe0085a3bb53..1a7be76b57bcb 100644 --- a/clients/client-sagemaker-edge/src/commands/index.ts +++ b/clients/client-sagemaker-edge/src/commands/index.ts @@ -1,3 +1,4 @@ // smithy-typescript generated code +export * from "./GetDeploymentsCommand"; export * from "./GetDeviceRegistrationCommand"; export * from "./SendHeartbeatCommand"; diff --git a/clients/client-sagemaker-edge/src/models/models_0.ts b/clients/client-sagemaker-edge/src/models/models_0.ts index 2032ac8181c2f..ce8122c45a7ea 100644 --- a/clients/client-sagemaker-edge/src/models/models_0.ts +++ b/clients/client-sagemaker-edge/src/models/models_0.ts @@ -3,9 +3,9 @@ import { ExceptionOptionType as __ExceptionOptionType } from "@aws-sdk/smithy-cl import { SagemakerEdgeServiceException as __BaseException } from "./SagemakerEdgeServiceException"; -export interface GetDeviceRegistrationRequest { +export interface GetDeploymentsRequest { /** - *

        The unique name of the device you want to get the registration status from.

        + *

        The unique name of the device you want to get the configuration of active deployments from.

        */ DeviceName: string | undefined; @@ -15,39 +15,144 @@ export interface GetDeviceRegistrationRequest { DeviceFleetName: string | undefined; } -export namespace GetDeviceRegistrationRequest { +export namespace GetDeploymentsRequest { /** * @internal */ - export const filterSensitiveLog = (obj: GetDeviceRegistrationRequest): any => ({ + export const filterSensitiveLog = (obj: GetDeploymentsRequest): any => ({ ...obj, }); } -export interface GetDeviceRegistrationResult { +export enum ChecksumType { + Sha1 = "SHA1", +} + +/** + *

        Information about the checksum of a model deployed on a device.

        + */ +export interface Checksum { /** - *

        Describes if the device is currently registered with SageMaker Edge Manager.

        + *

        The type of the checksum.

        */ - DeviceRegistration?: string; + Type?: ChecksumType | string; /** - *

        The amount of time, in seconds, that the registration status is stored on the device’s cache before it is refreshed.

        + *

        The checksum of the model.

        */ - CacheTTL?: string; + Sum?: string; } -export namespace GetDeviceRegistrationResult { +export namespace Checksum { /** * @internal */ - export const filterSensitiveLog = (obj: GetDeviceRegistrationResult): any => ({ + export const filterSensitiveLog = (obj: Checksum): any => ({ + ...obj, + }); +} + +export enum ModelState { + Deploy = "DEPLOY", + Undeploy = "UNDEPLOY", +} + +/** + *

        + */ +export interface Definition { + /** + *

        The unique model handle.

        + */ + ModelHandle?: string; + + /** + *

        The absolute S3 location of the model.

        + */ + S3Url?: string; + + /** + *

        The checksum information of the model.

        + */ + Checksum?: Checksum; + + /** + *

        The desired state of the model.

        + */ + State?: ModelState | string; +} + +export namespace Definition { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Definition): any => ({ + ...obj, + }); +} + +export enum FailureHandlingPolicy { + DoNothing = "DO_NOTHING", + RollbackOnFailure = "ROLLBACK_ON_FAILURE", +} + +export enum DeploymentType { + Model = "Model", +} + +/** + *

        Information about a deployment on an edge device that is registered with SageMaker Edge Manager.

        + */ +export interface EdgeDeployment { + /** + *

        The name and unique ID of the deployment.

        + */ + DeploymentName?: string; + + /** + *

        The type of the deployment.

        + */ + Type?: DeploymentType | string; + + /** + *

        Determines whether to rollback to previous configuration if deployment fails.

        + */ + FailureHandlingPolicy?: FailureHandlingPolicy | string; + + /** + *

        Returns a list of Definition objects.

        + */ + Definitions?: Definition[]; +} + +export namespace EdgeDeployment { + /** + * @internal + */ + export const filterSensitiveLog = (obj: EdgeDeployment): any => ({ + ...obj, + }); +} + +export interface GetDeploymentsResult { + /** + *

        Returns a list of the configurations of the active deployments on the device.

        + */ + Deployments?: EdgeDeployment[]; +} + +export namespace GetDeploymentsResult { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetDeploymentsResult): any => ({ ...obj, }); } /** *

        An internal failure occurred. Try your request again. If the problem - * persists, contact AWS customer support.

        + * persists, contact Amazon Web Services customer support.

        */ export class InternalServiceException extends __BaseException { readonly name: "InternalServiceException" = "InternalServiceException"; @@ -67,6 +172,48 @@ export class InternalServiceException extends __BaseException { } } +export interface GetDeviceRegistrationRequest { + /** + *

        The unique name of the device you want to get the registration status from.

        + */ + DeviceName: string | undefined; + + /** + *

        The name of the fleet that the device belongs to.

        + */ + DeviceFleetName: string | undefined; +} + +export namespace GetDeviceRegistrationRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetDeviceRegistrationRequest): any => ({ + ...obj, + }); +} + +export interface GetDeviceRegistrationResult { + /** + *

        Describes if the device is currently registered with SageMaker Edge Manager.

        + */ + DeviceRegistration?: string; + + /** + *

        The amount of time, in seconds, that the registration status is stored on the device’s cache before it is refreshed.

        + */ + CacheTTL?: string; +} + +export namespace GetDeviceRegistrationResult { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetDeviceRegistrationResult): any => ({ + ...obj, + }); +} + /** *

        Information required for edge device metrics.

        */ @@ -101,6 +248,109 @@ export namespace EdgeMetric { }); } +export enum DeploymentStatus { + Fail = "FAIL", + Success = "SUCCESS", +} + +/** + *

        + */ +export interface DeploymentModel { + /** + *

        The unique handle of the model.

        + */ + ModelHandle?: string; + + /** + *

        The name of the model.

        + */ + ModelName?: string; + + /** + *

        The version of the model.

        + */ + ModelVersion?: string; + + /** + *

        The desired state of the model.

        + */ + DesiredState?: ModelState | string; + + /** + *

        Returns the current state of the model.

        + */ + State?: ModelState | string; + + /** + *

        Returns the deployment status of the model.

        + */ + Status?: DeploymentStatus | string; + + /** + *

        Returns the error message for the deployment status result.

        + */ + StatusReason?: string; + + /** + *

        Returns the error message if there is a rollback.

        + */ + RollbackFailureReason?: string; +} + +export namespace DeploymentModel { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeploymentModel): any => ({ + ...obj, + }); +} + +/** + *

        Information about the result of a deployment on an edge device that is registered with SageMaker Edge Manager.

        + */ +export interface DeploymentResult { + /** + *

        The name and unique ID of the deployment.

        + */ + DeploymentName?: string; + + /** + *

        Returns the bucket error code.

        + */ + DeploymentStatus?: string; + + /** + *

        Returns the detailed error message.

        + */ + DeploymentStatusMessage?: string; + + /** + *

        The timestamp of when the deployment was started on the agent.

        + */ + DeploymentStartTime?: Date; + + /** + *

        The timestamp of when the deployment was ended, and the agent got the deployment results.

        + */ + DeploymentEndTime?: Date; + + /** + *

        Returns a list of models deployed on the agent.

        + */ + DeploymentModels?: DeploymentModel[]; +} + +export namespace DeploymentResult { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeploymentResult): any => ({ + ...obj, + }); +} + /** *

        Information about a model deployed on an edge device that is registered with SageMaker Edge Manager.

        */ @@ -165,6 +415,11 @@ export interface SendHeartbeatRequest { *

        The name of the fleet that the device belongs to.

        */ DeviceFleetName: string | undefined; + + /** + *

        Returns the result of a deployment on the device.

        + */ + DeploymentResult?: DeploymentResult; } export namespace SendHeartbeatRequest { diff --git a/clients/client-sagemaker-edge/src/protocols/Aws_restJson1.ts b/clients/client-sagemaker-edge/src/protocols/Aws_restJson1.ts index 9708ee997581d..2b3e569406c96 100644 --- a/clients/client-sagemaker-edge/src/protocols/Aws_restJson1.ts +++ b/clients/client-sagemaker-edge/src/protocols/Aws_restJson1.ts @@ -13,14 +13,49 @@ import { SerdeContext as __SerdeContext, } from "@aws-sdk/types"; +import { GetDeploymentsCommandInput, GetDeploymentsCommandOutput } from "../commands/GetDeploymentsCommand"; import { GetDeviceRegistrationCommandInput, GetDeviceRegistrationCommandOutput, } from "../commands/GetDeviceRegistrationCommand"; import { SendHeartbeatCommandInput, SendHeartbeatCommandOutput } from "../commands/SendHeartbeatCommand"; -import { EdgeMetric, InternalServiceException, Model } from "../models/models_0"; +import { + Checksum, + Definition, + DeploymentModel, + DeploymentResult, + EdgeDeployment, + EdgeMetric, + InternalServiceException, + Model, +} from "../models/models_0"; import { SagemakerEdgeServiceException as __BaseException } from "../models/SagemakerEdgeServiceException"; +export const serializeAws_restJson1GetDeploymentsCommand = async ( + input: GetDeploymentsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/GetDeployments"; + let body: any; + body = JSON.stringify({ + ...(input.DeviceFleetName != null && { DeviceFleetName: input.DeviceFleetName }), + ...(input.DeviceName != null && { DeviceName: input.DeviceName }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + export const serializeAws_restJson1GetDeviceRegistrationCommand = async ( input: GetDeviceRegistrationCommandInput, context: __SerdeContext @@ -59,6 +94,9 @@ export const serializeAws_restJson1SendHeartbeatCommand = async ( body = JSON.stringify({ ...(input.AgentMetrics != null && { AgentMetrics: serializeAws_restJson1EdgeMetrics(input.AgentMetrics, context) }), ...(input.AgentVersion != null && { AgentVersion: input.AgentVersion }), + ...(input.DeploymentResult != null && { + DeploymentResult: serializeAws_restJson1DeploymentResult(input.DeploymentResult, context), + }), ...(input.DeviceFleetName != null && { DeviceFleetName: input.DeviceFleetName }), ...(input.DeviceName != null && { DeviceName: input.DeviceName }), ...(input.Models != null && { Models: serializeAws_restJson1Models(input.Models, context) }), @@ -74,6 +112,51 @@ export const serializeAws_restJson1SendHeartbeatCommand = async ( }); }; +export const deserializeAws_restJson1GetDeploymentsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1GetDeploymentsCommandError(output, context); + } + const contents: GetDeploymentsCommandOutput = { + $metadata: deserializeMetadata(output), + Deployments: undefined, + }; + const data: Record = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.Deployments !== undefined && data.Deployments !== null) { + contents.Deployments = deserializeAws_restJson1EdgeDeployments(data.Deployments, context); + } + return 
Promise.resolve(contents); +}; + +const deserializeAws_restJson1GetDeploymentsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + const errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServiceException": + case "com.amazonaws.sagemakeredge#InternalServiceException": + throw await deserializeAws_restJson1InternalServiceExceptionResponse(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + const $metadata = deserializeMetadata(output); + const statusCode = $metadata.httpStatusCode ? $metadata.httpStatusCode + "" : undefined; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode || statusCode || "UnknowError", + $fault: "client", + $metadata, + }); + throw __decorateServiceException(response, parsedBody); + } +}; + export const deserializeAws_restJson1GetDeviceRegistrationCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -180,6 +263,45 @@ const deserializeAws_restJson1InternalServiceExceptionResponse = async ( return __decorateServiceException(exception, parsedOutput.body); }; +const serializeAws_restJson1DeploymentModel = (input: DeploymentModel, context: __SerdeContext): any => { + return { + ...(input.DesiredState != null && { DesiredState: input.DesiredState }), + ...(input.ModelHandle != null && { ModelHandle: input.ModelHandle }), + ...(input.ModelName != null && { ModelName: input.ModelName }), + ...(input.ModelVersion != null && { ModelVersion: input.ModelVersion }), + ...(input.RollbackFailureReason != null && { RollbackFailureReason: input.RollbackFailureReason }), + ...(input.State != null && { State: input.State }), + ...(input.Status != null && { Status: input.Status }), + ...(input.StatusReason != null && { StatusReason: input.StatusReason }), + }; +}; + 
+const serializeAws_restJson1DeploymentModels = (input: DeploymentModel[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_restJson1DeploymentModel(entry, context); + }); +}; + +const serializeAws_restJson1DeploymentResult = (input: DeploymentResult, context: __SerdeContext): any => { + return { + ...(input.DeploymentEndTime != null && { DeploymentEndTime: Math.round(input.DeploymentEndTime.getTime() / 1000) }), + ...(input.DeploymentModels != null && { + DeploymentModels: serializeAws_restJson1DeploymentModels(input.DeploymentModels, context), + }), + ...(input.DeploymentName != null && { DeploymentName: input.DeploymentName }), + ...(input.DeploymentStartTime != null && { + DeploymentStartTime: Math.round(input.DeploymentStartTime.getTime() / 1000), + }), + ...(input.DeploymentStatus != null && { DeploymentStatus: input.DeploymentStatus }), + ...(input.DeploymentStatusMessage != null && { DeploymentStatusMessage: input.DeploymentStatusMessage }), + }; +}; + const serializeAws_restJson1EdgeMetric = (input: EdgeMetric, context: __SerdeContext): any => { return { ...(input.Dimension != null && { Dimension: input.Dimension }), @@ -221,6 +343,56 @@ const serializeAws_restJson1Models = (input: Model[], context: __SerdeContext): }); }; +const deserializeAws_restJson1Checksum = (output: any, context: __SerdeContext): Checksum => { + return { + Sum: __expectString(output.Sum), + Type: __expectString(output.Type), + } as any; +}; + +const deserializeAws_restJson1Definition = (output: any, context: __SerdeContext): Definition => { + return { + Checksum: output.Checksum != null ? 
deserializeAws_restJson1Checksum(output.Checksum, context) : undefined, + ModelHandle: __expectString(output.ModelHandle), + S3Url: __expectString(output.S3Url), + State: __expectString(output.State), + } as any; +}; + +const deserializeAws_restJson1Definitions = (output: any, context: __SerdeContext): Definition[] => { + const retVal = (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1Definition(entry, context); + }); + return retVal; +}; + +const deserializeAws_restJson1EdgeDeployment = (output: any, context: __SerdeContext): EdgeDeployment => { + return { + Definitions: + output.Definitions != null ? deserializeAws_restJson1Definitions(output.Definitions, context) : undefined, + DeploymentName: __expectString(output.DeploymentName), + FailureHandlingPolicy: __expectString(output.FailureHandlingPolicy), + Type: __expectString(output.Type), + } as any; +}; + +const deserializeAws_restJson1EdgeDeployments = (output: any, context: __SerdeContext): EdgeDeployment[] => { + const retVal = (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1EdgeDeployment(entry, context); + }); + return retVal; +}; + const deserializeMetadata = (output: __HttpResponse): __ResponseMetadata => ({ httpStatusCode: output.statusCode, requestId: output.headers["x-amzn-requestid"] ?? 
output.headers["x-amzn-request-id"], diff --git a/codegen/sdk-codegen/aws-models/sagemaker-edge.json b/codegen/sdk-codegen/aws-models/sagemaker-edge.json index 5b9dc10e3f78d..5f5f5ccb5be25 100644 --- a/codegen/sdk-codegen/aws-models/sagemaker-edge.json +++ b/codegen/sdk-codegen/aws-models/sagemaker-edge.json @@ -31,15 +31,6 @@ "shapes": { "com.amazonaws.sagemakeredge#AmazonSageMakerEdge": { "type": "service", - "version": "2020-09-23", - "operations": [ - { - "target": "com.amazonaws.sagemakeredge#GetDeviceRegistration" - }, - { - "target": "com.amazonaws.sagemakeredge#SendHeartbeat" - } - ], "traits": { "aws.api#service": { "sdkId": "Sagemaker Edge", @@ -54,7 +45,19 @@ "aws.protocols#restJson1": {}, "smithy.api#documentation": "

        SageMaker Edge Manager dataplane service for communicating with active agents.

        ", "smithy.api#title": "Amazon Sagemaker Edge Manager" - } + }, + "version": "2020-09-23", + "operations": [ + { + "target": "com.amazonaws.sagemakeredge#GetDeployments" + }, + { + "target": "com.amazonaws.sagemakeredge#GetDeviceRegistration" + }, + { + "target": "com.amazonaws.sagemakeredge#SendHeartbeat" + } + ] }, "com.amazonaws.sagemakeredge#CacheTTLSeconds": { "type": "string", @@ -65,6 +68,217 @@ } } }, + "com.amazonaws.sagemakeredge#Checksum": { + "type": "structure", + "members": { + "Type": { + "target": "com.amazonaws.sagemakeredge#ChecksumType", + "traits": { + "smithy.api#documentation": "

        The type of the checksum.

        " + } + }, + "Sum": { + "target": "com.amazonaws.sagemakeredge#ChecksumString", + "traits": { + "smithy.api#documentation": "

        The checksum of the model.

        " + } + } + }, + "traits": { + "smithy.api#documentation": "

        Information about the checksum of a model deployed on a device.

        " + } + }, + "com.amazonaws.sagemakeredge#ChecksumString": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 63 + }, + "smithy.api#pattern": "^[a-z0-9](-*[a-z0-9])*$" + } + }, + "com.amazonaws.sagemakeredge#ChecksumType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "SHA1", + "name": "Sha1" + } + ] + } + }, + "com.amazonaws.sagemakeredge#Definition": { + "type": "structure", + "members": { + "ModelHandle": { + "target": "com.amazonaws.sagemakeredge#EntityName", + "traits": { + "smithy.api#documentation": "

        The unique model handle.

        " + } + }, + "S3Url": { + "target": "com.amazonaws.sagemakeredge#S3Uri", + "traits": { + "smithy.api#documentation": "

        The absolute S3 location of the model.

        " + } + }, + "Checksum": { + "target": "com.amazonaws.sagemakeredge#Checksum", + "traits": { + "smithy.api#documentation": "

        The checksum information of the model.

        " + } + }, + "State": { + "target": "com.amazonaws.sagemakeredge#ModelState", + "traits": { + "smithy.api#documentation": "

        The desired state of the model.

        " + } + } + }, + "traits": { + "smithy.api#documentation": "

        " + } + }, + "com.amazonaws.sagemakeredge#Definitions": { + "type": "list", + "member": { + "target": "com.amazonaws.sagemakeredge#Definition" + } + }, + "com.amazonaws.sagemakeredge#DeploymentModel": { + "type": "structure", + "members": { + "ModelHandle": { + "target": "com.amazonaws.sagemakeredge#EntityName", + "traits": { + "smithy.api#documentation": "

        The unique handle of the model.

        " + } + }, + "ModelName": { + "target": "com.amazonaws.sagemakeredge#ModelName", + "traits": { + "smithy.api#documentation": "

        The name of the model.

        " + } + }, + "ModelVersion": { + "target": "com.amazonaws.sagemakeredge#Version", + "traits": { + "smithy.api#documentation": "

        The version of the model.

        " + } + }, + "DesiredState": { + "target": "com.amazonaws.sagemakeredge#ModelState", + "traits": { + "smithy.api#documentation": "

        The desired state of the model.

        " + } + }, + "State": { + "target": "com.amazonaws.sagemakeredge#ModelState", + "traits": { + "smithy.api#documentation": "

        Returns the current state of the model.

        " + } + }, + "Status": { + "target": "com.amazonaws.sagemakeredge#DeploymentStatus", + "traits": { + "smithy.api#documentation": "

        Returns the deployment status of the model.

        " + } + }, + "StatusReason": { + "target": "com.amazonaws.sagemakeredge#String", + "traits": { + "smithy.api#documentation": "

        Returns the error message for the deployment status result.

        " + } + }, + "RollbackFailureReason": { + "target": "com.amazonaws.sagemakeredge#String", + "traits": { + "smithy.api#documentation": "

        Returns the error message if there is a rollback.

        " + } + } + }, + "traits": { + "smithy.api#documentation": "

        " + } + }, + "com.amazonaws.sagemakeredge#DeploymentModels": { + "type": "list", + "member": { + "target": "com.amazonaws.sagemakeredge#DeploymentModel" + } + }, + "com.amazonaws.sagemakeredge#DeploymentResult": { + "type": "structure", + "members": { + "DeploymentName": { + "target": "com.amazonaws.sagemakeredge#EntityName", + "traits": { + "smithy.api#documentation": "

        The name and unique ID of the deployment.

        " + } + }, + "DeploymentStatus": { + "target": "com.amazonaws.sagemakeredge#EntityName", + "traits": { + "smithy.api#documentation": "

        Returns the bucket error code.

        " + } + }, + "DeploymentStatusMessage": { + "target": "com.amazonaws.sagemakeredge#String", + "traits": { + "smithy.api#documentation": "

        Returns the detailed error message.

        " + } + }, + "DeploymentStartTime": { + "target": "com.amazonaws.sagemakeredge#Timestamp", + "traits": { + "smithy.api#documentation": "

        The timestamp of when the deployment was started on the agent.

        " + } + }, + "DeploymentEndTime": { + "target": "com.amazonaws.sagemakeredge#Timestamp", + "traits": { + "smithy.api#documentation": "

        The timestamp of when the deployment was ended, and the agent got the deployment results.

        " + } + }, + "DeploymentModels": { + "target": "com.amazonaws.sagemakeredge#DeploymentModels", + "traits": { + "smithy.api#documentation": "

        Returns a list of models deployed on the agent.

        " + } + } + }, + "traits": { + "smithy.api#documentation": "

        Information about the result of a deployment on an edge device that is registered with SageMaker Edge Manager.

        " + } + }, + "com.amazonaws.sagemakeredge#DeploymentStatus": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "SUCCESS", + "name": "Success" + }, + { + "value": "FAIL", + "name": "Fail" + } + ] + } + }, + "com.amazonaws.sagemakeredge#DeploymentType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "Model", + "name": "Model" + } + ] + } + }, "com.amazonaws.sagemakeredge#DeviceFleetName": { "type": "string", "traits": { @@ -104,6 +318,44 @@ "smithy.api#pattern": "^[a-zA-Z0-9](-*[a-zA-Z0-9\\/])*$" } }, + "com.amazonaws.sagemakeredge#EdgeDeployment": { + "type": "structure", + "members": { + "DeploymentName": { + "target": "com.amazonaws.sagemakeredge#EntityName", + "traits": { + "smithy.api#documentation": "

        The name and unique ID of the deployment.

        " + } + }, + "Type": { + "target": "com.amazonaws.sagemakeredge#DeploymentType", + "traits": { + "smithy.api#documentation": "

        The type of the deployment.

        " + } + }, + "FailureHandlingPolicy": { + "target": "com.amazonaws.sagemakeredge#FailureHandlingPolicy", + "traits": { + "smithy.api#documentation": "

        Determines whether to rollback to previous configuration if deployment fails.

        " + } + }, + "Definitions": { + "target": "com.amazonaws.sagemakeredge#Definitions", + "traits": { + "smithy.api#documentation": "

        Returns a list of Definition objects.

        " + } + } + }, + "traits": { + "smithy.api#documentation": "

        Information about a deployment on an edge device that is registered with SageMaker Edge Manager.

        " + } + }, + "com.amazonaws.sagemakeredge#EdgeDeployments": { + "type": "list", + "member": { + "target": "com.amazonaws.sagemakeredge#EdgeDeployment" + } + }, "com.amazonaws.sagemakeredge#EdgeMetric": { "type": "structure", "members": { @@ -142,9 +394,86 @@ "target": "com.amazonaws.sagemakeredge#EdgeMetric" } }, + "com.amazonaws.sagemakeredge#EntityName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 63 + }, + "smithy.api#pattern": "^[a-zA-Z0-9](-*[a-zA-Z0-9])*$" + } + }, "com.amazonaws.sagemakeredge#ErrorMessage": { "type": "string" }, + "com.amazonaws.sagemakeredge#FailureHandlingPolicy": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "ROLLBACK_ON_FAILURE", + "name": "RollbackOnFailure" + }, + { + "value": "DO_NOTHING", + "name": "DoNothing" + } + ] + } + }, + "com.amazonaws.sagemakeredge#GetDeployments": { + "type": "operation", + "input": { + "target": "com.amazonaws.sagemakeredge#GetDeploymentsRequest" + }, + "output": { + "target": "com.amazonaws.sagemakeredge#GetDeploymentsResult" + }, + "errors": [ + { + "target": "com.amazonaws.sagemakeredge#InternalServiceException" + } + ], + "traits": { + "smithy.api#documentation": "

        Use to get the active deployments from a device.

        ", + "smithy.api#http": { + "method": "POST", + "uri": "/GetDeployments", + "code": 200 + } + } + }, + "com.amazonaws.sagemakeredge#GetDeploymentsRequest": { + "type": "structure", + "members": { + "DeviceName": { + "target": "com.amazonaws.sagemakeredge#DeviceName", + "traits": { + "smithy.api#documentation": "

        The unique name of the device you want to get the configuration of active deployments from.

        ", + "smithy.api#required": {} + } + }, + "DeviceFleetName": { + "target": "com.amazonaws.sagemakeredge#DeviceFleetName", + "traits": { + "smithy.api#documentation": "

        The name of the fleet that the device belongs to.

        ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.sagemakeredge#GetDeploymentsResult": { + "type": "structure", + "members": { + "Deployments": { + "target": "com.amazonaws.sagemakeredge#EdgeDeployments", + "traits": { + "smithy.api#documentation": "

        Returns a list of the configurations of the active deployments on the device.

        " + } + } + } + }, "com.amazonaws.sagemakeredge#GetDeviceRegistration": { "type": "operation", "input": { @@ -211,7 +540,7 @@ } }, "traits": { - "smithy.api#documentation": "

        An internal failure occurred. Try your request again. If the problem \n persists, contact AWS customer support.

        ", + "smithy.api#documentation": "

        An internal failure occurred. Try your request again. If the problem \n persists, contact Amazon Web Services customer support.

        ", "smithy.api#error": "client" } }, @@ -273,17 +602,45 @@ "smithy.api#pattern": "^[a-zA-Z0-9](-*[a-zA-Z0-9])*$" } }, + "com.amazonaws.sagemakeredge#ModelState": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "DEPLOY", + "name": "Deploy" + }, + { + "value": "UNDEPLOY", + "name": "Undeploy" + } + ] + } + }, "com.amazonaws.sagemakeredge#Models": { "type": "list", "member": { "target": "com.amazonaws.sagemakeredge#Model" } }, + "com.amazonaws.sagemakeredge#S3Uri": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 1024 + }, + "smithy.api#pattern": "^s3://([^/]+)/?(.*)$" + } + }, "com.amazonaws.sagemakeredge#SendHeartbeat": { "type": "operation", "input": { "target": "com.amazonaws.sagemakeredge#SendHeartbeatRequest" }, + "output": { + "target": "smithy.api#Unit" + }, "errors": [ { "target": "com.amazonaws.sagemakeredge#InternalServiceException" @@ -333,9 +690,18 @@ "smithy.api#documentation": "

        The name of the fleet that the device belongs to.

        ", "smithy.api#required": {} } + }, + "DeploymentResult": { + "target": "com.amazonaws.sagemakeredge#DeploymentResult", + "traits": { + "smithy.api#documentation": "

        Returns the result of a deployment on the device.

        " + } } } }, + "com.amazonaws.sagemakeredge#String": { + "type": "string" + }, "com.amazonaws.sagemakeredge#Timestamp": { "type": "timestamp" }, @@ -349,7 +715,7 @@ "min": 1, "max": 64 }, - "smithy.api#pattern": "[a-zA-Z0-9\\ \\_\\.]+" + "smithy.api#pattern": "^[a-zA-Z0-9\\ \\_\\.]+$" } } } From 559d71c013f09729128e24ae32a8e0f1090c8520 Mon Sep 17 00:00:00 2001 From: awstools Date: Tue, 19 Jul 2022 18:39:01 +0000 Subject: [PATCH 15/31] Publish v3.133.0 --- CHANGELOG.md | 14 ++++++++++++++ clients/client-devops-guru/CHANGELOG.md | 11 +++++++++++ clients/client-devops-guru/package.json | 2 +- clients/client-glue/CHANGELOG.md | 8 ++++++++ clients/client-glue/package.json | 2 +- clients/client-sagemaker-edge/CHANGELOG.md | 11 +++++++++++ clients/client-sagemaker-edge/package.json | 2 +- clients/client-sagemaker/CHANGELOG.md | 11 +++++++++++ clients/client-sagemaker/package.json | 2 +- clients/client-workspaces/CHANGELOG.md | 11 +++++++++++ clients/client-workspaces/package.json | 2 +- lerna.json | 2 +- 12 files changed, 72 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6733b77dafe75..bd0e08feec1e3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,20 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. +# [3.133.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.132.0...v3.133.0) (2022-07-19) + + +### Features + +* **client-devops-guru:** Added new APIs for log anomaly detection feature. ([90ed85b](https://github.com/aws/aws-sdk-js-v3/commit/90ed85b6817d3f035124d62818670b3b4807c492)) +* **client-sagemaker-edge:** Amazon SageMaker Edge Manager provides lightweight model deployment feature to deploy machine learning models on requested devices. 
([316a691](https://github.com/aws/aws-sdk-js-v3/commit/316a691491232facc572b8482f9084355702bfbf)) +* **client-sagemaker:** Fixed an issue with cross account QueryLineage ([ed39bc2](https://github.com/aws/aws-sdk-js-v3/commit/ed39bc2c1bd0d2bad021a043b7642fae5ea9263d)) +* **client-workspaces:** Increased the character limit of the login message from 850 to 2000 characters. ([6481e59](https://github.com/aws/aws-sdk-js-v3/commit/6481e598bf2360dfe18714996867eeed605a6f82)) + + + + + # [3.132.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.131.0...v3.132.0) (2022-07-18) diff --git a/clients/client-devops-guru/CHANGELOG.md b/clients/client-devops-guru/CHANGELOG.md index a734a1c284458..52d4a9ca19ed4 100644 --- a/clients/client-devops-guru/CHANGELOG.md +++ b/clients/client-devops-guru/CHANGELOG.md @@ -3,6 +3,17 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. +# [3.133.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.132.0...v3.133.0) (2022-07-19) + + +### Features + +* **client-devops-guru:** Added new APIs for log anomaly detection feature. 
([90ed85b](https://github.com/aws/aws-sdk-js-v3/commit/90ed85b6817d3f035124d62818670b3b4807c492)) + + + + + # [3.131.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.130.0...v3.131.0) (2022-07-15) diff --git a/clients/client-devops-guru/package.json b/clients/client-devops-guru/package.json index ed538c32fa54e..ab6bec2e6e462 100644 --- a/clients/client-devops-guru/package.json +++ b/clients/client-devops-guru/package.json @@ -1,7 +1,7 @@ { "name": "@aws-sdk/client-devops-guru", "description": "AWS SDK for JavaScript Devops Guru Client for Node.js, Browser and React Native", - "version": "3.131.0", + "version": "3.133.0", "scripts": { "build": "concurrently 'yarn:build:cjs' 'yarn:build:es' 'yarn:build:types'", "build:cjs": "tsc -p tsconfig.cjs.json", diff --git a/clients/client-glue/CHANGELOG.md b/clients/client-glue/CHANGELOG.md index 61373d30c6a4e..f80bc7cec301f 100644 --- a/clients/client-glue/CHANGELOG.md +++ b/clients/client-glue/CHANGELOG.md @@ -3,6 +3,14 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. 
+# [3.133.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.132.0...v3.133.0) (2022-07-19) + +**Note:** Version bump only for package @aws-sdk/client-glue + + + + + # [3.131.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.130.0...v3.131.0) (2022-07-15) **Note:** Version bump only for package @aws-sdk/client-glue diff --git a/clients/client-glue/package.json b/clients/client-glue/package.json index f0de0c0024dde..b4c5b90eb47d0 100644 --- a/clients/client-glue/package.json +++ b/clients/client-glue/package.json @@ -1,7 +1,7 @@ { "name": "@aws-sdk/client-glue", "description": "AWS SDK for JavaScript Glue Client for Node.js, Browser and React Native", - "version": "3.131.0", + "version": "3.133.0", "scripts": { "build": "concurrently 'yarn:build:cjs' 'yarn:build:es' 'yarn:build:types'", "build:cjs": "tsc -p tsconfig.cjs.json", diff --git a/clients/client-sagemaker-edge/CHANGELOG.md b/clients/client-sagemaker-edge/CHANGELOG.md index 4e9e2ef3bdcef..e6abceba18d52 100644 --- a/clients/client-sagemaker-edge/CHANGELOG.md +++ b/clients/client-sagemaker-edge/CHANGELOG.md @@ -3,6 +3,17 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. +# [3.133.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.132.0...v3.133.0) (2022-07-19) + + +### Features + +* **client-sagemaker-edge:** Amazon SageMaker Edge Manager provides lightweight model deployment feature to deploy machine learning models on requested devices. 
([316a691](https://github.com/aws/aws-sdk-js-v3/commit/316a691491232facc572b8482f9084355702bfbf)) + + + + + # [3.131.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.130.0...v3.131.0) (2022-07-15) **Note:** Version bump only for package @aws-sdk/client-sagemaker-edge diff --git a/clients/client-sagemaker-edge/package.json b/clients/client-sagemaker-edge/package.json index 558c5e60faf9a..0f9ecb7b510a4 100644 --- a/clients/client-sagemaker-edge/package.json +++ b/clients/client-sagemaker-edge/package.json @@ -1,7 +1,7 @@ { "name": "@aws-sdk/client-sagemaker-edge", "description": "AWS SDK for JavaScript Sagemaker Edge Client for Node.js, Browser and React Native", - "version": "3.131.0", + "version": "3.133.0", "scripts": { "build": "concurrently 'yarn:build:cjs' 'yarn:build:es' 'yarn:build:types'", "build:cjs": "tsc -p tsconfig.cjs.json", diff --git a/clients/client-sagemaker/CHANGELOG.md b/clients/client-sagemaker/CHANGELOG.md index 579855cb368aa..55da61be06f6d 100644 --- a/clients/client-sagemaker/CHANGELOG.md +++ b/clients/client-sagemaker/CHANGELOG.md @@ -3,6 +3,17 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. 
+# [3.133.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.132.0...v3.133.0) (2022-07-19) + + +### Features + +* **client-sagemaker:** Fixed an issue with cross account QueryLineage ([ed39bc2](https://github.com/aws/aws-sdk-js-v3/commit/ed39bc2c1bd0d2bad021a043b7642fae5ea9263d)) + + + + + # [3.132.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.131.0...v3.132.0) (2022-07-18) diff --git a/clients/client-sagemaker/package.json b/clients/client-sagemaker/package.json index ac1285425620b..5658041b5dfab 100644 --- a/clients/client-sagemaker/package.json +++ b/clients/client-sagemaker/package.json @@ -1,7 +1,7 @@ { "name": "@aws-sdk/client-sagemaker", "description": "AWS SDK for JavaScript Sagemaker Client for Node.js, Browser and React Native", - "version": "3.132.0", + "version": "3.133.0", "scripts": { "build": "concurrently 'yarn:build:cjs' 'yarn:build:es' 'yarn:build:types'", "build:cjs": "tsc -p tsconfig.cjs.json", diff --git a/clients/client-workspaces/CHANGELOG.md b/clients/client-workspaces/CHANGELOG.md index 9329f2e363821..d98159b08b139 100644 --- a/clients/client-workspaces/CHANGELOG.md +++ b/clients/client-workspaces/CHANGELOG.md @@ -3,6 +3,17 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. +# [3.133.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.132.0...v3.133.0) (2022-07-19) + + +### Features + +* **client-workspaces:** Increased the character limit of the login message from 850 to 2000 characters. 
([6481e59](https://github.com/aws/aws-sdk-js-v3/commit/6481e598bf2360dfe18714996867eeed605a6f82)) + + + + + # [3.131.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.130.0...v3.131.0) (2022-07-15) **Note:** Version bump only for package @aws-sdk/client-workspaces diff --git a/clients/client-workspaces/package.json b/clients/client-workspaces/package.json index 4fc89b2b1347f..a8f4e063497ff 100644 --- a/clients/client-workspaces/package.json +++ b/clients/client-workspaces/package.json @@ -1,7 +1,7 @@ { "name": "@aws-sdk/client-workspaces", "description": "AWS SDK for JavaScript Workspaces Client for Node.js, Browser and React Native", - "version": "3.131.0", + "version": "3.133.0", "scripts": { "build": "concurrently 'yarn:build:cjs' 'yarn:build:es' 'yarn:build:types'", "build:cjs": "tsc -p tsconfig.cjs.json", diff --git a/lerna.json b/lerna.json index 02143bc0cfd2a..6c4ba7606a5c1 100644 --- a/lerna.json +++ b/lerna.json @@ -1,5 +1,5 @@ { - "version": "3.132.0", + "version": "3.133.0", "npmClient": "yarn", "useWorkspaces": true, "command": { From 358a73e37eca1782a115e9c304bdc27889fc7026 Mon Sep 17 00:00:00 2001 From: awstools Date: Wed, 20 Jul 2022 18:23:00 +0000 Subject: [PATCH 16/31] feat(client-iot): GA release the ability to enable/disable IoT Fleet Indexing for Device Defender and Named Shadow information, and search them through IoT Fleet Indexing APIs. This includes Named Shadow Selection as a part of the UpdateIndexingConfiguration API. 
--- clients/client-iot/src/IoT.ts | 3 +- .../commands/AttachPrincipalPolicyCommand.ts | 3 +- .../ListThingGroupsForThingCommand.ts | 3 +- clients/client-iot/src/models/models_1.ts | 52 +++++++++++-------- clients/client-iot/src/models/models_2.ts | 22 ++++++++ .../client-iot/src/protocols/Aws_restJson1.ts | 43 +++++++++++++++ codegen/sdk-codegen/aws-models/iot.json | 38 +++++++++++++- 7 files changed, 139 insertions(+), 25 deletions(-) diff --git a/clients/client-iot/src/IoT.ts b/clients/client-iot/src/IoT.ts index 17928fb3d91e7..2c89b5ba21741 100644 --- a/clients/client-iot/src/IoT.ts +++ b/clients/client-iot/src/IoT.ts @@ -1331,7 +1331,8 @@ export class IoT extends IoTClient { *

        Attaches the specified policy to the specified principal (certificate or other * credential).

        *

        - * Note: This action is deprecated. Please use AttachPolicy instead.

        + * Note: This action is deprecated and works as + * expected for backward compatibility, but we won't add enhancements. Use AttachPolicy instead.

        *

        Requires permission to access the AttachPrincipalPolicy action.

        */ public attachPrincipalPolicy( diff --git a/clients/client-iot/src/commands/AttachPrincipalPolicyCommand.ts b/clients/client-iot/src/commands/AttachPrincipalPolicyCommand.ts index 8726d82600320..603940ea7676a 100644 --- a/clients/client-iot/src/commands/AttachPrincipalPolicyCommand.ts +++ b/clients/client-iot/src/commands/AttachPrincipalPolicyCommand.ts @@ -28,7 +28,8 @@ export interface AttachPrincipalPolicyCommandOutput extends __MetadataBearer {} *

        Attaches the specified policy to the specified principal (certificate or other * credential).

        *

        - * Note: This action is deprecated. Please use AttachPolicy instead.

        + * Note: This action is deprecated and works as + * expected for backward compatibility, but we won't add enhancements. Use AttachPolicy instead.

        *

        Requires permission to access the AttachPrincipalPolicy action.

        * @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-iot/src/commands/ListThingGroupsForThingCommand.ts b/clients/client-iot/src/commands/ListThingGroupsForThingCommand.ts index ef010b34bb60f..bd513d1b7f893 100644 --- a/clients/client-iot/src/commands/ListThingGroupsForThingCommand.ts +++ b/clients/client-iot/src/commands/ListThingGroupsForThingCommand.ts @@ -13,7 +13,8 @@ import { } from "@aws-sdk/types"; import { IoTClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../IoTClient"; -import { ListThingGroupsForThingRequest, ListThingGroupsForThingResponse } from "../models/models_1"; +import { ListThingGroupsForThingRequest } from "../models/models_1"; +import { ListThingGroupsForThingResponse } from "../models/models_2"; import { deserializeAws_restJson1ListThingGroupsForThingCommand, serializeAws_restJson1ListThingGroupsForThingCommand, diff --git a/clients/client-iot/src/models/models_1.ts b/clients/client-iot/src/models/models_1.ts index bce1915d84561..4abf0e237ff7e 100644 --- a/clients/client-iot/src/models/models_1.ts +++ b/clients/client-iot/src/models/models_1.ts @@ -4181,6 +4181,30 @@ export enum DeviceDefenderIndexingMode { VIOLATIONS = "VIOLATIONS", } +/** + *

        Provides additional filters for specific data sources. Named shadow is the only data source that currently supports and requires a filter. + * To add named shadows to your fleet indexing configuration, set namedShadowIndexingMode to be ON and + * specify your shadow names in filter.

        + */ +export interface IndexingFilter { + /** + *

        The shadow names that you select to index. The default maximum number of shadow names for indexing is 10. To increase + * the limit, see Amazon Web Services IoT Device Management + * Quotas in the Amazon Web Services General Reference. + *

        + */ + namedShadowNames?: string[]; +} + +export namespace IndexingFilter { + /** + * @internal + */ + export const filterSensitiveLog = (obj: IndexingFilter): any => ({ + ...obj, + }); +} + export enum NamedShadowIndexingMode { OFF = "OFF", ON = "ON", @@ -4277,6 +4301,13 @@ export interface ThingIndexingConfiguration { *

        Contains custom field names and their data type.

        */ customFields?: Field[]; + + /** + *

        Provides additional filters for specific data sources. Named shadow is the only data source that currently supports and requires a filter. + * To add named shadows to your fleet indexing configuration, set namedShadowIndexingMode to be ON and + * specify your shadow names in filter.

        + */ + filter?: IndexingFilter; } export namespace ThingIndexingConfiguration { @@ -8395,24 +8426,3 @@ export namespace ListThingGroupsForThingRequest { ...obj, }); } - -export interface ListThingGroupsForThingResponse { - /** - *

        The thing groups.

        - */ - thingGroups?: GroupNameAndArn[]; - - /** - *

        The token to use to get the next set of results, or null if there are no additional results.

        - */ - nextToken?: string; -} - -export namespace ListThingGroupsForThingResponse { - /** - * @internal - */ - export const filterSensitiveLog = (obj: ListThingGroupsForThingResponse): any => ({ - ...obj, - }); -} diff --git a/clients/client-iot/src/models/models_2.ts b/clients/client-iot/src/models/models_2.ts index 2f65553f98bb6..2e3cd056246d8 100644 --- a/clients/client-iot/src/models/models_2.ts +++ b/clients/client-iot/src/models/models_2.ts @@ -49,6 +49,7 @@ import { Configuration, DetectMitigationActionsTaskTarget, DomainConfigurationStatus, + GroupNameAndArn, LogTargetType, RegistrationConfig, Status, @@ -58,6 +59,27 @@ import { ViolationEventOccurrenceRange, } from "./models_1"; +export interface ListThingGroupsForThingResponse { + /** + *

        The thing groups.

        + */ + thingGroups?: GroupNameAndArn[]; + + /** + *

        The token to use to get the next set of results, or null if there are no additional results.

        + */ + nextToken?: string; +} + +export namespace ListThingGroupsForThingResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListThingGroupsForThingResponse): any => ({ + ...obj, + }); +} + /** *

        The input for the ListThingPrincipal operation.

        */ diff --git a/clients/client-iot/src/protocols/Aws_restJson1.ts b/clients/client-iot/src/protocols/Aws_restJson1.ts index 64c4ced0aab26..630e41d22bffe 100644 --- a/clients/client-iot/src/protocols/Aws_restJson1.ts +++ b/clients/client-iot/src/protocols/Aws_restJson1.ts @@ -837,6 +837,7 @@ import { Field, FleetMetricNameAndArn, GroupNameAndArn, + IndexingFilter, InternalServerException, Job, JobExecution, @@ -24326,6 +24327,14 @@ const serializeAws_restJson1HttpUrlDestinationConfiguration = ( }; }; +const serializeAws_restJson1IndexingFilter = (input: IndexingFilter, context: __SerdeContext): any => { + return { + ...(input.namedShadowNames != null && { + namedShadowNames: serializeAws_restJson1NamedShadowNamesFilter(input.namedShadowNames, context), + }), + }; +}; + const serializeAws_restJson1IotAnalyticsAction = (input: IotAnalyticsAction, context: __SerdeContext): any => { return { ...(input.batchMode != null && { batchMode: input.batchMode }), @@ -24523,6 +24532,17 @@ const serializeAws_restJson1MqttContext = (input: MqttContext, context: __SerdeC }; }; +const serializeAws_restJson1NamedShadowNamesFilter = (input: string[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + const serializeAws_restJson1NumberList = (input: number[], context: __SerdeContext): any => { return input .filter((e: any) => e != null) @@ -25095,6 +25115,7 @@ const serializeAws_restJson1ThingIndexingConfiguration = ( return { ...(input.customFields != null && { customFields: serializeAws_restJson1Fields(input.customFields, context) }), ...(input.deviceDefenderIndexingMode != null && { deviceDefenderIndexingMode: input.deviceDefenderIndexingMode }), + ...(input.filter != null && { filter: serializeAws_restJson1IndexingFilter(input.filter, context) }), ...(input.managedFields != null && { managedFields: serializeAws_restJson1Fields(input.managedFields, 
context) }), ...(input.namedShadowIndexingMode != null && { namedShadowIndexingMode: input.namedShadowIndexingMode }), ...(input.thingConnectivityIndexingMode != null && { @@ -26777,6 +26798,15 @@ const deserializeAws_restJson1ImplicitDeny = (output: any, context: __SerdeConte } as any; }; +const deserializeAws_restJson1IndexingFilter = (output: any, context: __SerdeContext): IndexingFilter => { + return { + namedShadowNames: + output.namedShadowNames != null + ? deserializeAws_restJson1NamedShadowNamesFilter(output.namedShadowNames, context) + : undefined, + } as any; +}; + const deserializeAws_restJson1IndexNamesList = (output: any, context: __SerdeContext): string[] => { const retVal = (output || []) .filter((e: any) => e != null) @@ -27353,6 +27383,18 @@ const deserializeAws_restJson1MitigationActionParams = ( } as any; }; +const deserializeAws_restJson1NamedShadowNamesFilter = (output: any, context: __SerdeContext): string[] => { + const retVal = (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return __expectString(entry) as any; + }); + return retVal; +}; + const deserializeAws_restJson1NonCompliantResource = (output: any, context: __SerdeContext): NonCompliantResource => { return { additionalInfo: @@ -28581,6 +28623,7 @@ const deserializeAws_restJson1ThingIndexingConfiguration = ( customFields: output.customFields != null ? deserializeAws_restJson1Fields(output.customFields, context) : undefined, deviceDefenderIndexingMode: __expectString(output.deviceDefenderIndexingMode), + filter: output.filter != null ? deserializeAws_restJson1IndexingFilter(output.filter, context) : undefined, managedFields: output.managedFields != null ? 
deserializeAws_restJson1Fields(output.managedFields, context) : undefined, namedShadowIndexingMode: __expectString(output.namedShadowIndexingMode), diff --git a/codegen/sdk-codegen/aws-models/iot.json b/codegen/sdk-codegen/aws-models/iot.json index 36cabcaa02df2..5819e250b6a4e 100644 --- a/codegen/sdk-codegen/aws-models/iot.json +++ b/codegen/sdk-codegen/aws-models/iot.json @@ -1802,7 +1802,7 @@ ], "traits": { "smithy.api#deprecated": {}, - "smithy.api#documentation": "

        Attaches the specified policy to the specified principal (certificate or other\n credential).

        \n

        \n Note: This action is deprecated. Please use AttachPolicy instead.

        \n

        Requires permission to access the AttachPrincipalPolicy action.

        ", + "smithy.api#documentation": "

        Attaches the specified policy to the specified principal (certificate or other\n credential).

        \n

        \n Note: This action is deprecated and works as\n expected for backward compatibility, but we won't add enhancements. Use AttachPolicy instead.

        \n

        Requires permission to access the AttachPrincipalPolicy action.

        ", "smithy.api#http": { "method": "PUT", "uri": "/principal-policies/{policyName}", @@ -15615,6 +15615,20 @@ ] } }, + "com.amazonaws.iot#IndexingFilter": { + "type": "structure", + "members": { + "namedShadowNames": { + "target": "com.amazonaws.iot#NamedShadowNamesFilter", + "traits": { + "smithy.api#documentation": "

        The shadow names that you select to index. The default maximum number of shadow names for indexing is 10. To increase \n the limit, see Amazon Web Services IoT Device Management \n Quotas in the Amazon Web Services General Reference.\n

        " + } + } + }, + "traits": { + "smithy.api#documentation": "

        Provides additional filters for specific data sources. Named shadow is the only data source that currently supports and requires a filter.\n To add named shadows to your fleet indexing configuration, set namedShadowIndexingMode to be ON and \n specify your shadow names in filter.

        " + } + }, "com.amazonaws.iot#InlineDocument": { "type": "string" }, @@ -22385,6 +22399,12 @@ ] } }, + "com.amazonaws.iot#NamedShadowNamesFilter": { + "type": "list", + "member": { + "target": "com.amazonaws.iot#ShadowName" + } + }, "com.amazonaws.iot#NamespaceId": { "type": "string", "traits": { @@ -25598,6 +25618,16 @@ } } }, + "com.amazonaws.iot#ShadowName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 64 + }, + "smithy.api#pattern": "^[a-zA-Z0-9:_-]+$" + } + }, "com.amazonaws.iot#SigV4Authorization": { "type": "structure", "members": { @@ -27474,6 +27504,12 @@ "traits": { "smithy.api#documentation": "

        Contains custom field names and their data type.

        " } + }, + "filter": { + "target": "com.amazonaws.iot#IndexingFilter", + "traits": { + "smithy.api#documentation": "

        Provides additional filters for specific data sources. Named shadow is the only data source that currently supports and requires a filter.\n To add named shadows to your fleet indexing configuration, set namedShadowIndexingMode to be ON and \n specify your shadow names in filter.

        " + } } }, "traits": { From 6447aca7dc9d0c87c3573b973bb2bcf3ded143af Mon Sep 17 00:00:00 2001 From: awstools Date: Wed, 20 Jul 2022 18:23:00 +0000 Subject: [PATCH 17/31] docs(client-acm-pca): AWS Certificate Manager (ACM) Private Certificate Authority (PCA) documentation updates --- clients/client-acm-pca/README.md | 1 - clients/client-acm-pca/src/ACMPCA.ts | 1 - clients/client-acm-pca/src/ACMPCAClient.ts | 1 - clients/client-acm-pca/src/models/models_0.ts | 73 +++++++------------ codegen/sdk-codegen/aws-models/acm-pca.json | 55 +++++++++++--- 5 files changed, 72 insertions(+), 59 deletions(-) diff --git a/clients/client-acm-pca/README.md b/clients/client-acm-pca/README.md index af45c44b3ae5d..ef0891e01576f 100644 --- a/clients/client-acm-pca/README.md +++ b/clients/client-acm-pca/README.md @@ -22,7 +22,6 @@ depending on the operation. Throttling means that ACM Private CA rejects an othe request because the request exceeds the operation's quota for the number of requests per second. When a request is throttled, ACM Private CA returns a ThrottlingException error. ACM Private CA does not guarantee a minimum request rate for APIs.

        -

        To see an up-to-date list of your ACM Private CA quotas, or to request a quota increase, log into your Amazon Web Services account and visit the Service Quotas console.

        diff --git a/clients/client-acm-pca/src/ACMPCA.ts b/clients/client-acm-pca/src/ACMPCA.ts index 6531ecdc08ccb..c478e5b669cfd 100644 --- a/clients/client-acm-pca/src/ACMPCA.ts +++ b/clients/client-acm-pca/src/ACMPCA.ts @@ -120,7 +120,6 @@ import { * request because the request exceeds the operation's quota for the number of requests per * second. When a request is throttled, ACM Private CA returns a ThrottlingException error. ACM Private CA does not guarantee a minimum request * rate for APIs.

        - * *

        To see an up-to-date list of your ACM Private CA quotas, or to request a quota increase, * log into your Amazon Web Services account and visit the Service Quotas * console.

        diff --git a/clients/client-acm-pca/src/ACMPCAClient.ts b/clients/client-acm-pca/src/ACMPCAClient.ts index 177eef6c20351..d4ec18c8d9bef 100644 --- a/clients/client-acm-pca/src/ACMPCAClient.ts +++ b/clients/client-acm-pca/src/ACMPCAClient.ts @@ -334,7 +334,6 @@ export interface ACMPCAClientResolvedConfig extends ACMPCAClientResolvedConfigTy * request because the request exceeds the operation's quota for the number of requests per * second. When a request is throttled, ACM Private CA returns a ThrottlingException error. ACM Private CA does not guarantee a minimum request * rate for APIs.

        - * *

        To see an up-to-date list of your ACM Private CA quotas, or to request a quota increase, * log into your Amazon Web Services account and visit the Service Quotas * console.

        diff --git a/clients/client-acm-pca/src/models/models_0.ts b/clients/client-acm-pca/src/models/models_0.ts index 31453c3b2c72a..4dc3caedf3c79 100644 --- a/clients/client-acm-pca/src/models/models_0.ts +++ b/clients/client-acm-pca/src/models/models_0.ts @@ -8,17 +8,14 @@ import { ACMPCAServiceException as __BaseException } from "./ACMPCAServiceExcept */ export interface CustomAttribute { /** - *

        Specifies the object identifier (OID) of the attribute type of - * the - * relative distinguished name - * (RDN).

        + *

        Specifies the object identifier (OID) of the attribute type of the relative + * distinguished name (RDN).

        */ ObjectIdentifier: string | undefined; /** *

        - *

        Specifies the attribute value of relative distinguished name - * (RDN).

        + *

        Specifies the attribute value of relative distinguished name (RDN).

        */ Value: string | undefined; } @@ -125,16 +122,9 @@ export interface ASN1Subject { /** *

        - *

        Contains a sequence of one or more X.500 relative distinguished - * names - * (RDNs), - * each of which consists of an object identifier (OID) and - * a - * value. For more information, see NIST’s definition of - * Object - * Identifier - * (OID).

        - * + *

        Contains a sequence of one or more X.500 relative distinguished names (RDNs), each of + * which consists of an object identifier (OID) and a value. For more information, see + * NIST’s definition of Object Identifier (OID).

        * *

        Custom attributes cannot be used in combination with standard attributes.

        *
        @@ -504,11 +494,9 @@ export enum S3ObjectAcl { * certificate's expiration date or when a certificate is revoked. When a certificate is * revoked, it appears in the CRL until the certificate expires, and then in one additional * CRL after expiration, and it always appears in the audit report.

        - * *

        A CRL is typically updated approximately 30 minutes after a certificate * is revoked. If for any reason a CRL update fails, ACM Private CA makes further attempts * every 15 minutes.

        - * *

        CRLs contain the following fields:

        *
          *
        • @@ -792,11 +780,22 @@ export interface CreateCertificateAuthorityRequest { IdempotencyToken?: string; /** - *

          Specifies a - * cryptographic key management compliance standard used for handling CA keys.

          + *

          Specifies a cryptographic key management compliance standard used for handling CA + * keys.

          *

          Default: FIPS_140_2_LEVEL_3_OR_HIGHER

          - *

          Note: FIPS_140_2_LEVEL_3_OR_HIGHER is not supported in Region - * ap-northeast-3. When creating a CA in the ap-northeast-3, you must provide + *

          + * Note: + * FIPS_140_2_LEVEL_3_OR_HIGHER is not supported in the following + * Regions:

          + *
            + *
          • + *

            ap-northeast-3

            + *
          • + *
          • + *

            ap-southeast-3

            + *
          • + *
          + *

          When creating a CA in these Regions, you must provide * FIPS_140_2_LEVEL_2_OR_HIGHER as the argument for * KeyStorageSecurityStandard. Failure to do this results in an * InvalidArgsException with the message, "A certificate authority cannot @@ -1819,8 +1818,7 @@ export namespace PolicyInformation { /** *

          - *

          Specifies the X.509 extension information for a - * certificate.

          + *

          Specifies the X.509 extension information for a certificate.

          *

          Extensions present in CustomExtensions follow the * ApiPassthrough * template @@ -1829,27 +1827,21 @@ export namespace PolicyInformation { export interface CustomExtension { /** *

          - *

          Specifies the object identifier (OID) of the X.509 extension. For more information, - * see the - * Global OID reference - * database. + *

          Specifies the object identifier (OID) of the X.509 extension. For more information, see the + * Global OID reference database. *

          */ ObjectIdentifier: string | undefined; /** *

          - *

          Specifies the base64-encoded value of the X.509 - * extension.

          + *

          Specifies the base64-encoded value of the X.509 extension.

          */ Value: string | undefined; /** *

          - *

          Specifies the critical flag of - * the - * X.509 - * extension.

          + *

          Specifies the critical flag of the X.509 extension.

          */ Critical?: boolean; } @@ -1940,19 +1932,10 @@ export interface Extensions { /** *

          *

          Contains a sequence of one or more X.509 extensions, each of which consists of an - * object identifier (OID), a base64-encoded - * value, - * and the - * critical flag. - * For - * more information, see the Global OID reference + * object identifier (OID), a base64-encoded value, and the critical flag. For more + * information, see the Global OID reference * database. *

          - * - * - *

          The OID value of a CustomExtension must not - * match the OID of a predefined extension.

          - *
          */ CustomExtensions?: CustomExtension[]; } diff --git a/codegen/sdk-codegen/aws-models/acm-pca.json b/codegen/sdk-codegen/aws-models/acm-pca.json index b887c8dd52a9b..80a163ba78f6b 100644 --- a/codegen/sdk-codegen/aws-models/acm-pca.json +++ b/codegen/sdk-codegen/aws-models/acm-pca.json @@ -43,7 +43,7 @@ "name": "acm-pca" }, "aws.protocols#awsJson1_1": {}, - "smithy.api#documentation": "

          This is the Certificate Manager Private Certificate Authority (PCA) API Reference. It provides descriptions,\n\t\t\tsyntax, and usage examples for each of the actions and data types involved in creating\n\t\t\tand managing a private certificate authority (CA) for your organization.

          \n\t\t

          The documentation for each action shows the API request parameters and the JSON\n\t\t\tresponse. Alternatively, you can use one of the Amazon Web Services SDKs to access an API that is\n\t\t\ttailored to the programming language or platform that you prefer. For more information,\n\t\t\tsee Amazon Web Services SDKs.

          \n\t\t

          Each ACM Private CA API operation has a quota that determines the number of times the\n\t\t\toperation can be called per second. ACM Private CA throttles API requests at different rates\n\t\t\tdepending on the operation. Throttling means that ACM Private CA rejects an otherwise valid\n\t\t\trequest because the request exceeds the operation's quota for the number of requests per\n\t\t\tsecond. When a request is throttled, ACM Private CA returns a ThrottlingException error. ACM Private CA does not guarantee a minimum request\n\t\t\trate for APIs.

          \n\n\t\t

          To see an up-to-date list of your ACM Private CA quotas, or to request a quota increase,\n\t\t\tlog into your Amazon Web Services account and visit the Service Quotas\n\t\t\tconsole.

          ", + "smithy.api#documentation": "

          This is the Certificate Manager Private Certificate Authority (PCA) API Reference. It provides descriptions,\n\t\t\tsyntax, and usage examples for each of the actions and data types involved in creating\n\t\t\tand managing a private certificate authority (CA) for your organization.

          \n\t\t

          The documentation for each action shows the API request parameters and the JSON\n\t\t\tresponse. Alternatively, you can use one of the Amazon Web Services SDKs to access an API that is\n\t\t\ttailored to the programming language or platform that you prefer. For more information,\n\t\t\tsee Amazon Web Services SDKs.

          \n\t\t

          Each ACM Private CA API operation has a quota that determines the number of times the\n\t\t\toperation can be called per second. ACM Private CA throttles API requests at different rates\n\t\t\tdepending on the operation. Throttling means that ACM Private CA rejects an otherwise valid\n\t\t\trequest because the request exceeds the operation's quota for the number of requests per\n\t\t\tsecond. When a request is throttled, ACM Private CA returns a ThrottlingException error. ACM Private CA does not guarantee a minimum request\n\t\t\trate for APIs.

          \n\t\t

          To see an up-to-date list of your ACM Private CA quotas, or to request a quota increase,\n\t\t\tlog into your Amazon Web Services account and visit the Service Quotas\n\t\t\tconsole.

          ", "smithy.api#title": "AWS Certificate Manager Private Certificate Authority" }, "version": "2017-08-22", @@ -219,7 +219,7 @@ "CustomAttributes": { "target": "com.amazonaws.acmpca#CustomAttributeList", "traits": { - "smithy.api#documentation": "

          \n\t\t

          Contains a sequence of one or more X.500 relative distinguished\n\t\t\tnames\n\t\t\t(RDNs),\n\t\t\teach of which consists of an object identifier (OID) and\n\t\t\ta\n\t\t\tvalue. For more information, see NIST’s definition of \n\t\t\tObject\n\t\t\t\tIdentifier\n\t\t\t\t(OID).

          \n\t\t\n\t\t \n\t\t\t

          Custom attributes cannot be used in combination with standard attributes.

          \n\t\t
          " + "smithy.api#documentation": "

          \n\t\t

          Contains a sequence of one or more X.500 relative distinguished names (RDNs), each of\n\t\t\twhich consists of an object identifier (OID) and a value. For more information, see\n\t\t\tNIST’s definition of Object Identifier (OID).

          \n\t\t \n\t\t\t

          Custom attributes cannot be used in combination with standard attributes.

          \n\t\t
          " } } }, @@ -817,7 +817,7 @@ "KeyStorageSecurityStandard": { "target": "com.amazonaws.acmpca#KeyStorageSecurityStandard", "traits": { - "smithy.api#documentation": "

          Specifies a\n\t\t\tcryptographic key management compliance standard used for handling CA keys.

          \n\t\t

          Default: FIPS_140_2_LEVEL_3_OR_HIGHER

          \n\t\t

          Note: FIPS_140_2_LEVEL_3_OR_HIGHER is not supported in Region\n\t\t\tap-northeast-3. When creating a CA in the ap-northeast-3, you must provide\n\t\t\t\tFIPS_140_2_LEVEL_2_OR_HIGHER as the argument for\n\t\t\t\tKeyStorageSecurityStandard. Failure to do this results in an\n\t\t\t\tInvalidArgsException with the message, \"A certificate authority cannot\n\t\t\tbe created in this region with the specified security standard.\"

          " + "smithy.api#documentation": "

          Specifies a cryptographic key management compliance standard used for handling CA\n\t\t\tkeys.

          \n\t\t

          Default: FIPS_140_2_LEVEL_3_OR_HIGHER

          \n\t\t

          \n Note:\n\t\t\t FIPS_140_2_LEVEL_3_OR_HIGHER is not supported in the following\n\t\t\tRegions:

          \n\t\t
            \n
          • \n\t\t\t\t

            ap-northeast-3

            \n\t\t\t
          • \n
          • \n\t\t\t\t

            ap-southeast-3

            \n\t\t\t
          • \n
          \n\t\t

          When creating a CA in these Regions, you must provide\n\t\t\t\tFIPS_140_2_LEVEL_2_OR_HIGHER as the argument for\n\t\t\t\tKeyStorageSecurityStandard. Failure to do this results in an\n\t\t\t\tInvalidArgsException with the message, \"A certificate authority cannot\n\t\t\tbe created in this region with the specified security standard.\"

          " } }, "Tags": { @@ -844,6 +844,9 @@ "input": { "target": "com.amazonaws.acmpca#CreatePermissionRequest" }, + "output": { + "target": "smithy.api#Unit" + }, "errors": [ { "target": "com.amazonaws.acmpca#InvalidArnException" @@ -938,7 +941,7 @@ } }, "traits": { - "smithy.api#documentation": "

          Contains configuration information for a certificate revocation list (CRL). Your\n\t\t\tprivate certificate authority (CA) creates base CRLs. Delta CRLs are not supported. You\n\t\t\tcan enable CRLs for your new or an existing private CA by setting the Enabled parameter to true. Your private CA\n\t\t\twrites CRLs to an S3 bucket that you specify in the S3BucketName parameter. You can hide the name of your bucket by\n\t\t\tspecifying a value for the CustomCname parameter. Your\n\t\t\tprivate CA copies the CNAME or the S3 bucket name to the CRL\n\t\t\t\tDistribution Points extension of each certificate it issues. Your S3\n\t\t\tbucket policy must give write permission to ACM Private CA.

          \n\t\t

          ACM Private CA assets that are stored in Amazon S3 can be protected with encryption. \n For more information, see Encrypting Your\n\t\t\tCRLs.

          \n\t\t

          Your private CA uses the value in the ExpirationInDays parameter to calculate the nextUpdate field in the CRL. The CRL is refreshed prior to a\n\t\t\tcertificate's expiration date or when a certificate is revoked. When a certificate is\n\t\t\trevoked, it appears in the CRL until the certificate expires, and then in one additional\n\t\t\tCRL after expiration, and it always appears in the audit report.

          \n\n\t\t

          A CRL is typically updated approximately 30 minutes after a certificate \n\tis revoked. If for any reason a CRL update fails, ACM Private CA makes further attempts \n\tevery 15 minutes.

          \n\n\t\t

          CRLs contain the following fields:

          \n\t\t
            \n
          • \n\t\t\t\t

            \n Version: The current version number defined\n\t\t\t\t\tin RFC 5280 is V2. The integer value is 0x1.

            \n\t\t\t
          • \n
          • \n\t\t\t\t

            \n Signature Algorithm: The name of the\n\t\t\t\t\talgorithm used to sign the CRL.

            \n\t\t\t
          • \n
          • \n\t\t\t\t

            \n Issuer: The X.500 distinguished name of your\n\t\t\t\t\tprivate CA that issued the CRL.

            \n\t\t\t
          • \n
          • \n\t\t\t\t

            \n Last Update: The issue date and time of this\n\t\t\t\t\tCRL.

            \n\t\t\t
          • \n
          • \n\t\t\t\t

            \n Next Update: The day and time by which the\n\t\t\t\t\tnext CRL will be issued.

            \n\t\t\t
          • \n
          • \n\t\t\t\t

            \n Revoked Certificates: List of revoked\n\t\t\t\t\tcertificates. Each list item contains the following information.

            \n\t\t\t\t
              \n
            • \n\t\t\t\t\t\t

              \n Serial Number: The serial number, in\n\t\t\t\t\t\t\thexadecimal format, of the revoked certificate.

              \n\t\t\t\t\t
            • \n
            • \n\t\t\t\t\t\t

              \n Revocation Date: Date and time the\n\t\t\t\t\t\t\tcertificate was revoked.

              \n\t\t\t\t\t
            • \n
            • \n\t\t\t\t\t\t

              \n CRL Entry Extensions: Optional\n\t\t\t\t\t\t\textensions for the CRL entry.

              \n\t\t\t\t\t\t
                \n
              • \n\t\t\t\t\t\t\t\t

                \n X509v3 CRL Reason Code:\n\t\t\t\t\t\t\t\t\tReason the certificate was revoked.

                \n\t\t\t\t\t\t\t
              • \n
              \n\t\t\t\t\t
            • \n
            \n\t\t\t
          • \n
          • \n\t\t\t\t

            \n CRL Extensions: Optional extensions for the\n\t\t\t\t\tCRL.

            \n\t\t\t\t
              \n
            • \n\t\t\t\t\t\t

              \n X509v3 Authority Key Identifier:\n\t\t\t\t\t\t\tIdentifies the public key associated with the private key used to sign\n\t\t\t\t\t\t\tthe certificate.

              \n\t\t\t\t\t
            • \n
            • \n\t\t\t\t\t\t

              \n X509v3 CRL Number:: Decimal sequence\n\t\t\t\t\t\t\tnumber for the CRL.

              \n\t\t\t\t\t
            • \n
            \n\t\t\t
          • \n
          • \n\t\t\t\t

            \n Signature Algorithm: Algorithm used by your\n\t\t\t\t\tprivate CA to sign the CRL.

            \n\t\t\t
          • \n
          • \n\t\t\t\t

            \n Signature Value: Signature computed over the\n\t\t\t\t\tCRL.

            \n\t\t\t
          • \n
          \n\t\t

          Certificate revocation lists created by ACM Private CA are DER-encoded. You can use the\n\t\t\tfollowing OpenSSL command to list a CRL.

          \n\t\t

          \n openssl crl -inform DER -text -in crl_path\n\t\t\t-noout\n

          \n\t\t

          For more information, see Planning a certificate revocation list\n\t\t\t\t(CRL) in the Certificate Manager Private Certificate Authority (PCA) User Guide\n

          " + "smithy.api#documentation": "

          Contains configuration information for a certificate revocation list (CRL). Your\n\t\t\tprivate certificate authority (CA) creates base CRLs. Delta CRLs are not supported. You\n\t\t\tcan enable CRLs for your new or an existing private CA by setting the Enabled parameter to true. Your private CA\n\t\t\twrites CRLs to an S3 bucket that you specify in the S3BucketName parameter. You can hide the name of your bucket by\n\t\t\tspecifying a value for the CustomCname parameter. Your\n\t\t\tprivate CA copies the CNAME or the S3 bucket name to the CRL\n\t\t\t\tDistribution Points extension of each certificate it issues. Your S3\n\t\t\tbucket policy must give write permission to ACM Private CA.

          \n\t\t

          ACM Private CA assets that are stored in Amazon S3 can be protected with encryption. \n For more information, see Encrypting Your\n\t\t\tCRLs.

          \n\t\t

          Your private CA uses the value in the ExpirationInDays parameter to calculate the nextUpdate field in the CRL. The CRL is refreshed prior to a\n\t\t\tcertificate's expiration date or when a certificate is revoked. When a certificate is\n\t\t\trevoked, it appears in the CRL until the certificate expires, and then in one additional\n\t\t\tCRL after expiration, and it always appears in the audit report.

          \n\t\t

          A CRL is typically updated approximately 30 minutes after a certificate \n\tis revoked. If for any reason a CRL update fails, ACM Private CA makes further attempts \n\tevery 15 minutes.

          \n\t\t

          CRLs contain the following fields:

          \n\t\t
            \n
          • \n\t\t\t\t

            \n Version: The current version number defined\n\t\t\t\t\tin RFC 5280 is V2. The integer value is 0x1.

            \n\t\t\t
          • \n
          • \n\t\t\t\t

            \n Signature Algorithm: The name of the\n\t\t\t\t\talgorithm used to sign the CRL.

            \n\t\t\t
          • \n
          • \n\t\t\t\t

            \n Issuer: The X.500 distinguished name of your\n\t\t\t\t\tprivate CA that issued the CRL.

            \n\t\t\t
          • \n
          • \n\t\t\t\t

            \n Last Update: The issue date and time of this\n\t\t\t\t\tCRL.

            \n\t\t\t
          • \n
          • \n\t\t\t\t

            \n Next Update: The day and time by which the\n\t\t\t\t\tnext CRL will be issued.

            \n\t\t\t
          • \n
          • \n\t\t\t\t

            \n Revoked Certificates: List of revoked\n\t\t\t\t\tcertificates. Each list item contains the following information.

            \n\t\t\t\t
              \n
            • \n\t\t\t\t\t\t

              \n Serial Number: The serial number, in\n\t\t\t\t\t\t\thexadecimal format, of the revoked certificate.

              \n\t\t\t\t\t
            • \n
            • \n\t\t\t\t\t\t

              \n Revocation Date: Date and time the\n\t\t\t\t\t\t\tcertificate was revoked.

              \n\t\t\t\t\t
            • \n
            • \n\t\t\t\t\t\t

              \n CRL Entry Extensions: Optional\n\t\t\t\t\t\t\textensions for the CRL entry.

              \n\t\t\t\t\t\t
                \n
              • \n\t\t\t\t\t\t\t\t

                \n X509v3 CRL Reason Code:\n\t\t\t\t\t\t\t\t\tReason the certificate was revoked.

                \n\t\t\t\t\t\t\t
              • \n
              \n\t\t\t\t\t
            • \n
            \n\t\t\t
          • \n
          • \n\t\t\t\t

            \n CRL Extensions: Optional extensions for the\n\t\t\t\t\tCRL.

            \n\t\t\t\t
              \n
            • \n\t\t\t\t\t\t

              \n X509v3 Authority Key Identifier:\n\t\t\t\t\t\t\tIdentifies the public key associated with the private key used to sign\n\t\t\t\t\t\t\tthe certificate.

              \n\t\t\t\t\t
            • \n
            • \n\t\t\t\t\t\t

              \n X509v3 CRL Number:: Decimal sequence\n\t\t\t\t\t\t\tnumber for the CRL.

              \n\t\t\t\t\t
            • \n
            \n\t\t\t
          • \n
          • \n\t\t\t\t

            \n Signature Algorithm: Algorithm used by your\n\t\t\t\t\tprivate CA to sign the CRL.

            \n\t\t\t
          • \n
          • \n\t\t\t\t

            \n Signature Value: Signature computed over the\n\t\t\t\t\tCRL.

            \n\t\t\t
          • \n
          \n\t\t

          Certificate revocation lists created by ACM Private CA are DER-encoded. You can use the\n\t\t\tfollowing OpenSSL command to list a CRL.

          \n\t\t

          \n openssl crl -inform DER -text -in crl_path\n\t\t\t-noout\n

          \n\t\t

          For more information, see Planning a certificate revocation list\n\t\t\t\t(CRL) in the Certificate Manager Private Certificate Authority (PCA) User Guide\n

          " } }, "com.amazonaws.acmpca#CsrBlob": { @@ -979,14 +982,14 @@ "ObjectIdentifier": { "target": "com.amazonaws.acmpca#CustomObjectIdentifier", "traits": { - "smithy.api#documentation": "

          Specifies the object identifier (OID) of the attribute type of\n\t\t\tthe\n\t\t\trelative distinguished name\n\t\t\t(RDN).

          ", + "smithy.api#documentation": "

          Specifies the object identifier (OID) of the attribute type of the relative\n\t\t\tdistinguished name (RDN).

          ", "smithy.api#required": {} } }, "Value": { "target": "com.amazonaws.acmpca#String1To256", "traits": { - "smithy.api#documentation": "

          \n\t\t

          Specifies the attribute value of relative distinguished name\n\t\t\t(RDN).

          ", + "smithy.api#documentation": "

          \n\t\t

          Specifies the attribute value of relative distinguished name (RDN).

          ", "smithy.api#required": {} } } @@ -1013,14 +1016,14 @@ "ObjectIdentifier": { "target": "com.amazonaws.acmpca#CustomObjectIdentifier", "traits": { - "smithy.api#documentation": "

          \n\t\t

          Specifies the object identifier (OID) of the X.509 extension. For more information,\n\t\t\tsee the\n\t\t\t\tGlobal OID reference\n\t\t\t\tdatabase.\n\t\t

          ", + "smithy.api#documentation": "

          \n\t\t

          Specifies the object identifier (OID) of the X.509 extension. For more information, see the\n\t\t\t\tGlobal OID reference database.\n\t\t

          ", "smithy.api#required": {} } }, "Value": { "target": "com.amazonaws.acmpca#Base64String1To4096", "traits": { - "smithy.api#documentation": "

          \n\t\t

          Specifies the base64-encoded value of the X.509\n\t\t\textension.

          ", + "smithy.api#documentation": "

          \n\t\t

          Specifies the base64-encoded value of the X.509 extension.

          ", "smithy.api#required": {} } }, @@ -1028,12 +1031,12 @@ "target": "com.amazonaws.acmpca#Boolean", "traits": { "smithy.api#box": {}, - "smithy.api#documentation": "

          \n\t\t

          Specifies the critical flag of\n\t\t\tthe\n\t\t\tX.509\n\t\t\textension.

          " + "smithy.api#documentation": "

          \n\t\t

          Specifies the critical flag of the X.509 extension.

          " } } }, "traits": { - "smithy.api#documentation": "

          \n\t\t

          Specifies the X.509 extension information for a\n\t\t\tcertificate.

          \n\t\t

          Extensions present in CustomExtensions follow the\n\t\t\t\tApiPassthrough\n\t\t\t template\n\t\t\t\trules.

          " + "smithy.api#documentation": "

          \n\t\t

          Specifies the X.509 extension information for a certificate.

          \n\t\t

          Extensions present in CustomExtensions follow the\n\t\t\t\tApiPassthrough\n\t\t\t template\n\t\t\t\trules.

          " } }, "com.amazonaws.acmpca#CustomExtensionList": { @@ -1063,6 +1066,9 @@ "input": { "target": "com.amazonaws.acmpca#DeleteCertificateAuthorityRequest" }, + "output": { + "target": "smithy.api#Unit" + }, "errors": [ { "target": "com.amazonaws.acmpca#ConcurrentModificationException" @@ -1104,6 +1110,9 @@ "input": { "target": "com.amazonaws.acmpca#DeletePermissionRequest" }, + "output": { + "target": "smithy.api#Unit" + }, "errors": [ { "target": "com.amazonaws.acmpca#InvalidArnException" @@ -1152,6 +1161,9 @@ "input": { "target": "com.amazonaws.acmpca#DeletePolicyRequest" }, + "output": { + "target": "smithy.api#Unit" + }, "errors": [ { "target": "com.amazonaws.acmpca#ConcurrentModificationException" @@ -1453,7 +1465,7 @@ "CustomExtensions": { "target": "com.amazonaws.acmpca#CustomExtensionList", "traits": { - "smithy.api#documentation": "

          \n\t\t

          Contains a sequence of one or more X.509 extensions, each of which consists of an\n\t\t\tobject identifier (OID), a base64-encoded\n\t\t\tvalue,\n\t\t\tand the\n\t\t\tcritical flag.\n\t\t\tFor\n\t\t\tmore information, see the Global OID reference\n\t\t\t\tdatabase.\n

          \n\t\t\n\t\t \n\t\t\t

          The OID value of a CustomExtension must not\n\t\t\t\tmatch the OID of a predefined extension.

          \n\t\t
          " + "smithy.api#documentation": "

          \n\t\t

          Contains a sequence of one or more X.509 extensions, each of which consists of an\n\t\t\tobject identifier (OID), a base64-encoded value, and the critical flag. For more\n\t\t\tinformation, see the Global OID reference\n\t\t\t\tdatabase.\n

          " } } }, @@ -1818,6 +1830,9 @@ "input": { "target": "com.amazonaws.acmpca#ImportCertificateAuthorityCertificateRequest" }, + "output": { + "target": "smithy.api#Unit" + }, "errors": [ { "target": "com.amazonaws.acmpca#CertificateMismatchException" @@ -2658,6 +2673,9 @@ "input": { "target": "com.amazonaws.acmpca#PutPolicyRequest" }, + "output": { + "target": "smithy.api#Unit" + }, "errors": [ { "target": "com.amazonaws.acmpca#ConcurrentModificationException" @@ -2787,6 +2805,9 @@ "input": { "target": "com.amazonaws.acmpca#RestoreCertificateAuthorityRequest" }, + "output": { + "target": "smithy.api#Unit" + }, "errors": [ { "target": "com.amazonaws.acmpca#InvalidArnException" @@ -2878,6 +2899,9 @@ "input": { "target": "com.amazonaws.acmpca#RevokeCertificateRequest" }, + "output": { + "target": "smithy.api#Unit" + }, "errors": [ { "target": "com.amazonaws.acmpca#ConcurrentModificationException" @@ -3132,6 +3156,9 @@ "input": { "target": "com.amazonaws.acmpca#TagCertificateAuthorityRequest" }, + "output": { + "target": "smithy.api#Unit" + }, "errors": [ { "target": "com.amazonaws.acmpca#InvalidArnException" @@ -3221,6 +3248,9 @@ "input": { "target": "com.amazonaws.acmpca#UntagCertificateAuthorityRequest" }, + "output": { + "target": "smithy.api#Unit" + }, "errors": [ { "target": "com.amazonaws.acmpca#InvalidArnException" @@ -3263,6 +3293,9 @@ "input": { "target": "com.amazonaws.acmpca#UpdateCertificateAuthorityRequest" }, + "output": { + "target": "smithy.api#Unit" + }, "errors": [ { "target": "com.amazonaws.acmpca#ConcurrentModificationException" From e67364c42bb347d50bd98d1a3be547dae722cea8 Mon Sep 17 00:00:00 2001 From: awstools Date: Wed, 20 Jul 2022 18:23:00 +0000 Subject: [PATCH 18/31] feat(clients): update client endpoints as of 2022-07-20 --- .../amazon/smithy/aws/typescript/codegen/endpoints.json | 4 ++++ 1 file changed, 4 insertions(+) diff --git 
a/codegen/smithy-aws-typescript-codegen/src/main/resources/software/amazon/smithy/aws/typescript/codegen/endpoints.json b/codegen/smithy-aws-typescript-codegen/src/main/resources/software/amazon/smithy/aws/typescript/codegen/endpoints.json index 02dfa5228f61e..1d9b779d874cd 100644 --- a/codegen/smithy-aws-typescript-codegen/src/main/resources/software/amazon/smithy/aws/typescript/codegen/endpoints.json +++ b/codegen/smithy-aws-typescript-codegen/src/main/resources/software/amazon/smithy/aws/typescript/codegen/endpoints.json @@ -15530,6 +15530,10 @@ }, "workspaces-web": { "endpoints": { + "ap-northeast-1": {}, + "ap-south-1": {}, + "ap-southeast-1": {}, + "ap-southeast-2": {}, "eu-west-1": {}, "us-east-1": {}, "us-west-2": {} From 5f303e3b43305fa416f638f78fc41bdce4cf6aef Mon Sep 17 00:00:00 2001 From: awstools Date: Wed, 20 Jul 2022 18:40:30 +0000 Subject: [PATCH 19/31] Publish v3.134.0 --- CHANGELOG.md | 12 ++++++++++++ clients/client-acm-pca/CHANGELOG.md | 8 ++++++++ clients/client-acm-pca/package.json | 2 +- clients/client-iot/CHANGELOG.md | 11 +++++++++++ clients/client-iot/package.json | 2 +- lerna.json | 2 +- 6 files changed, 34 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index bd0e08feec1e3..f4e6b194aee2e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,18 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. +# [3.134.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.133.0...v3.134.0) (2022-07-20) + + +### Features + +* **client-iot:** GA release the ability to enable/disable IoT Fleet Indexing for Device Defender and Named Shadow information, and search them through IoT Fleet Indexing APIs. This includes Named Shadow Selection as a part of the UpdateIndexingConfiguration API. 
([358a73e](https://github.com/aws/aws-sdk-js-v3/commit/358a73e37eca1782a115e9c304bdc27889fc7026)) +* **clients:** update client endpoints as of 2022-07-20 ([e67364c](https://github.com/aws/aws-sdk-js-v3/commit/e67364c42bb347d50bd98d1a3be547dae722cea8)) + + + + + # [3.133.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.132.0...v3.133.0) (2022-07-19) diff --git a/clients/client-acm-pca/CHANGELOG.md b/clients/client-acm-pca/CHANGELOG.md index 57cdb708def60..a2753394c09df 100644 --- a/clients/client-acm-pca/CHANGELOG.md +++ b/clients/client-acm-pca/CHANGELOG.md @@ -3,6 +3,14 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. +# [3.134.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.133.0...v3.134.0) (2022-07-20) + +**Note:** Version bump only for package @aws-sdk/client-acm-pca + + + + + # [3.131.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.130.0...v3.131.0) (2022-07-15) **Note:** Version bump only for package @aws-sdk/client-acm-pca diff --git a/clients/client-acm-pca/package.json b/clients/client-acm-pca/package.json index c676f34ca793b..f7929f7e92acd 100644 --- a/clients/client-acm-pca/package.json +++ b/clients/client-acm-pca/package.json @@ -1,7 +1,7 @@ { "name": "@aws-sdk/client-acm-pca", "description": "AWS SDK for JavaScript Acm Pca Client for Node.js, Browser and React Native", - "version": "3.131.0", + "version": "3.134.0", "scripts": { "build": "concurrently 'yarn:build:cjs' 'yarn:build:es' 'yarn:build:types'", "build:cjs": "tsc -p tsconfig.cjs.json", diff --git a/clients/client-iot/CHANGELOG.md b/clients/client-iot/CHANGELOG.md index 56d7b02c0ec7e..626154b0ca7a5 100644 --- a/clients/client-iot/CHANGELOG.md +++ b/clients/client-iot/CHANGELOG.md @@ -3,6 +3,17 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. 
+# [3.134.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.133.0...v3.134.0) (2022-07-20) + + +### Features + +* **client-iot:** GA release the ability to enable/disable IoT Fleet Indexing for Device Defender and Named Shadow information, and search them through IoT Fleet Indexing APIs. This includes Named Shadow Selection as a part of the UpdateIndexingConfiguration API. ([358a73e](https://github.com/aws/aws-sdk-js-v3/commit/358a73e37eca1782a115e9c304bdc27889fc7026)) + + + + + # [3.131.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.130.0...v3.131.0) (2022-07-15) **Note:** Version bump only for package @aws-sdk/client-iot diff --git a/clients/client-iot/package.json b/clients/client-iot/package.json index b2ea0fcfa7bf9..1e72c9da328f7 100644 --- a/clients/client-iot/package.json +++ b/clients/client-iot/package.json @@ -1,7 +1,7 @@ { "name": "@aws-sdk/client-iot", "description": "AWS SDK for JavaScript Iot Client for Node.js, Browser and React Native", - "version": "3.131.0", + "version": "3.134.0", "scripts": { "build": "concurrently 'yarn:build:cjs' 'yarn:build:es' 'yarn:build:types'", "build:cjs": "tsc -p tsconfig.cjs.json", diff --git a/lerna.json b/lerna.json index 6c4ba7606a5c1..98f7010b674e1 100644 --- a/lerna.json +++ b/lerna.json @@ -1,5 +1,5 @@ { - "version": "3.133.0", + "version": "3.134.0", "npmClient": "yarn", "useWorkspaces": true, "command": { From 28ce22a2a8d6dbd3ccd11017b0b49cd3e69a56d8 Mon Sep 17 00:00:00 2001 From: AllanZhengYP Date: Thu, 21 Jul 2022 09:30:56 -0700 Subject: [PATCH 20/31] chore(clients): refactor eventstream code generation (#3805) * chore(codegen): refactor eventstream code generation * chore(clients): refactor eventstream code generation --- .../src/protocols/Aws_restJson1.ts | 800 +++++++----------- .../client-s3/src/protocols/Aws_restXml.ts | 214 ++--- clients/client-s3/test/e2e/S3.ispec.ts | 1 + .../src/protocols/Aws_restJson1.ts | 551 +++++------- .../aws/typescript/codegen/AwsRestXml.java | 2 +- 
.../codegen/RestJsonProtocolGenerator.java | 2 +- 6 files changed, 568 insertions(+), 1002 deletions(-) diff --git a/clients/client-lex-runtime-v2/src/protocols/Aws_restJson1.ts b/clients/client-lex-runtime-v2/src/protocols/Aws_restJson1.ts index 37fd8023df02e..ba25f388a3d67 100644 --- a/clients/client-lex-runtime-v2/src/protocols/Aws_restJson1.ts +++ b/clients/client-lex-runtime-v2/src/protocols/Aws_restJson1.ts @@ -13,6 +13,7 @@ import { Endpoint as __Endpoint, EventStreamSerdeContext as __EventStreamSerdeContext, Message as __Message, + MessageHeaders as __MessageHeaders, ResponseMetadata as __ResponseMetadata, SerdeContext as __SerdeContext, } from "@aws-sdk/types"; @@ -439,9 +440,7 @@ export const serializeAws_restJson1StartConversationCommand = async ( } let body: any; if (input.requestEventStream !== undefined) { - body = context.eventStreamMarshaller.serialize(input.requestEventStream, (event) => - serializeAws_restJson1StartConversationRequestEventStream_event(event, context) - ); + body = serializeAws_restJson1StartConversationRequestEventStream(input.requestEventStream, context); } return new __HttpRequest({ protocol, @@ -868,22 +867,8 @@ export const deserializeAws_restJson1StartConversationCommand = async ( $metadata: deserializeMetadata(output), responseEventStream: undefined, }; - const data: any = context.eventStreamMarshaller.deserialize(output.body, async (event) => { - const eventName = Object.keys(event)[0]; - const eventHeaders = Object.entries(event[eventName].headers).reduce((accummulator, curr) => { - accummulator[curr[0]] = curr[1].value; - return accummulator; - }, {} as Record); - const eventMessage = { - headers: eventHeaders, - body: event[eventName].body, - }; - const parsedEvent = { - [eventName]: eventMessage, - }; - return await deserializeAws_restJson1StartConversationResponseEventStream_event(parsedEvent, context); - }); - contents.responseEventStream = data; + const data: any = output.body; + contents.responseEventStream = 
deserializeAws_restJson1StartConversationResponseEventStream(data, context); return Promise.resolve(contents); }; @@ -923,206 +908,329 @@ const deserializeAws_restJson1StartConversationCommandError = async ( } }; -const serializeAws_restJson1StartConversationRequestEventStream_event = ( - input: any, +const deserializeAws_restJson1AccessDeniedExceptionResponse = async ( + parsedOutput: any, context: __SerdeContext -): __Message => { - return StartConversationRequestEventStream.visit(input, { - ConfigurationEvent: (value) => serializeAws_restJson1ConfigurationEvent_event(value, context), - AudioInputEvent: (value) => serializeAws_restJson1AudioInputEvent_event(value, context), - DTMFInputEvent: (value) => serializeAws_restJson1DTMFInputEvent_event(value, context), - TextInputEvent: (value) => serializeAws_restJson1TextInputEvent_event(value, context), - PlaybackCompletionEvent: (value) => serializeAws_restJson1PlaybackCompletionEvent_event(value, context), - DisconnectionEvent: (value) => serializeAws_restJson1DisconnectionEvent_event(value, context), - _: (value) => value as any, +): Promise => { + const contents: any = {}; + const data: any = parsedOutput.body; + if (data.message !== undefined && data.message !== null) { + contents.message = __expectString(data.message); + } + const exception = new AccessDeniedException({ + $metadata: deserializeMetadata(parsedOutput), + ...contents, }); + return __decorateServiceException(exception, parsedOutput.body); }; -const deserializeAws_restJson1StartConversationResponseEventStream_event = async ( - output: any, + +const deserializeAws_restJson1BadGatewayExceptionResponse = async ( + parsedOutput: any, context: __SerdeContext -): Promise => { - if (output["PlaybackInterruptionEvent"] !== undefined) { - return { - PlaybackInterruptionEvent: await deserializeAws_restJson1PlaybackInterruptionEvent_event( - output["PlaybackInterruptionEvent"], - context - ), - }; - } - if (output["TranscriptEvent"] !== undefined) { - return { 
- TranscriptEvent: await deserializeAws_restJson1TranscriptEvent_event(output["TranscriptEvent"], context), - }; - } - if (output["IntentResultEvent"] !== undefined) { - return { - IntentResultEvent: await deserializeAws_restJson1IntentResultEvent_event(output["IntentResultEvent"], context), - }; - } - if (output["TextResponseEvent"] !== undefined) { - return { - TextResponseEvent: await deserializeAws_restJson1TextResponseEvent_event(output["TextResponseEvent"], context), - }; - } - if (output["AudioResponseEvent"] !== undefined) { - return { - AudioResponseEvent: await deserializeAws_restJson1AudioResponseEvent_event(output["AudioResponseEvent"], context), - }; - } - if (output["HeartbeatEvent"] !== undefined) { - return { - HeartbeatEvent: await deserializeAws_restJson1HeartbeatEvent_event(output["HeartbeatEvent"], context), - }; - } - if (output["AccessDeniedException"] !== undefined) { - return { - AccessDeniedException: await deserializeAws_restJson1AccessDeniedException_event( - output["AccessDeniedException"], - context - ), - }; - } - if (output["ResourceNotFoundException"] !== undefined) { - return { - ResourceNotFoundException: await deserializeAws_restJson1ResourceNotFoundException_event( - output["ResourceNotFoundException"], - context - ), - }; +): Promise => { + const contents: any = {}; + const data: any = parsedOutput.body; + if (data.message !== undefined && data.message !== null) { + contents.message = __expectString(data.message); } - if (output["ValidationException"] !== undefined) { - return { - ValidationException: await deserializeAws_restJson1ValidationException_event( - output["ValidationException"], - context - ), - }; + const exception = new BadGatewayException({ + $metadata: deserializeMetadata(parsedOutput), + ...contents, + }); + return __decorateServiceException(exception, parsedOutput.body); +}; + +const deserializeAws_restJson1ConflictExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + 
const contents: any = {}; + const data: any = parsedOutput.body; + if (data.message !== undefined && data.message !== null) { + contents.message = __expectString(data.message); } - if (output["ThrottlingException"] !== undefined) { - return { - ThrottlingException: await deserializeAws_restJson1ThrottlingException_event( - output["ThrottlingException"], - context - ), - }; + const exception = new ConflictException({ + $metadata: deserializeMetadata(parsedOutput), + ...contents, + }); + return __decorateServiceException(exception, parsedOutput.body); +}; + +const deserializeAws_restJson1DependencyFailedExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: any = {}; + const data: any = parsedOutput.body; + if (data.message !== undefined && data.message !== null) { + contents.message = __expectString(data.message); } - if (output["InternalServerException"] !== undefined) { - return { - InternalServerException: await deserializeAws_restJson1InternalServerException_event( - output["InternalServerException"], - context - ), - }; + const exception = new DependencyFailedException({ + $metadata: deserializeMetadata(parsedOutput), + ...contents, + }); + return __decorateServiceException(exception, parsedOutput.body); +}; + +const deserializeAws_restJson1InternalServerExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: any = {}; + const data: any = parsedOutput.body; + if (data.message !== undefined && data.message !== null) { + contents.message = __expectString(data.message); } - if (output["ConflictException"] !== undefined) { - return { - ConflictException: await deserializeAws_restJson1ConflictException_event(output["ConflictException"], context), - }; + const exception = new InternalServerException({ + $metadata: deserializeMetadata(parsedOutput), + ...contents, + }); + return __decorateServiceException(exception, parsedOutput.body); +}; + +const 
deserializeAws_restJson1ResourceNotFoundExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: any = {}; + const data: any = parsedOutput.body; + if (data.message !== undefined && data.message !== null) { + contents.message = __expectString(data.message); } - if (output["DependencyFailedException"] !== undefined) { - return { - DependencyFailedException: await deserializeAws_restJson1DependencyFailedException_event( - output["DependencyFailedException"], - context - ), - }; + const exception = new ResourceNotFoundException({ + $metadata: deserializeMetadata(parsedOutput), + ...contents, + }); + return __decorateServiceException(exception, parsedOutput.body); +}; + +const deserializeAws_restJson1ThrottlingExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: any = {}; + const data: any = parsedOutput.body; + if (data.message !== undefined && data.message !== null) { + contents.message = __expectString(data.message); } - if (output["BadGatewayException"] !== undefined) { - return { - BadGatewayException: await deserializeAws_restJson1BadGatewayException_event( - output["BadGatewayException"], - context - ), - }; + const exception = new ThrottlingException({ + $metadata: deserializeMetadata(parsedOutput), + ...contents, + }); + return __decorateServiceException(exception, parsedOutput.body); +}; + +const deserializeAws_restJson1ValidationExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: any = {}; + const data: any = parsedOutput.body; + if (data.message !== undefined && data.message !== null) { + contents.message = __expectString(data.message); } - return { $unknown: output }; + const exception = new ValidationException({ + $metadata: deserializeMetadata(parsedOutput), + ...contents, + }); + return __decorateServiceException(exception, parsedOutput.body); +}; + +const 
serializeAws_restJson1StartConversationRequestEventStream = ( + input: any, + context: __SerdeContext & __EventStreamSerdeContext +): any => { + const eventMarshallingVisitor = (event: any): __Message => + StartConversationRequestEventStream.visit(event, { + ConfigurationEvent: (value) => serializeAws_restJson1ConfigurationEvent_event(value, context), + AudioInputEvent: (value) => serializeAws_restJson1AudioInputEvent_event(value, context), + DTMFInputEvent: (value) => serializeAws_restJson1DTMFInputEvent_event(value, context), + TextInputEvent: (value) => serializeAws_restJson1TextInputEvent_event(value, context), + PlaybackCompletionEvent: (value) => serializeAws_restJson1PlaybackCompletionEvent_event(value, context), + DisconnectionEvent: (value) => serializeAws_restJson1DisconnectionEvent_event(value, context), + _: (value) => value as any, + }); + return context.eventStreamMarshaller.serialize(input, eventMarshallingVisitor); }; const serializeAws_restJson1AudioInputEvent_event = (input: AudioInputEvent, context: __SerdeContext): __Message => { - const message: __Message = { - headers: { - ":event-type": { type: "string", value: "AudioInputEvent" }, - ":message-type": { type: "string", value: "event" }, - ":content-type": { type: "string", value: "application/json" }, - }, - body: new Uint8Array(), + const headers: __MessageHeaders = { + ":event-type": { type: "string", value: "AudioInputEvent" }, + ":message-type": { type: "string", value: "event" }, + ":content-type": { type: "string", value: "application/json" }, }; - const body = serializeAws_restJson1AudioInputEvent(input, context); - message.body = context.utf8Decoder(JSON.stringify(body)); - return message; + let body = new Uint8Array(); + body = serializeAws_restJson1AudioInputEvent(input, context); + body = context.utf8Decoder(JSON.stringify(body)); + return { headers, body }; }; const serializeAws_restJson1ConfigurationEvent_event = ( input: ConfigurationEvent, context: __SerdeContext ): __Message => 
{ - const message: __Message = { - headers: { - ":event-type": { type: "string", value: "ConfigurationEvent" }, - ":message-type": { type: "string", value: "event" }, - ":content-type": { type: "string", value: "application/json" }, - }, - body: new Uint8Array(), + const headers: __MessageHeaders = { + ":event-type": { type: "string", value: "ConfigurationEvent" }, + ":message-type": { type: "string", value: "event" }, + ":content-type": { type: "string", value: "application/json" }, }; - const body = serializeAws_restJson1ConfigurationEvent(input, context); - message.body = context.utf8Decoder(JSON.stringify(body)); - return message; + let body = new Uint8Array(); + body = serializeAws_restJson1ConfigurationEvent(input, context); + body = context.utf8Decoder(JSON.stringify(body)); + return { headers, body }; }; const serializeAws_restJson1DisconnectionEvent_event = ( input: DisconnectionEvent, context: __SerdeContext ): __Message => { - const message: __Message = { - headers: { - ":event-type": { type: "string", value: "DisconnectionEvent" }, - ":message-type": { type: "string", value: "event" }, - ":content-type": { type: "string", value: "application/json" }, - }, - body: new Uint8Array(), + const headers: __MessageHeaders = { + ":event-type": { type: "string", value: "DisconnectionEvent" }, + ":message-type": { type: "string", value: "event" }, + ":content-type": { type: "string", value: "application/json" }, }; - const body = serializeAws_restJson1DisconnectionEvent(input, context); - message.body = context.utf8Decoder(JSON.stringify(body)); - return message; + let body = new Uint8Array(); + body = serializeAws_restJson1DisconnectionEvent(input, context); + body = context.utf8Decoder(JSON.stringify(body)); + return { headers, body }; }; const serializeAws_restJson1DTMFInputEvent_event = (input: DTMFInputEvent, context: __SerdeContext): __Message => { - const message: __Message = { - headers: { - ":event-type": { type: "string", value: "DTMFInputEvent" }, - 
":message-type": { type: "string", value: "event" }, - ":content-type": { type: "string", value: "application/json" }, - }, - body: new Uint8Array(), + const headers: __MessageHeaders = { + ":event-type": { type: "string", value: "DTMFInputEvent" }, + ":message-type": { type: "string", value: "event" }, + ":content-type": { type: "string", value: "application/json" }, }; - const body = serializeAws_restJson1DTMFInputEvent(input, context); - message.body = context.utf8Decoder(JSON.stringify(body)); - return message; + let body = new Uint8Array(); + body = serializeAws_restJson1DTMFInputEvent(input, context); + body = context.utf8Decoder(JSON.stringify(body)); + return { headers, body }; }; const serializeAws_restJson1PlaybackCompletionEvent_event = ( input: PlaybackCompletionEvent, context: __SerdeContext ): __Message => { - const message: __Message = { - headers: { - ":event-type": { type: "string", value: "PlaybackCompletionEvent" }, - ":message-type": { type: "string", value: "event" }, - ":content-type": { type: "string", value: "application/json" }, - }, - body: new Uint8Array(), + const headers: __MessageHeaders = { + ":event-type": { type: "string", value: "PlaybackCompletionEvent" }, + ":message-type": { type: "string", value: "event" }, + ":content-type": { type: "string", value: "application/json" }, }; - const body = serializeAws_restJson1PlaybackCompletionEvent(input, context); - message.body = context.utf8Decoder(JSON.stringify(body)); - return message; + let body = new Uint8Array(); + body = serializeAws_restJson1PlaybackCompletionEvent(input, context); + body = context.utf8Decoder(JSON.stringify(body)); + return { headers, body }; }; const serializeAws_restJson1TextInputEvent_event = (input: TextInputEvent, context: __SerdeContext): __Message => { - const message: __Message = { - headers: { - ":event-type": { type: "string", value: "TextInputEvent" }, - ":message-type": { type: "string", value: "event" }, - ":content-type": { type: "string", value: 
"application/json" }, - }, - body: new Uint8Array(), + const headers: __MessageHeaders = { + ":event-type": { type: "string", value: "TextInputEvent" }, + ":message-type": { type: "string", value: "event" }, + ":content-type": { type: "string", value: "application/json" }, }; - const body = serializeAws_restJson1TextInputEvent(input, context); - message.body = context.utf8Decoder(JSON.stringify(body)); - return message; + let body = new Uint8Array(); + body = serializeAws_restJson1TextInputEvent(input, context); + body = context.utf8Decoder(JSON.stringify(body)); + return { headers, body }; +}; +const deserializeAws_restJson1StartConversationResponseEventStream = ( + output: any, + context: __SerdeContext & __EventStreamSerdeContext +): AsyncIterable => { + return context.eventStreamMarshaller.deserialize(output, async (event) => { + if (event["PlaybackInterruptionEvent"] != null) { + return { + PlaybackInterruptionEvent: await deserializeAws_restJson1PlaybackInterruptionEvent_event( + event["PlaybackInterruptionEvent"], + context + ), + }; + } + if (event["TranscriptEvent"] != null) { + return { + TranscriptEvent: await deserializeAws_restJson1TranscriptEvent_event(event["TranscriptEvent"], context), + }; + } + if (event["IntentResultEvent"] != null) { + return { + IntentResultEvent: await deserializeAws_restJson1IntentResultEvent_event(event["IntentResultEvent"], context), + }; + } + if (event["TextResponseEvent"] != null) { + return { + TextResponseEvent: await deserializeAws_restJson1TextResponseEvent_event(event["TextResponseEvent"], context), + }; + } + if (event["AudioResponseEvent"] != null) { + return { + AudioResponseEvent: await deserializeAws_restJson1AudioResponseEvent_event( + event["AudioResponseEvent"], + context + ), + }; + } + if (event["HeartbeatEvent"] != null) { + return { + HeartbeatEvent: await deserializeAws_restJson1HeartbeatEvent_event(event["HeartbeatEvent"], context), + }; + } + if (event["AccessDeniedException"] != null) { + return { + 
AccessDeniedException: await deserializeAws_restJson1AccessDeniedException_event( + event["AccessDeniedException"], + context + ), + }; + } + if (event["ResourceNotFoundException"] != null) { + return { + ResourceNotFoundException: await deserializeAws_restJson1ResourceNotFoundException_event( + event["ResourceNotFoundException"], + context + ), + }; + } + if (event["ValidationException"] != null) { + return { + ValidationException: await deserializeAws_restJson1ValidationException_event( + event["ValidationException"], + context + ), + }; + } + if (event["ThrottlingException"] != null) { + return { + ThrottlingException: await deserializeAws_restJson1ThrottlingException_event( + event["ThrottlingException"], + context + ), + }; + } + if (event["InternalServerException"] != null) { + return { + InternalServerException: await deserializeAws_restJson1InternalServerException_event( + event["InternalServerException"], + context + ), + }; + } + if (event["ConflictException"] != null) { + return { + ConflictException: await deserializeAws_restJson1ConflictException_event(event["ConflictException"], context), + }; + } + if (event["DependencyFailedException"] != null) { + return { + DependencyFailedException: await deserializeAws_restJson1DependencyFailedException_event( + event["DependencyFailedException"], + context + ), + }; + } + if (event["BadGatewayException"] != null) { + return { + BadGatewayException: await deserializeAws_restJson1BadGatewayException_event( + event["BadGatewayException"], + context + ), + }; + } + return { $unknown: output }; + }); }; const deserializeAws_restJson1AccessDeniedException_event = async ( output: any, @@ -1138,12 +1246,9 @@ const deserializeAws_restJson1AudioResponseEvent_event = async ( output: any, context: __SerdeContext ): Promise => { - let contents: AudioResponseEvent = {} as any; + const contents: AudioResponseEvent = {} as any; const data: any = await parseBody(output.body, context); - contents = { - ...contents, - 
...deserializeAws_restJson1AudioResponseEvent(data, context), - } as any; + Object.assign(contents, deserializeAws_restJson1AudioResponseEvent(data, context)); return contents; }; const deserializeAws_restJson1BadGatewayException_event = async ( @@ -1180,24 +1285,18 @@ const deserializeAws_restJson1HeartbeatEvent_event = async ( output: any, context: __SerdeContext ): Promise => { - let contents: HeartbeatEvent = {} as any; + const contents: HeartbeatEvent = {} as any; const data: any = await parseBody(output.body, context); - contents = { - ...contents, - ...deserializeAws_restJson1HeartbeatEvent(data, context), - } as any; + Object.assign(contents, deserializeAws_restJson1HeartbeatEvent(data, context)); return contents; }; const deserializeAws_restJson1IntentResultEvent_event = async ( output: any, context: __SerdeContext ): Promise => { - let contents: IntentResultEvent = {} as any; + const contents: IntentResultEvent = {} as any; const data: any = await parseBody(output.body, context); - contents = { - ...contents, - ...deserializeAws_restJson1IntentResultEvent(data, context), - } as any; + Object.assign(contents, deserializeAws_restJson1IntentResultEvent(data, context)); return contents; }; const deserializeAws_restJson1InternalServerException_event = async ( @@ -1214,12 +1313,9 @@ const deserializeAws_restJson1PlaybackInterruptionEvent_event = async ( output: any, context: __SerdeContext ): Promise => { - let contents: PlaybackInterruptionEvent = {} as any; + const contents: PlaybackInterruptionEvent = {} as any; const data: any = await parseBody(output.body, context); - contents = { - ...contents, - ...deserializeAws_restJson1PlaybackInterruptionEvent(data, context), - } as any; + Object.assign(contents, deserializeAws_restJson1PlaybackInterruptionEvent(data, context)); return contents; }; const deserializeAws_restJson1ResourceNotFoundException_event = async ( @@ -1236,12 +1332,9 @@ const deserializeAws_restJson1TextResponseEvent_event = async ( output: any, 
context: __SerdeContext ): Promise => { - let contents: TextResponseEvent = {} as any; + const contents: TextResponseEvent = {} as any; const data: any = await parseBody(output.body, context); - contents = { - ...contents, - ...deserializeAws_restJson1TextResponseEvent(data, context), - } as any; + Object.assign(contents, deserializeAws_restJson1TextResponseEvent(data, context)); return contents; }; const deserializeAws_restJson1ThrottlingException_event = async ( @@ -1258,12 +1351,9 @@ const deserializeAws_restJson1TranscriptEvent_event = async ( output: any, context: __SerdeContext ): Promise => { - let contents: TranscriptEvent = {} as any; + const contents: TranscriptEvent = {} as any; const data: any = await parseBody(output.body, context); - contents = { - ...contents, - ...deserializeAws_restJson1TranscriptEvent(data, context), - } as any; + Object.assign(contents, deserializeAws_restJson1TranscriptEvent(data, context)); return contents; }; const deserializeAws_restJson1ValidationException_event = async ( @@ -1276,134 +1366,6 @@ const deserializeAws_restJson1ValidationException_event = async ( }; return deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context); }; -const deserializeAws_restJson1AccessDeniedExceptionResponse = async ( - parsedOutput: any, - context: __SerdeContext -): Promise => { - const contents: any = {}; - const data: any = parsedOutput.body; - if (data.message !== undefined && data.message !== null) { - contents.message = __expectString(data.message); - } - const exception = new AccessDeniedException({ - $metadata: deserializeMetadata(parsedOutput), - ...contents, - }); - return __decorateServiceException(exception, parsedOutput.body); -}; - -const deserializeAws_restJson1BadGatewayExceptionResponse = async ( - parsedOutput: any, - context: __SerdeContext -): Promise => { - const contents: any = {}; - const data: any = parsedOutput.body; - if (data.message !== undefined && data.message !== null) { - contents.message = 
__expectString(data.message); - } - const exception = new BadGatewayException({ - $metadata: deserializeMetadata(parsedOutput), - ...contents, - }); - return __decorateServiceException(exception, parsedOutput.body); -}; - -const deserializeAws_restJson1ConflictExceptionResponse = async ( - parsedOutput: any, - context: __SerdeContext -): Promise => { - const contents: any = {}; - const data: any = parsedOutput.body; - if (data.message !== undefined && data.message !== null) { - contents.message = __expectString(data.message); - } - const exception = new ConflictException({ - $metadata: deserializeMetadata(parsedOutput), - ...contents, - }); - return __decorateServiceException(exception, parsedOutput.body); -}; - -const deserializeAws_restJson1DependencyFailedExceptionResponse = async ( - parsedOutput: any, - context: __SerdeContext -): Promise => { - const contents: any = {}; - const data: any = parsedOutput.body; - if (data.message !== undefined && data.message !== null) { - contents.message = __expectString(data.message); - } - const exception = new DependencyFailedException({ - $metadata: deserializeMetadata(parsedOutput), - ...contents, - }); - return __decorateServiceException(exception, parsedOutput.body); -}; - -const deserializeAws_restJson1InternalServerExceptionResponse = async ( - parsedOutput: any, - context: __SerdeContext -): Promise => { - const contents: any = {}; - const data: any = parsedOutput.body; - if (data.message !== undefined && data.message !== null) { - contents.message = __expectString(data.message); - } - const exception = new InternalServerException({ - $metadata: deserializeMetadata(parsedOutput), - ...contents, - }); - return __decorateServiceException(exception, parsedOutput.body); -}; - -const deserializeAws_restJson1ResourceNotFoundExceptionResponse = async ( - parsedOutput: any, - context: __SerdeContext -): Promise => { - const contents: any = {}; - const data: any = parsedOutput.body; - if (data.message !== undefined && 
data.message !== null) { - contents.message = __expectString(data.message); - } - const exception = new ResourceNotFoundException({ - $metadata: deserializeMetadata(parsedOutput), - ...contents, - }); - return __decorateServiceException(exception, parsedOutput.body); -}; - -const deserializeAws_restJson1ThrottlingExceptionResponse = async ( - parsedOutput: any, - context: __SerdeContext -): Promise => { - const contents: any = {}; - const data: any = parsedOutput.body; - if (data.message !== undefined && data.message !== null) { - contents.message = __expectString(data.message); - } - const exception = new ThrottlingException({ - $metadata: deserializeMetadata(parsedOutput), - ...contents, - }); - return __decorateServiceException(exception, parsedOutput.body); -}; - -const deserializeAws_restJson1ValidationExceptionResponse = async ( - parsedOutput: any, - context: __SerdeContext -): Promise => { - const contents: any = {}; - const data: any = parsedOutput.body; - if (data.message !== undefined && data.message !== null) { - contents.message = __expectString(data.message); - } - const exception = new ValidationException({ - $metadata: deserializeMetadata(parsedOutput), - ...contents, - }); - return __decorateServiceException(exception, parsedOutput.body); -}; - const serializeAws_restJson1ActiveContext = (input: ActiveContext, context: __SerdeContext): any => { return { ...(input.contextAttributes != null && { @@ -1669,23 +1631,6 @@ const serializeAws_restJson1Slots = (input: Record, context: __Ser }, {}); }; -const serializeAws_restJson1StartConversationRequestEventStream = ( - input: StartConversationRequestEventStream, - context: __SerdeContext -): any => { - return StartConversationRequestEventStream.visit(input, { - AudioInputEvent: (value) => ({ AudioInputEvent: serializeAws_restJson1AudioInputEvent(value, context) }), - ConfigurationEvent: (value) => ({ ConfigurationEvent: serializeAws_restJson1ConfigurationEvent(value, context) }), - DTMFInputEvent: (value) 
=> ({ DTMFInputEvent: serializeAws_restJson1DTMFInputEvent(value, context) }), - DisconnectionEvent: (value) => ({ DisconnectionEvent: serializeAws_restJson1DisconnectionEvent(value, context) }), - PlaybackCompletionEvent: (value) => ({ - PlaybackCompletionEvent: serializeAws_restJson1PlaybackCompletionEvent(value, context), - }), - TextInputEvent: (value) => ({ TextInputEvent: serializeAws_restJson1TextInputEvent(value, context) }), - _: (name, value) => ({ name: value } as any), - }); -}; - const serializeAws_restJson1StringList = (input: string[], context: __SerdeContext): any => { return input .filter((e: any) => e != null) @@ -1738,12 +1683,6 @@ const serializeAws_restJson1Values = (input: Slot[], context: __SerdeContext): a }); }; -const deserializeAws_restJson1AccessDeniedException = (output: any, context: __SerdeContext): AccessDeniedException => { - return { - message: __expectString(output.message), - } as any; -}; - const deserializeAws_restJson1ActiveContext = (output: any, context: __SerdeContext): ActiveContext => { return { contextAttributes: @@ -1803,12 +1742,6 @@ const deserializeAws_restJson1AudioResponseEvent = (output: any, context: __Serd } as any; }; -const deserializeAws_restJson1BadGatewayException = (output: any, context: __SerdeContext): BadGatewayException => { - return { - message: __expectString(output.message), - } as any; -}; - const deserializeAws_restJson1Button = (output: any, context: __SerdeContext): Button => { return { text: __expectString(output.text), @@ -1834,21 +1767,6 @@ const deserializeAws_restJson1ConfidenceScore = (output: any, context: __SerdeCo } as any; }; -const deserializeAws_restJson1ConflictException = (output: any, context: __SerdeContext): ConflictException => { - return { - message: __expectString(output.message), - } as any; -}; - -const deserializeAws_restJson1DependencyFailedException = ( - output: any, - context: __SerdeContext -): DependencyFailedException => { - return { - message: 
__expectString(output.message), - } as any; -}; - const deserializeAws_restJson1DialogAction = (output: any, context: __SerdeContext): DialogAction => { return { slotElicitationStyle: __expectString(output.slotElicitationStyle), @@ -1899,15 +1817,6 @@ const deserializeAws_restJson1IntentResultEvent = (output: any, context: __Serde } as any; }; -const deserializeAws_restJson1InternalServerException = ( - output: any, - context: __SerdeContext -): InternalServerException => { - return { - message: __expectString(output.message), - } as any; -}; - const deserializeAws_restJson1Interpretation = (output: any, context: __SerdeContext): Interpretation => { return { intent: output.intent != null ? deserializeAws_restJson1Intent(output.intent, context) : undefined, @@ -1966,15 +1875,6 @@ const deserializeAws_restJson1PlaybackInterruptionEvent = ( } as any; }; -const deserializeAws_restJson1ResourceNotFoundException = ( - output: any, - context: __SerdeContext -): ResourceNotFoundException => { - return { - message: __expectString(output.message), - } as any; -}; - const deserializeAws_restJson1RuntimeHintDetails = (output: any, context: __SerdeContext): RuntimeHintDetails => { return { runtimeHintValues: @@ -2100,92 +2000,6 @@ const deserializeAws_restJson1Slots = (output: any, context: __SerdeContext): Re }, {}); }; -const deserializeAws_restJson1StartConversationResponseEventStream = ( - output: any, - context: __SerdeContext -): StartConversationResponseEventStream => { - if (output.AccessDeniedException !== undefined && output.AccessDeniedException !== null) { - return { - AccessDeniedException: deserializeAws_restJson1AccessDeniedException(output.AccessDeniedException, context), - }; - } - if (output.AudioResponseEvent !== undefined && output.AudioResponseEvent !== null) { - return { - AudioResponseEvent: deserializeAws_restJson1AudioResponseEvent(output.AudioResponseEvent, context), - }; - } - if (output.BadGatewayException !== undefined && output.BadGatewayException 
!== null) { - return { - BadGatewayException: deserializeAws_restJson1BadGatewayException(output.BadGatewayException, context), - }; - } - if (output.ConflictException !== undefined && output.ConflictException !== null) { - return { - ConflictException: deserializeAws_restJson1ConflictException(output.ConflictException, context), - }; - } - if (output.DependencyFailedException !== undefined && output.DependencyFailedException !== null) { - return { - DependencyFailedException: deserializeAws_restJson1DependencyFailedException( - output.DependencyFailedException, - context - ), - }; - } - if (output.HeartbeatEvent !== undefined && output.HeartbeatEvent !== null) { - return { - HeartbeatEvent: deserializeAws_restJson1HeartbeatEvent(output.HeartbeatEvent, context), - }; - } - if (output.IntentResultEvent !== undefined && output.IntentResultEvent !== null) { - return { - IntentResultEvent: deserializeAws_restJson1IntentResultEvent(output.IntentResultEvent, context), - }; - } - if (output.InternalServerException !== undefined && output.InternalServerException !== null) { - return { - InternalServerException: deserializeAws_restJson1InternalServerException(output.InternalServerException, context), - }; - } - if (output.PlaybackInterruptionEvent !== undefined && output.PlaybackInterruptionEvent !== null) { - return { - PlaybackInterruptionEvent: deserializeAws_restJson1PlaybackInterruptionEvent( - output.PlaybackInterruptionEvent, - context - ), - }; - } - if (output.ResourceNotFoundException !== undefined && output.ResourceNotFoundException !== null) { - return { - ResourceNotFoundException: deserializeAws_restJson1ResourceNotFoundException( - output.ResourceNotFoundException, - context - ), - }; - } - if (output.TextResponseEvent !== undefined && output.TextResponseEvent !== null) { - return { - TextResponseEvent: deserializeAws_restJson1TextResponseEvent(output.TextResponseEvent, context), - }; - } - if (output.ThrottlingException !== undefined && 
output.ThrottlingException !== null) { - return { - ThrottlingException: deserializeAws_restJson1ThrottlingException(output.ThrottlingException, context), - }; - } - if (output.TranscriptEvent !== undefined && output.TranscriptEvent !== null) { - return { - TranscriptEvent: deserializeAws_restJson1TranscriptEvent(output.TranscriptEvent, context), - }; - } - if (output.ValidationException !== undefined && output.ValidationException !== null) { - return { - ValidationException: deserializeAws_restJson1ValidationException(output.ValidationException, context), - }; - } - return { $unknown: Object.entries(output)[0] }; -}; - const deserializeAws_restJson1StringList = (output: any, context: __SerdeContext): string[] => { const retVal = (output || []) .filter((e: any) => e != null) @@ -2217,12 +2031,6 @@ const deserializeAws_restJson1TextResponseEvent = (output: any, context: __Serde } as any; }; -const deserializeAws_restJson1ThrottlingException = (output: any, context: __SerdeContext): ThrottlingException => { - return { - message: __expectString(output.message), - } as any; -}; - const deserializeAws_restJson1TranscriptEvent = (output: any, context: __SerdeContext): TranscriptEvent => { return { eventId: __expectString(output.eventId), @@ -2230,12 +2038,6 @@ const deserializeAws_restJson1TranscriptEvent = (output: any, context: __SerdeCo } as any; }; -const deserializeAws_restJson1ValidationException = (output: any, context: __SerdeContext): ValidationException => { - return { - message: __expectString(output.message), - } as any; -}; - const deserializeAws_restJson1Value = (output: any, context: __SerdeContext): Value => { return { interpretedValue: __expectString(output.interpretedValue), diff --git a/clients/client-s3/src/protocols/Aws_restXml.ts b/clients/client-s3/src/protocols/Aws_restXml.ts index 126afa32c5728..6cff484fc14f2 100644 --- a/clients/client-s3/src/protocols/Aws_restXml.ts +++ b/clients/client-s3/src/protocols/Aws_restXml.ts @@ -9705,22 +9705,8 @@ 
export const deserializeAws_restXmlSelectObjectContentCommand = async ( $metadata: deserializeMetadata(output), Payload: undefined, }; - const data: any = context.eventStreamMarshaller.deserialize(output.body, async (event) => { - const eventName = Object.keys(event)[0]; - const eventHeaders = Object.entries(event[eventName].headers).reduce((accummulator, curr) => { - accummulator[curr[0]] = curr[1].value; - return accummulator; - }, {} as Record); - const eventMessage = { - headers: eventHeaders, - body: event[eventName].body, - }; - const parsedEvent = { - [eventName]: eventMessage, - }; - return await deserializeAws_restXmlSelectObjectContentEventStream_event(parsedEvent, context); - }); - contents.Payload = data; + const data: any = output.body; + contents.Payload = deserializeAws_restXmlSelectObjectContentEventStream(data, context); return Promise.resolve(contents); }; @@ -9936,69 +9922,6 @@ const deserializeAws_restXmlWriteGetObjectResponseCommandError = async ( } }; -const deserializeAws_restXmlSelectObjectContentEventStream_event = async ( - output: any, - context: __SerdeContext -): Promise => { - if (output["Records"] !== undefined) { - return { - Records: await deserializeAws_restXmlRecordsEvent_event(output["Records"], context), - }; - } - if (output["Stats"] !== undefined) { - return { - Stats: await deserializeAws_restXmlStatsEvent_event(output["Stats"], context), - }; - } - if (output["Progress"] !== undefined) { - return { - Progress: await deserializeAws_restXmlProgressEvent_event(output["Progress"], context), - }; - } - if (output["Cont"] !== undefined) { - return { - Cont: await deserializeAws_restXmlContinuationEvent_event(output["Cont"], context), - }; - } - if (output["End"] !== undefined) { - return { - End: await deserializeAws_restXmlEndEvent_event(output["End"], context), - }; - } - return { $unknown: output }; -}; -const deserializeAws_restXmlContinuationEvent_event = async ( - output: any, - context: __SerdeContext -): Promise => { - 
const contents: ContinuationEvent = {} as any; - return contents; -}; -const deserializeAws_restXmlEndEvent_event = async (output: any, context: __SerdeContext): Promise => { - const contents: EndEvent = {} as any; - return contents; -}; -const deserializeAws_restXmlProgressEvent_event = async ( - output: any, - context: __SerdeContext -): Promise => { - const contents: ProgressEvent = {} as any; - contents.Details = await parseBody(output.body, context); - return contents; -}; -const deserializeAws_restXmlRecordsEvent_event = async ( - output: any, - context: __SerdeContext -): Promise => { - const contents: RecordsEvent = {} as any; - contents.Payload = output.body; - return contents; -}; -const deserializeAws_restXmlStatsEvent_event = async (output: any, context: __SerdeContext): Promise => { - const contents: StatsEvent = {} as any; - contents.Details = await parseBody(output.body, context); - return contents; -}; const deserializeAws_restXmlBucketAlreadyExistsResponse = async ( parsedOutput: any, context: __SerdeContext @@ -10122,6 +10045,77 @@ const deserializeAws_restXmlObjectNotInActiveTierErrorResponse = async ( return __decorateServiceException(exception, parsedOutput.body); }; +const deserializeAws_restXmlSelectObjectContentEventStream = ( + output: any, + context: __SerdeContext & __EventStreamSerdeContext +): AsyncIterable => { + return context.eventStreamMarshaller.deserialize(output, async (event) => { + if (event["Records"] != null) { + return { + Records: await deserializeAws_restXmlRecordsEvent_event(event["Records"], context), + }; + } + if (event["Stats"] != null) { + return { + Stats: await deserializeAws_restXmlStatsEvent_event(event["Stats"], context), + }; + } + if (event["Progress"] != null) { + return { + Progress: await deserializeAws_restXmlProgressEvent_event(event["Progress"], context), + }; + } + if (event["Cont"] != null) { + return { + Cont: await deserializeAws_restXmlContinuationEvent_event(event["Cont"], context), + }; + } + if 
(event["End"] != null) { + return { + End: await deserializeAws_restXmlEndEvent_event(event["End"], context), + }; + } + return { $unknown: output }; + }); +}; +const deserializeAws_restXmlContinuationEvent_event = async ( + output: any, + context: __SerdeContext +): Promise => { + const contents: ContinuationEvent = {} as any; + const data: any = await parseBody(output.body, context); + Object.assign(contents, deserializeAws_restXmlContinuationEvent(data, context)); + return contents; +}; +const deserializeAws_restXmlEndEvent_event = async (output: any, context: __SerdeContext): Promise => { + const contents: EndEvent = {} as any; + const data: any = await parseBody(output.body, context); + Object.assign(contents, deserializeAws_restXmlEndEvent(data, context)); + return contents; +}; +const deserializeAws_restXmlProgressEvent_event = async ( + output: any, + context: __SerdeContext +): Promise => { + const contents: ProgressEvent = {} as any; + const data: any = await parseBody(output.body, context); + contents.Details = deserializeAws_restXmlProgress(data, context); + return contents; +}; +const deserializeAws_restXmlRecordsEvent_event = async ( + output: any, + context: __SerdeContext +): Promise => { + const contents: RecordsEvent = {} as any; + contents.Payload = output.body; + return contents; +}; +const deserializeAws_restXmlStatsEvent_event = async (output: any, context: __SerdeContext): Promise => { + const contents: StatsEvent = {} as any; + const data: any = await parseBody(output.body, context); + contents.Details = deserializeAws_restXmlStats(data, context); + return contents; +}; const serializeAws_restXmlAbortIncompleteMultipartUpload = ( input: AbortIncompleteMultipartUpload, context: __SerdeContext @@ -14163,16 +14157,6 @@ const deserializeAws_restXmlProgress = (output: any, context: __SerdeContext): P return contents; }; -const deserializeAws_restXmlProgressEvent = (output: any, context: __SerdeContext): ProgressEvent => { - const contents: any = 
{ - Details: undefined, - }; - if (output["Details"] !== undefined) { - contents.Details = deserializeAws_restXmlProgress(output["Details"], context); - } - return contents; -}; - const deserializeAws_restXmlPublicAccessBlockConfiguration = ( output: any, context: __SerdeContext @@ -14233,16 +14217,6 @@ const deserializeAws_restXmlQueueConfigurationList = (output: any, context: __Se }); }; -const deserializeAws_restXmlRecordsEvent = (output: any, context: __SerdeContext): RecordsEvent => { - const contents: any = { - Payload: undefined, - }; - if (output["Payload"] !== undefined) { - contents.Payload = context.base64Decoder(output["Payload"]); - } - return contents; -}; - const deserializeAws_restXmlRedirect = (output: any, context: __SerdeContext): Redirect => { const contents: any = { HostName: undefined, @@ -14475,38 +14449,6 @@ const deserializeAws_restXmlS3KeyFilter = (output: any, context: __SerdeContext) return contents; }; -const deserializeAws_restXmlSelectObjectContentEventStream = ( - output: any, - context: __SerdeContext -): SelectObjectContentEventStream => { - if (output["Records"] !== undefined) { - return { - Records: deserializeAws_restXmlRecordsEvent(output["Records"], context), - }; - } - if (output["Stats"] !== undefined) { - return { - Stats: deserializeAws_restXmlStatsEvent(output["Stats"], context), - }; - } - if (output["Progress"] !== undefined) { - return { - Progress: deserializeAws_restXmlProgressEvent(output["Progress"], context), - }; - } - if (output["Cont"] !== undefined) { - return { - Cont: deserializeAws_restXmlContinuationEvent(output["Cont"], context), - }; - } - if (output["End"] !== undefined) { - return { - End: deserializeAws_restXmlEndEvent(output["End"], context), - }; - } - return { $unknown: Object.entries(output)[0] }; -}; - const deserializeAws_restXmlServerSideEncryptionByDefault = ( output: any, context: __SerdeContext @@ -14636,16 +14578,6 @@ const deserializeAws_restXmlStats = (output: any, context: 
__SerdeContext): Stat return contents; }; -const deserializeAws_restXmlStatsEvent = (output: any, context: __SerdeContext): StatsEvent => { - const contents: any = { - Details: undefined, - }; - if (output["Details"] !== undefined) { - contents.Details = deserializeAws_restXmlStats(output["Details"], context); - } - return contents; -}; - const deserializeAws_restXmlStorageClassAnalysis = (output: any, context: __SerdeContext): StorageClassAnalysis => { const contents: any = { DataExport: undefined, diff --git a/clients/client-s3/test/e2e/S3.ispec.ts b/clients/client-s3/test/e2e/S3.ispec.ts index 7f69636d92d31..fa18b87e333c7 100644 --- a/clients/client-s3/test/e2e/S3.ispec.ts +++ b/clients/client-s3/test/e2e/S3.ispec.ts @@ -281,6 +281,7 @@ esfuture,29`; }); const events: SelectObjectContentEventStream[] = []; for await (const event of Payload!) { + console.log(event); events.push(event); } expect(events.length).to.equal(3); diff --git a/clients/client-transcribe-streaming/src/protocols/Aws_restJson1.ts b/clients/client-transcribe-streaming/src/protocols/Aws_restJson1.ts index 8f42691d68730..c1bd2803e002b 100644 --- a/clients/client-transcribe-streaming/src/protocols/Aws_restJson1.ts +++ b/clients/client-transcribe-streaming/src/protocols/Aws_restJson1.ts @@ -12,6 +12,7 @@ import { Endpoint as __Endpoint, EventStreamSerdeContext as __EventStreamSerdeContext, Message as __Message, + MessageHeaders as __MessageHeaders, ResponseMetadata as __ResponseMetadata, SerdeContext as __SerdeContext, } from "@aws-sdk/types"; @@ -84,9 +85,7 @@ export const serializeAws_restJson1StartMedicalStreamTranscriptionCommand = asyn `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/medical-stream-transcription"; let body: any; if (input.AudioStream !== undefined) { - body = context.eventStreamMarshaller.serialize(input.AudioStream, (event) => - serializeAws_restJson1AudioStream_event(event, context) - ); + body = serializeAws_restJson1AudioStream(input.AudioStream, context); } return new __HttpRequest({ protocol, @@ -166,9 +165,7 @@ export const serializeAws_restJson1StartStreamTranscriptionCommand = async ( const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/stream-transcription"; let body: any; if (input.AudioStream !== undefined) { - body = context.eventStreamMarshaller.serialize(input.AudioStream, (event) => - serializeAws_restJson1AudioStream_event(event, context) - ); + body = serializeAws_restJson1AudioStream(input.AudioStream, context); } return new __HttpRequest({ protocol, @@ -242,22 +239,8 @@ export const deserializeAws_restJson1StartMedicalStreamTranscriptionCommand = as if (output.headers["x-amzn-transcribe-content-identification-type"] !== undefined) { contents.ContentIdentificationType = output.headers["x-amzn-transcribe-content-identification-type"]; } - const data: any = context.eventStreamMarshaller.deserialize(output.body, async (event) => { - const eventName = Object.keys(event)[0]; - const eventHeaders = Object.entries(event[eventName].headers).reduce((accummulator, curr) => { - accummulator[curr[0]] = curr[1].value; - return accummulator; - }, {} as Record); - const eventMessage = { - headers: eventHeaders, - body: event[eventName].body, - }; - const parsedEvent = { - [eventName]: eventMessage, - }; - return await deserializeAws_restJson1MedicalTranscriptResultStream_event(parsedEvent, context); - }); - contents.TranscriptResultStream = data; + const data: any = output.body; + contents.TranscriptResultStream = deserializeAws_restJson1MedicalTranscriptResultStream(data, context); return Promise.resolve(contents); }; @@ -403,22 +386,8 
@@ export const deserializeAws_restJson1StartStreamTranscriptionCommand = async ( if (output.headers["x-amzn-transcribe-vocabulary-filter-names"] !== undefined) { contents.VocabularyFilterNames = output.headers["x-amzn-transcribe-vocabulary-filter-names"]; } - const data: any = context.eventStreamMarshaller.deserialize(output.body, async (event) => { - const eventName = Object.keys(event)[0]; - const eventHeaders = Object.entries(event[eventName].headers).reduce((accummulator, curr) => { - accummulator[curr[0]] = curr[1].value; - return accummulator; - }, {} as Record); - const eventMessage = { - headers: eventHeaders, - body: event[eventName].body, - }; - const parsedEvent = { - [eventName]: eventMessage, - }; - return await deserializeAws_restJson1TranscriptResultStream_event(parsedEvent, context); - }); - contents.TranscriptResultStream = data; + const data: any = output.body; + contents.TranscriptResultStream = deserializeAws_restJson1TranscriptResultStream(data, context); return Promise.resolve(contents); }; @@ -461,194 +430,6 @@ const deserializeAws_restJson1StartStreamTranscriptionCommandError = async ( } }; -const serializeAws_restJson1AudioStream_event = (input: any, context: __SerdeContext): __Message => { - return AudioStream.visit(input, { - AudioEvent: (value) => serializeAws_restJson1AudioEvent_event(value, context), - _: (value) => value as any, - }); -}; -const deserializeAws_restJson1MedicalTranscriptResultStream_event = async ( - output: any, - context: __SerdeContext -): Promise => { - if (output["TranscriptEvent"] !== undefined) { - return { - TranscriptEvent: await deserializeAws_restJson1MedicalTranscriptEvent_event(output["TranscriptEvent"], context), - }; - } - if (output["BadRequestException"] !== undefined) { - return { - BadRequestException: await deserializeAws_restJson1BadRequestException_event( - output["BadRequestException"], - context - ), - }; - } - if (output["LimitExceededException"] !== undefined) { - return { - 
LimitExceededException: await deserializeAws_restJson1LimitExceededException_event( - output["LimitExceededException"], - context - ), - }; - } - if (output["InternalFailureException"] !== undefined) { - return { - InternalFailureException: await deserializeAws_restJson1InternalFailureException_event( - output["InternalFailureException"], - context - ), - }; - } - if (output["ConflictException"] !== undefined) { - return { - ConflictException: await deserializeAws_restJson1ConflictException_event(output["ConflictException"], context), - }; - } - if (output["ServiceUnavailableException"] !== undefined) { - return { - ServiceUnavailableException: await deserializeAws_restJson1ServiceUnavailableException_event( - output["ServiceUnavailableException"], - context - ), - }; - } - return { $unknown: output }; -}; -const deserializeAws_restJson1TranscriptResultStream_event = async ( - output: any, - context: __SerdeContext -): Promise => { - if (output["TranscriptEvent"] !== undefined) { - return { - TranscriptEvent: await deserializeAws_restJson1TranscriptEvent_event(output["TranscriptEvent"], context), - }; - } - if (output["BadRequestException"] !== undefined) { - return { - BadRequestException: await deserializeAws_restJson1BadRequestException_event( - output["BadRequestException"], - context - ), - }; - } - if (output["LimitExceededException"] !== undefined) { - return { - LimitExceededException: await deserializeAws_restJson1LimitExceededException_event( - output["LimitExceededException"], - context - ), - }; - } - if (output["InternalFailureException"] !== undefined) { - return { - InternalFailureException: await deserializeAws_restJson1InternalFailureException_event( - output["InternalFailureException"], - context - ), - }; - } - if (output["ConflictException"] !== undefined) { - return { - ConflictException: await deserializeAws_restJson1ConflictException_event(output["ConflictException"], context), - }; - } - if (output["ServiceUnavailableException"] !== 
undefined) { - return { - ServiceUnavailableException: await deserializeAws_restJson1ServiceUnavailableException_event( - output["ServiceUnavailableException"], - context - ), - }; - } - return { $unknown: output }; -}; -const serializeAws_restJson1AudioEvent_event = (input: AudioEvent, context: __SerdeContext): __Message => { - const message: __Message = { - headers: { - ":event-type": { type: "string", value: "AudioEvent" }, - ":message-type": { type: "string", value: "event" }, - ":content-type": { type: "string", value: "application/octet-stream" }, - }, - body: new Uint8Array(), - }; - message.body = input.AudioChunk || message.body; - return message; -}; -const deserializeAws_restJson1BadRequestException_event = async ( - output: any, - context: __SerdeContext -): Promise => { - const parsedOutput: any = { - ...output, - body: await parseBody(output.body, context), - }; - return deserializeAws_restJson1BadRequestExceptionResponse(parsedOutput, context); -}; -const deserializeAws_restJson1ConflictException_event = async ( - output: any, - context: __SerdeContext -): Promise => { - const parsedOutput: any = { - ...output, - body: await parseBody(output.body, context), - }; - return deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context); -}; -const deserializeAws_restJson1InternalFailureException_event = async ( - output: any, - context: __SerdeContext -): Promise => { - const parsedOutput: any = { - ...output, - body: await parseBody(output.body, context), - }; - return deserializeAws_restJson1InternalFailureExceptionResponse(parsedOutput, context); -}; -const deserializeAws_restJson1LimitExceededException_event = async ( - output: any, - context: __SerdeContext -): Promise => { - const parsedOutput: any = { - ...output, - body: await parseBody(output.body, context), - }; - return deserializeAws_restJson1LimitExceededExceptionResponse(parsedOutput, context); -}; -const deserializeAws_restJson1MedicalTranscriptEvent_event = async ( - output: 
any, - context: __SerdeContext -): Promise => { - let contents: MedicalTranscriptEvent = {} as any; - const data: any = await parseBody(output.body, context); - contents = { - ...contents, - ...deserializeAws_restJson1MedicalTranscriptEvent(data, context), - } as any; - return contents; -}; -const deserializeAws_restJson1ServiceUnavailableException_event = async ( - output: any, - context: __SerdeContext -): Promise => { - const parsedOutput: any = { - ...output, - body: await parseBody(output.body, context), - }; - return deserializeAws_restJson1ServiceUnavailableExceptionResponse(parsedOutput, context); -}; -const deserializeAws_restJson1TranscriptEvent_event = async ( - output: any, - context: __SerdeContext -): Promise => { - let contents: TranscriptEvent = {} as any; - const data: any = await parseBody(output.body, context); - contents = { - ...contents, - ...deserializeAws_restJson1TranscriptEvent(data, context), - } as any; - return contents; -}; const deserializeAws_restJson1BadRequestExceptionResponse = async ( parsedOutput: any, context: __SerdeContext @@ -729,19 +510,194 @@ const deserializeAws_restJson1ServiceUnavailableExceptionResponse = async ( return __decorateServiceException(exception, parsedOutput.body); }; -const serializeAws_restJson1AudioEvent = (input: AudioEvent, context: __SerdeContext): any => { - return { - ...(input.AudioChunk != null && { AudioChunk: context.base64Encoder(input.AudioChunk) }), +const serializeAws_restJson1AudioStream = (input: any, context: __SerdeContext & __EventStreamSerdeContext): any => { + const eventMarshallingVisitor = (event: any): __Message => + AudioStream.visit(event, { + AudioEvent: (value) => serializeAws_restJson1AudioEvent_event(value, context), + _: (value) => value as any, + }); + return context.eventStreamMarshaller.serialize(input, eventMarshallingVisitor); +}; +const serializeAws_restJson1AudioEvent_event = (input: AudioEvent, context: __SerdeContext): __Message => { + const headers: 
__MessageHeaders = { + ":event-type": { type: "string", value: "AudioEvent" }, + ":message-type": { type: "string", value: "event" }, + ":content-type": { type: "string", value: "application/octet-stream" }, }; + let body = new Uint8Array(); + if (input.AudioChunk != null) { + body = input.AudioChunk; + } + return { headers, body }; }; - -const serializeAws_restJson1AudioStream = (input: AudioStream, context: __SerdeContext): any => { - return AudioStream.visit(input, { - AudioEvent: (value) => ({ AudioEvent: serializeAws_restJson1AudioEvent(value, context) }), - _: (name, value) => ({ name: value } as any), +const deserializeAws_restJson1MedicalTranscriptResultStream = ( + output: any, + context: __SerdeContext & __EventStreamSerdeContext +): AsyncIterable => { + return context.eventStreamMarshaller.deserialize(output, async (event) => { + if (event["TranscriptEvent"] != null) { + return { + TranscriptEvent: await deserializeAws_restJson1MedicalTranscriptEvent_event(event["TranscriptEvent"], context), + }; + } + if (event["BadRequestException"] != null) { + return { + BadRequestException: await deserializeAws_restJson1BadRequestException_event( + event["BadRequestException"], + context + ), + }; + } + if (event["LimitExceededException"] != null) { + return { + LimitExceededException: await deserializeAws_restJson1LimitExceededException_event( + event["LimitExceededException"], + context + ), + }; + } + if (event["InternalFailureException"] != null) { + return { + InternalFailureException: await deserializeAws_restJson1InternalFailureException_event( + event["InternalFailureException"], + context + ), + }; + } + if (event["ConflictException"] != null) { + return { + ConflictException: await deserializeAws_restJson1ConflictException_event(event["ConflictException"], context), + }; + } + if (event["ServiceUnavailableException"] != null) { + return { + ServiceUnavailableException: await deserializeAws_restJson1ServiceUnavailableException_event( + 
event["ServiceUnavailableException"], + context + ), + }; + } + return { $unknown: output }; }); }; - +const deserializeAws_restJson1TranscriptResultStream = ( + output: any, + context: __SerdeContext & __EventStreamSerdeContext +): AsyncIterable => { + return context.eventStreamMarshaller.deserialize(output, async (event) => { + if (event["TranscriptEvent"] != null) { + return { + TranscriptEvent: await deserializeAws_restJson1TranscriptEvent_event(event["TranscriptEvent"], context), + }; + } + if (event["BadRequestException"] != null) { + return { + BadRequestException: await deserializeAws_restJson1BadRequestException_event( + event["BadRequestException"], + context + ), + }; + } + if (event["LimitExceededException"] != null) { + return { + LimitExceededException: await deserializeAws_restJson1LimitExceededException_event( + event["LimitExceededException"], + context + ), + }; + } + if (event["InternalFailureException"] != null) { + return { + InternalFailureException: await deserializeAws_restJson1InternalFailureException_event( + event["InternalFailureException"], + context + ), + }; + } + if (event["ConflictException"] != null) { + return { + ConflictException: await deserializeAws_restJson1ConflictException_event(event["ConflictException"], context), + }; + } + if (event["ServiceUnavailableException"] != null) { + return { + ServiceUnavailableException: await deserializeAws_restJson1ServiceUnavailableException_event( + event["ServiceUnavailableException"], + context + ), + }; + } + return { $unknown: output }; + }); +}; +const deserializeAws_restJson1BadRequestException_event = async ( + output: any, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + return deserializeAws_restJson1BadRequestExceptionResponse(parsedOutput, context); +}; +const deserializeAws_restJson1ConflictException_event = async ( + output: any, + context: __SerdeContext +): Promise => { + const 
parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + return deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context); +}; +const deserializeAws_restJson1InternalFailureException_event = async ( + output: any, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + return deserializeAws_restJson1InternalFailureExceptionResponse(parsedOutput, context); +}; +const deserializeAws_restJson1LimitExceededException_event = async ( + output: any, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + return deserializeAws_restJson1LimitExceededExceptionResponse(parsedOutput, context); +}; +const deserializeAws_restJson1MedicalTranscriptEvent_event = async ( + output: any, + context: __SerdeContext +): Promise => { + const contents: MedicalTranscriptEvent = {} as any; + const data: any = await parseBody(output.body, context); + Object.assign(contents, deserializeAws_restJson1MedicalTranscriptEvent(data, context)); + return contents; +}; +const deserializeAws_restJson1ServiceUnavailableException_event = async ( + output: any, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + return deserializeAws_restJson1ServiceUnavailableExceptionResponse(parsedOutput, context); +}; +const deserializeAws_restJson1TranscriptEvent_event = async ( + output: any, + context: __SerdeContext +): Promise => { + const contents: TranscriptEvent = {} as any; + const data: any = await parseBody(output.body, context); + Object.assign(contents, deserializeAws_restJson1TranscriptEvent(data, context)); + return contents; +}; const deserializeAws_restJson1Alternative = (output: any, context: __SerdeContext): Alternative => { return { Entities: output.Entities != null ? 
deserializeAws_restJson1EntityList(output.Entities, context) : undefined, @@ -762,18 +718,6 @@ const deserializeAws_restJson1AlternativeList = (output: any, context: __SerdeCo return retVal; }; -const deserializeAws_restJson1BadRequestException = (output: any, context: __SerdeContext): BadRequestException => { - return { - Message: __expectString(output.Message), - } as any; -}; - -const deserializeAws_restJson1ConflictException = (output: any, context: __SerdeContext): ConflictException => { - return { - Message: __expectString(output.Message), - } as any; -}; - const deserializeAws_restJson1Entity = (output: any, context: __SerdeContext): Entity => { return { Category: __expectString(output.Category), @@ -797,15 +741,6 @@ const deserializeAws_restJson1EntityList = (output: any, context: __SerdeContext return retVal; }; -const deserializeAws_restJson1InternalFailureException = ( - output: any, - context: __SerdeContext -): InternalFailureException => { - return { - Message: __expectString(output.Message), - } as any; -}; - const deserializeAws_restJson1Item = (output: any, context: __SerdeContext): Item => { return { Confidence: __limitedParseDouble(output.Confidence), @@ -850,15 +785,6 @@ const deserializeAws_restJson1LanguageWithScore = (output: any, context: __Serde } as any; }; -const deserializeAws_restJson1LimitExceededException = ( - output: any, - context: __SerdeContext -): LimitExceededException => { - return { - Message: __expectString(output.Message), - } as any; -}; - const deserializeAws_restJson1MedicalAlternative = (output: any, context: __SerdeContext): MedicalAlternative => { return { Entities: output.Entities != null ? 
deserializeAws_restJson1MedicalEntityList(output.Entities, context) : undefined, @@ -966,49 +892,6 @@ const deserializeAws_restJson1MedicalTranscriptEvent = ( } as any; }; -const deserializeAws_restJson1MedicalTranscriptResultStream = ( - output: any, - context: __SerdeContext -): MedicalTranscriptResultStream => { - if (output.BadRequestException !== undefined && output.BadRequestException !== null) { - return { - BadRequestException: deserializeAws_restJson1BadRequestException(output.BadRequestException, context), - }; - } - if (output.ConflictException !== undefined && output.ConflictException !== null) { - return { - ConflictException: deserializeAws_restJson1ConflictException(output.ConflictException, context), - }; - } - if (output.InternalFailureException !== undefined && output.InternalFailureException !== null) { - return { - InternalFailureException: deserializeAws_restJson1InternalFailureException( - output.InternalFailureException, - context - ), - }; - } - if (output.LimitExceededException !== undefined && output.LimitExceededException !== null) { - return { - LimitExceededException: deserializeAws_restJson1LimitExceededException(output.LimitExceededException, context), - }; - } - if (output.ServiceUnavailableException !== undefined && output.ServiceUnavailableException !== null) { - return { - ServiceUnavailableException: deserializeAws_restJson1ServiceUnavailableException( - output.ServiceUnavailableException, - context - ), - }; - } - if (output.TranscriptEvent !== undefined && output.TranscriptEvent !== null) { - return { - TranscriptEvent: deserializeAws_restJson1MedicalTranscriptEvent(output.TranscriptEvent, context), - }; - } - return { $unknown: Object.entries(output)[0] }; -}; - const deserializeAws_restJson1Result = (output: any, context: __SerdeContext): Result => { return { Alternatives: @@ -1038,15 +921,6 @@ const deserializeAws_restJson1ResultList = (output: any, context: __SerdeContext return retVal; }; -const 
deserializeAws_restJson1ServiceUnavailableException = ( - output: any, - context: __SerdeContext -): ServiceUnavailableException => { - return { - Message: __expectString(output.Message), - } as any; -}; - const deserializeAws_restJson1Transcript = (output: any, context: __SerdeContext): Transcript => { return { Results: output.Results != null ? deserializeAws_restJson1ResultList(output.Results, context) : undefined, @@ -1059,49 +933,6 @@ const deserializeAws_restJson1TranscriptEvent = (output: any, context: __SerdeCo } as any; }; -const deserializeAws_restJson1TranscriptResultStream = ( - output: any, - context: __SerdeContext -): TranscriptResultStream => { - if (output.BadRequestException !== undefined && output.BadRequestException !== null) { - return { - BadRequestException: deserializeAws_restJson1BadRequestException(output.BadRequestException, context), - }; - } - if (output.ConflictException !== undefined && output.ConflictException !== null) { - return { - ConflictException: deserializeAws_restJson1ConflictException(output.ConflictException, context), - }; - } - if (output.InternalFailureException !== undefined && output.InternalFailureException !== null) { - return { - InternalFailureException: deserializeAws_restJson1InternalFailureException( - output.InternalFailureException, - context - ), - }; - } - if (output.LimitExceededException !== undefined && output.LimitExceededException !== null) { - return { - LimitExceededException: deserializeAws_restJson1LimitExceededException(output.LimitExceededException, context), - }; - } - if (output.ServiceUnavailableException !== undefined && output.ServiceUnavailableException !== null) { - return { - ServiceUnavailableException: deserializeAws_restJson1ServiceUnavailableException( - output.ServiceUnavailableException, - context - ), - }; - } - if (output.TranscriptEvent !== undefined && output.TranscriptEvent !== null) { - return { - TranscriptEvent: deserializeAws_restJson1TranscriptEvent(output.TranscriptEvent, 
context), - }; - } - return { $unknown: Object.entries(output)[0] }; -}; - const deserializeMetadata = (output: __HttpResponse): __ResponseMetadata => ({ httpStatusCode: output.statusCode, requestId: output.headers["x-amzn-requestid"] ?? output.headers["x-amzn-request-id"], diff --git a/codegen/smithy-aws-typescript-codegen/src/main/java/software/amazon/smithy/aws/typescript/codegen/AwsRestXml.java b/codegen/smithy-aws-typescript-codegen/src/main/java/software/amazon/smithy/aws/typescript/codegen/AwsRestXml.java index a2b6dfc595a36..2f1fe82c10aa5 100644 --- a/codegen/smithy-aws-typescript-codegen/src/main/java/software/amazon/smithy/aws/typescript/codegen/AwsRestXml.java +++ b/codegen/smithy-aws-typescript-codegen/src/main/java/software/amazon/smithy/aws/typescript/codegen/AwsRestXml.java @@ -155,7 +155,7 @@ protected void serializeInputDocumentBody( @Override protected void serializeInputEventDocumentPayload(GenerationContext context) { TypeScriptWriter writer = context.getWriter(); - writer.write("message.body = context.utf8Decoder(body.toString());"); + writer.write("body = context.utf8Decoder(body.toString());"); } @Override diff --git a/codegen/smithy-aws-typescript-codegen/src/main/java/software/amazon/smithy/aws/typescript/codegen/RestJsonProtocolGenerator.java b/codegen/smithy-aws-typescript-codegen/src/main/java/software/amazon/smithy/aws/typescript/codegen/RestJsonProtocolGenerator.java index 527454b934e02..7fe878804889d 100644 --- a/codegen/smithy-aws-typescript-codegen/src/main/java/software/amazon/smithy/aws/typescript/codegen/RestJsonProtocolGenerator.java +++ b/codegen/smithy-aws-typescript-codegen/src/main/java/software/amazon/smithy/aws/typescript/codegen/RestJsonProtocolGenerator.java @@ -186,7 +186,7 @@ protected void serializeInputPayload( @Override protected void serializeInputEventDocumentPayload(GenerationContext context) { TypeScriptWriter writer = context.getWriter(); - writer.write("message.body = 
context.utf8Decoder(JSON.stringify(body));"); + writer.write("body = context.utf8Decoder(JSON.stringify(body));"); } @Override From 972d020ec9d8e713b9306aa8a39b036886127fc0 Mon Sep 17 00:00:00 2001 From: awstools Date: Thu, 21 Jul 2022 18:23:02 +0000 Subject: [PATCH 21/31] feat(client-iotsitewise): Added asynchronous API to ingest bulk historical and current data into IoT SiteWise. --- clients/client-iotsitewise/src/IoTSiteWise.ts | 134 ++++ .../src/IoTSiteWiseClient.ts | 15 + .../commands/CreateBulkImportJobCommand.ts | 107 ++++ .../commands/DescribeBulkImportJobCommand.ts | 102 ++++ .../src/commands/ListBulkImportJobsCommand.ts | 102 ++++ .../client-iotsitewise/src/commands/index.ts | 3 + .../client-iotsitewise/src/models/models_0.ts | 439 ++++++++++++++ .../pagination/ListBulkImportJobsPaginator.ts | 61 ++ .../src/pagination/index.ts | 1 + .../src/protocols/Aws_restJson1.ts | 470 ++++++++++++++ .../sdk-codegen/aws-models/iotsitewise.json | 574 ++++++++++++++++++ 11 files changed, 2008 insertions(+) create mode 100644 clients/client-iotsitewise/src/commands/CreateBulkImportJobCommand.ts create mode 100644 clients/client-iotsitewise/src/commands/DescribeBulkImportJobCommand.ts create mode 100644 clients/client-iotsitewise/src/commands/ListBulkImportJobsCommand.ts create mode 100644 clients/client-iotsitewise/src/pagination/ListBulkImportJobsPaginator.ts diff --git a/clients/client-iotsitewise/src/IoTSiteWise.ts b/clients/client-iotsitewise/src/IoTSiteWise.ts index c6c9fbfe6cb2e..85e2a7de3b071 100644 --- a/clients/client-iotsitewise/src/IoTSiteWise.ts +++ b/clients/client-iotsitewise/src/IoTSiteWise.ts @@ -52,6 +52,11 @@ import { CreateAssetModelCommandInput, CreateAssetModelCommandOutput, } from "./commands/CreateAssetModelCommand"; +import { + CreateBulkImportJobCommand, + CreateBulkImportJobCommandInput, + CreateBulkImportJobCommandOutput, +} from "./commands/CreateBulkImportJobCommand"; import { CreateDashboardCommand, CreateDashboardCommandInput, @@ -128,6 
+133,11 @@ import { DescribeAssetPropertyCommandInput, DescribeAssetPropertyCommandOutput, } from "./commands/DescribeAssetPropertyCommand"; +import { + DescribeBulkImportJobCommand, + DescribeBulkImportJobCommandInput, + DescribeBulkImportJobCommandOutput, +} from "./commands/DescribeBulkImportJobCommand"; import { DescribeDashboardCommand, DescribeDashboardCommandInput, @@ -224,6 +234,11 @@ import { ListAssociatedAssetsCommandInput, ListAssociatedAssetsCommandOutput, } from "./commands/ListAssociatedAssetsCommand"; +import { + ListBulkImportJobsCommand, + ListBulkImportJobsCommandInput, + ListBulkImportJobsCommandOutput, +} from "./commands/ListBulkImportJobsCommand"; import { ListDashboardsCommand, ListDashboardsCommandInput, @@ -706,6 +721,49 @@ export class IoTSiteWise extends IoTSiteWiseClient { } } + /** + * + *

          This API operation is in preview release for IoT SiteWise and is subject to change. + * We recommend that you use this operation only with test data, and not in production environments.

          + *
          + *

          Defines a job to ingest data to IoT SiteWise from Amazon S3. For more information, + * see Create a bulk import job (CLI) + * in the Amazon Simple Storage Service User Guide.

          + * + *

          You must enable IoT SiteWise to export data to Amazon S3 before you create a bulk import job. + * For more information about how to configure storage settings, + * see PutStorageConfiguration.

          + *
          + */ + public createBulkImportJob( + args: CreateBulkImportJobCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public createBulkImportJob( + args: CreateBulkImportJobCommandInput, + cb: (err: any, data?: CreateBulkImportJobCommandOutput) => void + ): void; + public createBulkImportJob( + args: CreateBulkImportJobCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: CreateBulkImportJobCommandOutput) => void + ): void; + public createBulkImportJob( + args: CreateBulkImportJobCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: CreateBulkImportJobCommandOutput) => void), + cb?: (err: any, data?: CreateBulkImportJobCommandOutput) => void + ): Promise | void { + const command = new CreateBulkImportJobCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

          Creates a dashboard in an IoT SiteWise Monitor project.

          */ @@ -1259,6 +1317,44 @@ export class IoTSiteWise extends IoTSiteWiseClient { } } + /** + * + *

          This API operation is in preview release for IoT SiteWise and is subject to change. + * We recommend that you use this operation only with test data, and not in production environments.

          + *
          + *

          Retrieves information about a bulk import job request. For more information, + * see Describe a bulk import job (CLI) + * in the Amazon Simple Storage Service User Guide.

          + */ + public describeBulkImportJob( + args: DescribeBulkImportJobCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public describeBulkImportJob( + args: DescribeBulkImportJobCommandInput, + cb: (err: any, data?: DescribeBulkImportJobCommandOutput) => void + ): void; + public describeBulkImportJob( + args: DescribeBulkImportJobCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DescribeBulkImportJobCommandOutput) => void + ): void; + public describeBulkImportJob( + args: DescribeBulkImportJobCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DescribeBulkImportJobCommandOutput) => void), + cb?: (err: any, data?: DescribeBulkImportJobCommandOutput) => void + ): Promise | void { + const command = new DescribeBulkImportJobCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

          Retrieves information about a dashboard.

          */ @@ -1996,6 +2092,44 @@ export class IoTSiteWise extends IoTSiteWiseClient { } } + /** + * + *

          This API operation is in preview release for IoT SiteWise and is subject to change. + * We recommend that you use this operation only with test data, and not in production environments.

          + *
          + *

          Retrieves a paginated list of bulk import job requests. For more information, + * see List bulk import jobs (CLI) + * in the Amazon Simple Storage Service User Guide.

          + */ + public listBulkImportJobs( + args: ListBulkImportJobsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listBulkImportJobs( + args: ListBulkImportJobsCommandInput, + cb: (err: any, data?: ListBulkImportJobsCommandOutput) => void + ): void; + public listBulkImportJobs( + args: ListBulkImportJobsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListBulkImportJobsCommandOutput) => void + ): void; + public listBulkImportJobs( + args: ListBulkImportJobsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListBulkImportJobsCommandOutput) => void), + cb?: (err: any, data?: ListBulkImportJobsCommandOutput) => void + ): Promise | void { + const command = new ListBulkImportJobsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

          Retrieves a paginated list of dashboards for an IoT SiteWise Monitor project.

          */ diff --git a/clients/client-iotsitewise/src/IoTSiteWiseClient.ts b/clients/client-iotsitewise/src/IoTSiteWiseClient.ts index 0370cfa593875..dc828a327c221 100644 --- a/clients/client-iotsitewise/src/IoTSiteWiseClient.ts +++ b/clients/client-iotsitewise/src/IoTSiteWiseClient.ts @@ -85,6 +85,10 @@ import { import { CreateAccessPolicyCommandInput, CreateAccessPolicyCommandOutput } from "./commands/CreateAccessPolicyCommand"; import { CreateAssetCommandInput, CreateAssetCommandOutput } from "./commands/CreateAssetCommand"; import { CreateAssetModelCommandInput, CreateAssetModelCommandOutput } from "./commands/CreateAssetModelCommand"; +import { + CreateBulkImportJobCommandInput, + CreateBulkImportJobCommandOutput, +} from "./commands/CreateBulkImportJobCommand"; import { CreateDashboardCommandInput, CreateDashboardCommandOutput } from "./commands/CreateDashboardCommand"; import { CreateGatewayCommandInput, CreateGatewayCommandOutput } from "./commands/CreateGatewayCommand"; import { CreatePortalCommandInput, CreatePortalCommandOutput } from "./commands/CreatePortalCommand"; @@ -107,6 +111,10 @@ import { DescribeAssetPropertyCommandInput, DescribeAssetPropertyCommandOutput, } from "./commands/DescribeAssetPropertyCommand"; +import { + DescribeBulkImportJobCommandInput, + DescribeBulkImportJobCommandOutput, +} from "./commands/DescribeBulkImportJobCommand"; import { DescribeDashboardCommandInput, DescribeDashboardCommandOutput } from "./commands/DescribeDashboardCommand"; import { DescribeDefaultEncryptionConfigurationCommandInput, @@ -160,6 +168,7 @@ import { ListAssociatedAssetsCommandInput, ListAssociatedAssetsCommandOutput, } from "./commands/ListAssociatedAssetsCommand"; +import { ListBulkImportJobsCommandInput, ListBulkImportJobsCommandOutput } from "./commands/ListBulkImportJobsCommand"; import { ListDashboardsCommandInput, ListDashboardsCommandOutput } from "./commands/ListDashboardsCommand"; import { ListGatewaysCommandInput, 
ListGatewaysCommandOutput } from "./commands/ListGatewaysCommand"; import { ListPortalsCommandInput, ListPortalsCommandOutput } from "./commands/ListPortalsCommand"; @@ -210,6 +219,7 @@ export type ServiceInputTypes = | CreateAccessPolicyCommandInput | CreateAssetCommandInput | CreateAssetModelCommandInput + | CreateBulkImportJobCommandInput | CreateDashboardCommandInput | CreateGatewayCommandInput | CreatePortalCommandInput @@ -226,6 +236,7 @@ export type ServiceInputTypes = | DescribeAssetCommandInput | DescribeAssetModelCommandInput | DescribeAssetPropertyCommandInput + | DescribeBulkImportJobCommandInput | DescribeDashboardCommandInput | DescribeDefaultEncryptionConfigurationCommandInput | DescribeGatewayCapabilityConfigurationCommandInput @@ -246,6 +257,7 @@ export type ServiceInputTypes = | ListAssetRelationshipsCommandInput | ListAssetsCommandInput | ListAssociatedAssetsCommandInput + | ListBulkImportJobsCommandInput | ListDashboardsCommandInput | ListGatewaysCommandInput | ListPortalsCommandInput @@ -280,6 +292,7 @@ export type ServiceOutputTypes = | CreateAccessPolicyCommandOutput | CreateAssetCommandOutput | CreateAssetModelCommandOutput + | CreateBulkImportJobCommandOutput | CreateDashboardCommandOutput | CreateGatewayCommandOutput | CreatePortalCommandOutput @@ -296,6 +309,7 @@ export type ServiceOutputTypes = | DescribeAssetCommandOutput | DescribeAssetModelCommandOutput | DescribeAssetPropertyCommandOutput + | DescribeBulkImportJobCommandOutput | DescribeDashboardCommandOutput | DescribeDefaultEncryptionConfigurationCommandOutput | DescribeGatewayCapabilityConfigurationCommandOutput @@ -316,6 +330,7 @@ export type ServiceOutputTypes = | ListAssetRelationshipsCommandOutput | ListAssetsCommandOutput | ListAssociatedAssetsCommandOutput + | ListBulkImportJobsCommandOutput | ListDashboardsCommandOutput | ListGatewaysCommandOutput | ListPortalsCommandOutput diff --git a/clients/client-iotsitewise/src/commands/CreateBulkImportJobCommand.ts 
b/clients/client-iotsitewise/src/commands/CreateBulkImportJobCommand.ts new file mode 100644 index 0000000000000..3c7442b17a38c --- /dev/null +++ b/clients/client-iotsitewise/src/commands/CreateBulkImportJobCommand.ts @@ -0,0 +1,107 @@ +// smithy-typescript generated code +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { IoTSiteWiseClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../IoTSiteWiseClient"; +import { CreateBulkImportJobRequest, CreateBulkImportJobResponse } from "../models/models_0"; +import { + deserializeAws_restJson1CreateBulkImportJobCommand, + serializeAws_restJson1CreateBulkImportJobCommand, +} from "../protocols/Aws_restJson1"; + +export interface CreateBulkImportJobCommandInput extends CreateBulkImportJobRequest {} +export interface CreateBulkImportJobCommandOutput extends CreateBulkImportJobResponse, __MetadataBearer {} + +/** + * + *

          This API operation is in preview release for IoT SiteWise and is subject to change. + * We recommend that you use this operation only with test data, and not in production environments.

          + *
          + *

          Defines a job to ingest data to IoT SiteWise from Amazon S3. For more information, + * see Create a bulk import job (CLI) + * in the Amazon Simple Storage Service User Guide.

          + * + *

          You must enable IoT SiteWise to export data to Amazon S3 before you create a bulk import job. + * For more information about how to configure storage settings, + * see PutStorageConfiguration.

          + *
          + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { IoTSiteWiseClient, CreateBulkImportJobCommand } from "@aws-sdk/client-iotsitewise"; // ES Modules import + * // const { IoTSiteWiseClient, CreateBulkImportJobCommand } = require("@aws-sdk/client-iotsitewise"); // CommonJS import + * const client = new IoTSiteWiseClient(config); + * const command = new CreateBulkImportJobCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CreateBulkImportJobCommandInput} for command's `input` shape. + * @see {@link CreateBulkImportJobCommandOutput} for command's `response` shape. + * @see {@link IoTSiteWiseClientResolvedConfig | config} for IoTSiteWiseClient's `config` shape. + * + */ +export class CreateBulkImportJobCommand extends $Command< + CreateBulkImportJobCommandInput, + CreateBulkImportJobCommandOutput, + IoTSiteWiseClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreateBulkImportJobCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: IoTSiteWiseClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "IoTSiteWiseClient"; + const commandName = "CreateBulkImportJobCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: CreateBulkImportJobRequest.filterSensitiveLog, + outputFilterSensitiveLog: CreateBulkImportJobResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: 
FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: CreateBulkImportJobCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1CreateBulkImportJobCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1CreateBulkImportJobCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-iotsitewise/src/commands/DescribeBulkImportJobCommand.ts b/clients/client-iotsitewise/src/commands/DescribeBulkImportJobCommand.ts new file mode 100644 index 0000000000000..6f7c141ec8dd0 --- /dev/null +++ b/clients/client-iotsitewise/src/commands/DescribeBulkImportJobCommand.ts @@ -0,0 +1,102 @@ +// smithy-typescript generated code +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { IoTSiteWiseClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../IoTSiteWiseClient"; +import { DescribeBulkImportJobRequest, DescribeBulkImportJobResponse } from "../models/models_0"; +import { + deserializeAws_restJson1DescribeBulkImportJobCommand, + serializeAws_restJson1DescribeBulkImportJobCommand, +} from "../protocols/Aws_restJson1"; + +export interface DescribeBulkImportJobCommandInput extends DescribeBulkImportJobRequest {} +export interface DescribeBulkImportJobCommandOutput extends DescribeBulkImportJobResponse, __MetadataBearer {} + +/** + * + *

          This API operation is in preview release for IoT SiteWise and is subject to change. + * We recommend that you use this operation only with test data, and not in production environments.

          + *
          + *

          Retrieves information about a bulk import job request. For more information, + * see Describe a bulk import job (CLI) + * in the Amazon Simple Storage Service User Guide.

          + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { IoTSiteWiseClient, DescribeBulkImportJobCommand } from "@aws-sdk/client-iotsitewise"; // ES Modules import + * // const { IoTSiteWiseClient, DescribeBulkImportJobCommand } = require("@aws-sdk/client-iotsitewise"); // CommonJS import + * const client = new IoTSiteWiseClient(config); + * const command = new DescribeBulkImportJobCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DescribeBulkImportJobCommandInput} for command's `input` shape. + * @see {@link DescribeBulkImportJobCommandOutput} for command's `response` shape. + * @see {@link IoTSiteWiseClientResolvedConfig | config} for IoTSiteWiseClient's `config` shape. + * + */ +export class DescribeBulkImportJobCommand extends $Command< + DescribeBulkImportJobCommandInput, + DescribeBulkImportJobCommandOutput, + IoTSiteWiseClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DescribeBulkImportJobCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: IoTSiteWiseClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "IoTSiteWiseClient"; + const commandName = "DescribeBulkImportJobCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DescribeBulkImportJobRequest.filterSensitiveLog, + outputFilterSensitiveLog: DescribeBulkImportJobResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return 
stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DescribeBulkImportJobCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1DescribeBulkImportJobCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1DescribeBulkImportJobCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-iotsitewise/src/commands/ListBulkImportJobsCommand.ts b/clients/client-iotsitewise/src/commands/ListBulkImportJobsCommand.ts new file mode 100644 index 0000000000000..6908c3461d010 --- /dev/null +++ b/clients/client-iotsitewise/src/commands/ListBulkImportJobsCommand.ts @@ -0,0 +1,102 @@ +// smithy-typescript generated code +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { IoTSiteWiseClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../IoTSiteWiseClient"; +import { ListBulkImportJobsRequest, ListBulkImportJobsResponse } from "../models/models_0"; +import { + deserializeAws_restJson1ListBulkImportJobsCommand, + serializeAws_restJson1ListBulkImportJobsCommand, +} from "../protocols/Aws_restJson1"; + +export interface ListBulkImportJobsCommandInput extends ListBulkImportJobsRequest {} +export interface ListBulkImportJobsCommandOutput extends ListBulkImportJobsResponse, __MetadataBearer {} + +/** + * + *

          This API operation is in preview release for IoT SiteWise and is subject to change. + * We recommend that you use this operation only with test data, and not in production environments.

          + *
          + *

          Retrieves a paginated list of bulk import job requests. For more information, + * see List bulk import jobs (CLI) + * in the Amazon Simple Storage Service User Guide.

          + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { IoTSiteWiseClient, ListBulkImportJobsCommand } from "@aws-sdk/client-iotsitewise"; // ES Modules import + * // const { IoTSiteWiseClient, ListBulkImportJobsCommand } = require("@aws-sdk/client-iotsitewise"); // CommonJS import + * const client = new IoTSiteWiseClient(config); + * const command = new ListBulkImportJobsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListBulkImportJobsCommandInput} for command's `input` shape. + * @see {@link ListBulkImportJobsCommandOutput} for command's `response` shape. + * @see {@link IoTSiteWiseClientResolvedConfig | config} for IoTSiteWiseClient's `config` shape. + * + */ +export class ListBulkImportJobsCommand extends $Command< + ListBulkImportJobsCommandInput, + ListBulkImportJobsCommandOutput, + IoTSiteWiseClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListBulkImportJobsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: IoTSiteWiseClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "IoTSiteWiseClient"; + const commandName = "ListBulkImportJobsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListBulkImportJobsRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListBulkImportJobsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: 
FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListBulkImportJobsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1ListBulkImportJobsCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1ListBulkImportJobsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-iotsitewise/src/commands/index.ts b/clients/client-iotsitewise/src/commands/index.ts index 959312d4a2184..556e32ae7bf3f 100644 --- a/clients/client-iotsitewise/src/commands/index.ts +++ b/clients/client-iotsitewise/src/commands/index.ts @@ -10,6 +10,7 @@ export * from "./BatchPutAssetPropertyValueCommand"; export * from "./CreateAccessPolicyCommand"; export * from "./CreateAssetCommand"; export * from "./CreateAssetModelCommand"; +export * from "./CreateBulkImportJobCommand"; export * from "./CreateDashboardCommand"; export * from "./CreateGatewayCommand"; export * from "./CreatePortalCommand"; @@ -26,6 +27,7 @@ export * from "./DescribeAccessPolicyCommand"; export * from "./DescribeAssetCommand"; export * from "./DescribeAssetModelCommand"; export * from "./DescribeAssetPropertyCommand"; +export * from "./DescribeBulkImportJobCommand"; export * from "./DescribeDashboardCommand"; export * from "./DescribeDefaultEncryptionConfigurationCommand"; export * from "./DescribeGatewayCapabilityConfigurationCommand"; @@ -46,6 +48,7 @@ export * from "./ListAssetModelsCommand"; export * from "./ListAssetRelationshipsCommand"; export * from "./ListAssetsCommand"; export * from "./ListAssociatedAssetsCommand"; +export * from "./ListBulkImportJobsCommand"; export * from "./ListDashboardsCommand"; export * from "./ListGatewaysCommand"; export * from "./ListPortalsCommand"; diff --git 
a/clients/client-iotsitewise/src/models/models_0.ts b/clients/client-iotsitewise/src/models/models_0.ts index 90cf2d7505a87..37f176040da2e 100644 --- a/clients/client-iotsitewise/src/models/models_0.ts +++ b/clients/client-iotsitewise/src/models/models_0.ts @@ -3124,6 +3124,231 @@ export namespace CreateAssetModelResponse { }); } +/** + *

          The Amazon S3 destination where errors associated with the job creation request are saved.

          + */ +export interface ErrorReportLocation { + /** + *

          The name of the Amazon S3 bucket to which errors associated with the bulk import job are sent.

          + */ + bucket: string | undefined; + + /** + *

          Amazon S3 uses the prefix as a folder name to organize data in the bucket. + * Each Amazon S3 object has a key that is its unique identifier in the bucket. + * Each object in a bucket has exactly one key. The prefix must end with a forward slash (/). + * For more information, see Organizing objects using prefixes + * in the Amazon Simple Storage Service User Guide.

          + */ + prefix: string | undefined; +} + +export namespace ErrorReportLocation { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ErrorReportLocation): any => ({ + ...obj, + }); +} + +/** + *

          The file in Amazon S3 where your data is saved.

          + */ +export interface File { + /** + *

          The name of the Amazon S3 bucket from which data is imported.

          + */ + bucket: string | undefined; + + /** + *

          The key of the Amazon S3 object that contains your data. Each object has a key that is a + * unique identifier. Each object has exactly one key.

          + */ + key: string | undefined; + + /** + *

          The version ID to identify a specific version of the Amazon S3 object that contains your data.

          + */ + versionId?: string; +} + +export namespace File { + /** + * @internal + */ + export const filterSensitiveLog = (obj: File): any => ({ + ...obj, + }); +} + +export enum ColumnName { + ALIAS = "ALIAS", + ASSET_ID = "ASSET_ID", + DATA_TYPE = "DATA_TYPE", + PROPERTY_ID = "PROPERTY_ID", + QUALITY = "QUALITY", + TIMESTAMP_NANO_OFFSET = "TIMESTAMP_NANO_OFFSET", + TIMESTAMP_SECONDS = "TIMESTAMP_SECONDS", + VALUE = "VALUE", +} + +/** + *

          A .csv file.

          + */ +export interface Csv { + /** + *

          The column names specified in the .csv file.

          + */ + columnNames?: (ColumnName | string)[]; +} + +export namespace Csv { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Csv): any => ({ + ...obj, + }); +} + +/** + *

          The file format of the data.

          + */ +export interface FileFormat { + /** + *

          The .csv file format.

          + */ + csv?: Csv; +} + +export namespace FileFormat { + /** + * @internal + */ + export const filterSensitiveLog = (obj: FileFormat): any => ({ + ...obj, + }); +} + +/** + *

          Contains the configuration information of a job, such as the file format used to save data in Amazon S3.

          + */ +export interface JobConfiguration { + /** + *

          The file format of the data in Amazon S3.

          + */ + fileFormat: FileFormat | undefined; +} + +export namespace JobConfiguration { + /** + * @internal + */ + export const filterSensitiveLog = (obj: JobConfiguration): any => ({ + ...obj, + }); +} + +export interface CreateBulkImportJobRequest { + /** + *

          The unique name that helps identify the job request.

          + */ + jobName: string | undefined; + + /** + *

          The ARN of the IAM role that allows IoT SiteWise to read Amazon S3 data.

          + */ + jobRoleArn: string | undefined; + + /** + *

          The files in the specified Amazon S3 bucket that contain your data.

          + */ + files: File[] | undefined; + + /** + *

          The Amazon S3 destination where errors associated with the job creation request are saved.

          + */ + errorReportLocation: ErrorReportLocation | undefined; + + /** + *

          Contains the configuration information of a job, such as the file format used to save data in Amazon S3.

          + */ + jobConfiguration: JobConfiguration | undefined; +} + +export namespace CreateBulkImportJobRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateBulkImportJobRequest): any => ({ + ...obj, + }); +} + +export enum JobStatus { + CANCELLED = "CANCELLED", + COMPLETED = "COMPLETED", + COMPLETED_WITH_FAILURES = "COMPLETED_WITH_FAILURES", + FAILED = "FAILED", + PENDING = "PENDING", + RUNNING = "RUNNING", +} + +export interface CreateBulkImportJobResponse { + /** + *

          The ID of the job.

          + */ + jobId: string | undefined; + + /** + *

          The unique name that helps identify the job request.

          + */ + jobName: string | undefined; + + /** + *

          The status of the bulk import job can be one of following values.

          + *
            + *
          • + *

            + * PENDING – IoT SiteWise is waiting for the current bulk import job to finish.

            + *
          • + *
          • + *

            + * CANCELLED – The bulk import job has been canceled.

            + *
          • + *
          • + *

            + * RUNNING – IoT SiteWise is processing your request to import your data from Amazon S3.

            + *
          • + *
          • + *

            + * COMPLETED – IoT SiteWise successfully completed your request to import data from Amazon S3.

            + *
          • + *
          • + *

            + * FAILED – IoT SiteWise couldn't process your request to import data from Amazon S3. + * You can use logs saved in the specified error report location in Amazon S3 to troubleshoot issues.

            + *
          • + *
          • + *

            + * COMPLETED_WITH_FAILURES – IoT SiteWise completed your request to import data from Amazon S3 with errors. + * You can use logs saved in the specified error report location in Amazon S3 to troubleshoot issues.

            + *
          • + *
          + */ + jobStatus: JobStatus | string | undefined; +} + +export namespace CreateBulkImportJobResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateBulkImportJobResponse): any => ({ + ...obj, + }); +} + export interface CreateDashboardRequest { /** *

          The ID of the project in which to create the dashboard.

          @@ -4241,6 +4466,106 @@ export namespace DescribeAssetPropertyResponse { }); } +export interface DescribeBulkImportJobRequest { + /** + *

          The ID of the job.

          + */ + jobId: string | undefined; +} + +export namespace DescribeBulkImportJobRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DescribeBulkImportJobRequest): any => ({ + ...obj, + }); +} + +export interface DescribeBulkImportJobResponse { + /** + *

          The ID of the job.

          + */ + jobId: string | undefined; + + /** + *

          The unique name that helps identify the job request.

          + */ + jobName: string | undefined; + + /** + *

          The status of the bulk import job can be one of following values.

          + *
            + *
          • + *

            + * PENDING – IoT SiteWise is waiting for the current bulk import job to finish.

            + *
          • + *
          • + *

            + * CANCELLED – The bulk import job has been canceled.

            + *
          • + *
          • + *

            + * RUNNING – IoT SiteWise is processing your request to import your data from Amazon S3.

            + *
          • + *
          • + *

            + * COMPLETED – IoT SiteWise successfully completed your request to import data from Amazon S3.

            + *
          • + *
          • + *

            + * FAILED – IoT SiteWise couldn't process your request to import data from Amazon S3. + * You can use logs saved in the specified error report location in Amazon S3 to troubleshoot issues.

            + *
          • + *
          • + *

            + * COMPLETED_WITH_FAILURES – IoT SiteWise completed your request to import data from Amazon S3 with errors. + * You can use logs saved in the specified error report location in Amazon S3 to troubleshoot issues.

            + *
          • + *
          + */ + jobStatus: JobStatus | string | undefined; + + /** + *

          The ARN of the IAM role that allows IoT SiteWise to read Amazon S3 data.

          + */ + jobRoleArn: string | undefined; + + /** + *

          The files in the specified Amazon S3 bucket that contain your data.

          + */ + files: File[] | undefined; + + /** + *

          The Amazon S3 destination where errors associated with the job creation request are saved.

          + */ + errorReportLocation: ErrorReportLocation | undefined; + + /** + *

          Contains the configuration information of a job, such as the file format used to save data in Amazon S3.

          + */ + jobConfiguration: JobConfiguration | undefined; + + /** + *

          The date the job was created, in Unix epoch TIME.

          + */ + jobCreationDate: Date | undefined; + + /** + *

          The date the job was last updated, in Unix epoch time.

          + */ + jobLastUpdateDate: Date | undefined; +} + +export namespace DescribeBulkImportJobResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DescribeBulkImportJobResponse): any => ({ + ...obj, + }); +} + export interface DescribeDashboardRequest { /** *

          The ID of the dashboard.

          @@ -5911,6 +6236,120 @@ export namespace ListAssociatedAssetsResponse { }); } +export enum ListBulkImportJobsFilter { + ALL = "ALL", + CANCELLED = "CANCELLED", + COMPLETED = "COMPLETED", + COMPLETED_WITH_FAILURES = "COMPLETED_WITH_FAILURES", + FAILED = "FAILED", + PENDING = "PENDING", + RUNNING = "RUNNING", +} + +export interface ListBulkImportJobsRequest { + /** + *

          The token to be used for the next set of paginated results.

          + */ + nextToken?: string; + + /** + *

          The maximum number of results to return for each paginated request.

          + */ + maxResults?: number; + + /** + *

          You can use a filter to select the bulk import jobs that you want to retrieve.

          + */ + filter?: ListBulkImportJobsFilter | string; +} + +export namespace ListBulkImportJobsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListBulkImportJobsRequest): any => ({ + ...obj, + }); +} + +/** + *

          Contains a job summary information.

          + */ +export interface JobSummary { + /** + *

          The ID of the job.

          + */ + id: string | undefined; + + /** + *

          The unique name that helps identify the job request.

          + */ + name: string | undefined; + + /** + *

          The status of the bulk import job can be one of following values.

          + *
            + *
          • + *

            + * PENDING – IoT SiteWise is waiting for the current bulk import job to finish.

            + *
          • + *
          • + *

            + * CANCELLED – The bulk import job has been canceled.

            + *
          • + *
          • + *

            + * RUNNING – IoT SiteWise is processing your request to import your data from Amazon S3.

            + *
          • + *
          • + *

            + * COMPLETED – IoT SiteWise successfully completed your request to import data from Amazon S3.

            + *
          • + *
          • + *

            + * FAILED – IoT SiteWise couldn't process your request to import data from Amazon S3. + * You can use logs saved in the specified error report location in Amazon S3 to troubleshoot issues.

            + *
          • + *
          • + *

            + * COMPLETED_WITH_FAILURES – IoT SiteWise completed your request to import data from Amazon S3 with errors. + * You can use logs saved in the specified error report location in Amazon S3 to troubleshoot issues.

            + *
          • + *
          + */ + status: JobStatus | string | undefined; +} + +export namespace JobSummary { + /** + * @internal + */ + export const filterSensitiveLog = (obj: JobSummary): any => ({ + ...obj, + }); +} + +export interface ListBulkImportJobsResponse { + /** + *

          One or more job summaries to list.

          + */ + jobSummaries: JobSummary[] | undefined; + + /** + *

          The token for the next set of results, or null if there are no additional results.

          + */ + nextToken?: string; +} + +export namespace ListBulkImportJobsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListBulkImportJobsResponse): any => ({ + ...obj, + }); +} + export interface ListDashboardsRequest { /** *

          The ID of the project.

          diff --git a/clients/client-iotsitewise/src/pagination/ListBulkImportJobsPaginator.ts b/clients/client-iotsitewise/src/pagination/ListBulkImportJobsPaginator.ts new file mode 100644 index 0000000000000..3196a6d3938c5 --- /dev/null +++ b/clients/client-iotsitewise/src/pagination/ListBulkImportJobsPaginator.ts @@ -0,0 +1,61 @@ +// smithy-typescript generated code +import { Paginator } from "@aws-sdk/types"; + +import { + ListBulkImportJobsCommand, + ListBulkImportJobsCommandInput, + ListBulkImportJobsCommandOutput, +} from "../commands/ListBulkImportJobsCommand"; +import { IoTSiteWise } from "../IoTSiteWise"; +import { IoTSiteWiseClient } from "../IoTSiteWiseClient"; +import { IoTSiteWisePaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: IoTSiteWiseClient, + input: ListBulkImportJobsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListBulkImportJobsCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: IoTSiteWise, + input: ListBulkImportJobsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listBulkImportJobs(input, ...args); +}; +export async function* paginateListBulkImportJobs( + config: IoTSiteWisePaginationConfiguration, + input: ListBulkImportJobsCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.nextToken + let token: typeof input.nextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListBulkImportJobsCommandOutput; + while (hasNext) { + input.nextToken = token; + input["maxResults"] = config.pageSize; + if (config.client instanceof IoTSiteWise) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof IoTSiteWiseClient) { + page = await makePagedClientRequest(config.client, input, 
...additionalArguments); + } else { + throw new Error("Invalid client, expected IoTSiteWise | IoTSiteWiseClient"); + } + yield page; + const prevToken = token; + token = page.nextToken; + hasNext = !!(token && (!config.stopOnSameToken || token !== prevToken)); + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-iotsitewise/src/pagination/index.ts b/clients/client-iotsitewise/src/pagination/index.ts index 46827c7bb349e..7561d8a95a547 100644 --- a/clients/client-iotsitewise/src/pagination/index.ts +++ b/clients/client-iotsitewise/src/pagination/index.ts @@ -16,6 +16,7 @@ export * from "./ListAssetModelsPaginator"; export * from "./ListAssetRelationshipsPaginator"; export * from "./ListAssetsPaginator"; export * from "./ListAssociatedAssetsPaginator"; +export * from "./ListBulkImportJobsPaginator"; export * from "./ListDashboardsPaginator"; export * from "./ListGatewaysPaginator"; export * from "./ListPortalsPaginator"; diff --git a/clients/client-iotsitewise/src/protocols/Aws_restJson1.ts b/clients/client-iotsitewise/src/protocols/Aws_restJson1.ts index a3c2e131872a0..c9641858da107 100644 --- a/clients/client-iotsitewise/src/protocols/Aws_restJson1.ts +++ b/clients/client-iotsitewise/src/protocols/Aws_restJson1.ts @@ -57,6 +57,10 @@ import { import { CreateAccessPolicyCommandInput, CreateAccessPolicyCommandOutput } from "../commands/CreateAccessPolicyCommand"; import { CreateAssetCommandInput, CreateAssetCommandOutput } from "../commands/CreateAssetCommand"; import { CreateAssetModelCommandInput, CreateAssetModelCommandOutput } from "../commands/CreateAssetModelCommand"; +import { + CreateBulkImportJobCommandInput, + CreateBulkImportJobCommandOutput, +} from "../commands/CreateBulkImportJobCommand"; import { CreateDashboardCommandInput, CreateDashboardCommandOutput } from "../commands/CreateDashboardCommand"; import { CreateGatewayCommandInput, CreateGatewayCommandOutput } from "../commands/CreateGatewayCommand"; import { CreatePortalCommandInput, 
CreatePortalCommandOutput } from "../commands/CreatePortalCommand"; @@ -79,6 +83,10 @@ import { DescribeAssetPropertyCommandInput, DescribeAssetPropertyCommandOutput, } from "../commands/DescribeAssetPropertyCommand"; +import { + DescribeBulkImportJobCommandInput, + DescribeBulkImportJobCommandOutput, +} from "../commands/DescribeBulkImportJobCommand"; import { DescribeDashboardCommandInput, DescribeDashboardCommandOutput } from "../commands/DescribeDashboardCommand"; import { DescribeDefaultEncryptionConfigurationCommandInput, @@ -132,6 +140,7 @@ import { ListAssociatedAssetsCommandInput, ListAssociatedAssetsCommandOutput, } from "../commands/ListAssociatedAssetsCommand"; +import { ListBulkImportJobsCommandInput, ListBulkImportJobsCommandOutput } from "../commands/ListBulkImportJobsCommand"; import { ListDashboardsCommandInput, ListDashboardsCommandOutput } from "../commands/ListDashboardsCommand"; import { ListGatewaysCommandInput, ListGatewaysCommandOutput } from "../commands/ListGatewaysCommand"; import { ListPortalsCommandInput, ListPortalsCommandOutput } from "../commands/ListPortalsCommand"; @@ -211,15 +220,20 @@ import { BatchGetAssetPropertyValueSuccessEntry, BatchPutAssetPropertyError, BatchPutAssetPropertyErrorEntry, + ColumnName, CompositeModelProperty, ConfigurationErrorDetails, ConfigurationStatus, ConflictingOperationException, + Csv, CustomerManagedS3Storage, DashboardSummary, DetailedError, ErrorDetails, + ErrorReportLocation, ExpressionVariable, + File, + FileFormat, ForwardingConfig, GatewayCapabilitySummary, GatewayPlatform, @@ -236,6 +250,8 @@ import { InternalFailureException, InterpolatedAssetPropertyValue, InvalidRequestException, + JobConfiguration, + JobSummary, LimitExceededException, LoggingOptions, Measurement, @@ -697,6 +713,45 @@ export const serializeAws_restJson1CreateAssetModelCommand = async ( }); }; +export const serializeAws_restJson1CreateBulkImportJobCommand = async ( + input: CreateBulkImportJobCommandInput, + context: 
__SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/jobs"; + let body: any; + body = JSON.stringify({ + ...(input.errorReportLocation != null && { + errorReportLocation: serializeAws_restJson1ErrorReportLocation(input.errorReportLocation, context), + }), + ...(input.files != null && { files: serializeAws_restJson1Files(input.files, context) }), + ...(input.jobConfiguration != null && { + jobConfiguration: serializeAws_restJson1JobConfiguration(input.jobConfiguration, context), + }), + ...(input.jobName != null && { jobName: input.jobName }), + ...(input.jobRoleArn != null && { jobRoleArn: input.jobRoleArn }), + }); + let { hostname: resolvedHostname } = await context.endpoint(); + if (context.disableHostPrefix !== true) { + resolvedHostname = "data." + resolvedHostname; + if (!__isValidHostname(resolvedHostname)) { + throw new Error("ValidationError: prefixed hostname must be hostname compatible."); + } + } + return new __HttpRequest({ + protocol, + hostname: resolvedHostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + export const serializeAws_restJson1CreateDashboardCommand = async ( input: CreateDashboardCommandInput, context: __SerdeContext @@ -1307,6 +1362,41 @@ export const serializeAws_restJson1DescribeAssetPropertyCommand = async ( }); }; +export const serializeAws_restJson1DescribeBulkImportJobCommand = async ( + input: DescribeBulkImportJobCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/jobs/{jobId}"; + if (input.jobId !== undefined) { + const labelValue: string = input.jobId; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: jobId."); + } + resolvedPath = resolvedPath.replace("{jobId}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: jobId."); + } + let body: any; + let { hostname: resolvedHostname } = await context.endpoint(); + if (context.disableHostPrefix !== true) { + resolvedHostname = "data." + resolvedHostname; + if (!__isValidHostname(resolvedHostname)) { + throw new Error("ValidationError: prefixed hostname must be hostname compatible."); + } + } + return new __HttpRequest({ + protocol, + hostname: resolvedHostname, + port, + method: "GET", + headers, + path: resolvedPath, + body, + }); +}; + export const serializeAws_restJson1DescribeDashboardCommand = async ( input: DescribeDashboardCommandInput, context: __SerdeContext @@ -2041,6 +2131,38 @@ export const serializeAws_restJson1ListAssociatedAssetsCommand = async ( }); }; +export const serializeAws_restJson1ListBulkImportJobsCommand = async ( + input: ListBulkImportJobsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/jobs"; + const query: any = { + ...(input.nextToken !== undefined && { nextToken: input.nextToken }), + ...(input.maxResults !== undefined && { maxResults: input.maxResults.toString() }), + ...(input.filter !== undefined && { filter: input.filter }), + }; + let body: any; + let { hostname: resolvedHostname } = await context.endpoint(); + if (context.disableHostPrefix !== true) { + resolvedHostname = "data." 
+ resolvedHostname; + if (!__isValidHostname(resolvedHostname)) { + throw new Error("ValidationError: prefixed hostname must be hostname compatible."); + } + } + return new __HttpRequest({ + protocol, + hostname: resolvedHostname, + port, + method: "GET", + headers, + path: resolvedPath, + query, + body, + }); +}; + export const serializeAws_restJson1ListDashboardsCommand = async ( input: ListDashboardsCommandInput, context: __SerdeContext @@ -3566,6 +3688,77 @@ const deserializeAws_restJson1CreateAssetModelCommandError = async ( } }; +export const deserializeAws_restJson1CreateBulkImportJobCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 202 && output.statusCode >= 300) { + return deserializeAws_restJson1CreateBulkImportJobCommandError(output, context); + } + const contents: CreateBulkImportJobCommandOutput = { + $metadata: deserializeMetadata(output), + jobId: undefined, + jobName: undefined, + jobStatus: undefined, + }; + const data: Record = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.jobId !== undefined && data.jobId !== null) { + contents.jobId = __expectString(data.jobId); + } + if (data.jobName !== undefined && data.jobName !== null) { + contents.jobName = __expectString(data.jobName); + } + if (data.jobStatus !== undefined && data.jobStatus !== null) { + contents.jobStatus = __expectString(data.jobStatus); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1CreateBulkImportJobCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + const errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "ConflictingOperationException": + case "com.amazonaws.iotsitewise#ConflictingOperationException": + throw await 
deserializeAws_restJson1ConflictingOperationExceptionResponse(parsedOutput, context); + case "InternalFailureException": + case "com.amazonaws.iotsitewise#InternalFailureException": + throw await deserializeAws_restJson1InternalFailureExceptionResponse(parsedOutput, context); + case "InvalidRequestException": + case "com.amazonaws.iotsitewise#InvalidRequestException": + throw await deserializeAws_restJson1InvalidRequestExceptionResponse(parsedOutput, context); + case "LimitExceededException": + case "com.amazonaws.iotsitewise#LimitExceededException": + throw await deserializeAws_restJson1LimitExceededExceptionResponse(parsedOutput, context); + case "ResourceAlreadyExistsException": + case "com.amazonaws.iotsitewise#ResourceAlreadyExistsException": + throw await deserializeAws_restJson1ResourceAlreadyExistsExceptionResponse(parsedOutput, context); + case "ResourceNotFoundException": + case "com.amazonaws.iotsitewise#ResourceNotFoundException": + throw await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context); + case "ThrottlingException": + case "com.amazonaws.iotsitewise#ThrottlingException": + throw await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + const $metadata = deserializeMetadata(output); + const statusCode = $metadata.httpStatusCode ? 
$metadata.httpStatusCode + "" : undefined; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode || statusCode || "UnknowError", + $fault: "client", + $metadata, + }); + throw __decorateServiceException(response, parsedBody); + } +}; + export const deserializeAws_restJson1CreateDashboardCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -4589,6 +4782,92 @@ const deserializeAws_restJson1DescribeAssetPropertyCommandError = async ( } }; +export const deserializeAws_restJson1DescribeBulkImportJobCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1DescribeBulkImportJobCommandError(output, context); + } + const contents: DescribeBulkImportJobCommandOutput = { + $metadata: deserializeMetadata(output), + errorReportLocation: undefined, + files: undefined, + jobConfiguration: undefined, + jobCreationDate: undefined, + jobId: undefined, + jobLastUpdateDate: undefined, + jobName: undefined, + jobRoleArn: undefined, + jobStatus: undefined, + }; + const data: Record = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.errorReportLocation !== undefined && data.errorReportLocation !== null) { + contents.errorReportLocation = deserializeAws_restJson1ErrorReportLocation(data.errorReportLocation, context); + } + if (data.files !== undefined && data.files !== null) { + contents.files = deserializeAws_restJson1Files(data.files, context); + } + if (data.jobConfiguration !== undefined && data.jobConfiguration !== null) { + contents.jobConfiguration = deserializeAws_restJson1JobConfiguration(data.jobConfiguration, context); + } + if (data.jobCreationDate !== undefined && data.jobCreationDate !== null) { + contents.jobCreationDate = __expectNonNull(__parseEpochTimestamp(__expectNumber(data.jobCreationDate))); + } + if (data.jobId !== undefined && data.jobId !== null) 
{ + contents.jobId = __expectString(data.jobId); + } + if (data.jobLastUpdateDate !== undefined && data.jobLastUpdateDate !== null) { + contents.jobLastUpdateDate = __expectNonNull(__parseEpochTimestamp(__expectNumber(data.jobLastUpdateDate))); + } + if (data.jobName !== undefined && data.jobName !== null) { + contents.jobName = __expectString(data.jobName); + } + if (data.jobRoleArn !== undefined && data.jobRoleArn !== null) { + contents.jobRoleArn = __expectString(data.jobRoleArn); + } + if (data.jobStatus !== undefined && data.jobStatus !== null) { + contents.jobStatus = __expectString(data.jobStatus); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1DescribeBulkImportJobCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + const errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalFailureException": + case "com.amazonaws.iotsitewise#InternalFailureException": + throw await deserializeAws_restJson1InternalFailureExceptionResponse(parsedOutput, context); + case "InvalidRequestException": + case "com.amazonaws.iotsitewise#InvalidRequestException": + throw await deserializeAws_restJson1InvalidRequestExceptionResponse(parsedOutput, context); + case "ResourceNotFoundException": + case "com.amazonaws.iotsitewise#ResourceNotFoundException": + throw await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context); + case "ThrottlingException": + case "com.amazonaws.iotsitewise#ThrottlingException": + throw await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + const $metadata = deserializeMetadata(output); + const statusCode = $metadata.httpStatusCode ? 
$metadata.httpStatusCode + "" : undefined; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode || statusCode || "UnknowError", + $fault: "client", + $metadata, + }); + throw __decorateServiceException(response, parsedBody); + } +}; + export const deserializeAws_restJson1DescribeDashboardCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -5926,6 +6205,64 @@ const deserializeAws_restJson1ListAssociatedAssetsCommandError = async ( } }; +export const deserializeAws_restJson1ListBulkImportJobsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1ListBulkImportJobsCommandError(output, context); + } + const contents: ListBulkImportJobsCommandOutput = { + $metadata: deserializeMetadata(output), + jobSummaries: undefined, + nextToken: undefined, + }; + const data: Record = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.jobSummaries !== undefined && data.jobSummaries !== null) { + contents.jobSummaries = deserializeAws_restJson1JobSummaries(data.jobSummaries, context); + } + if (data.nextToken !== undefined && data.nextToken !== null) { + contents.nextToken = __expectString(data.nextToken); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1ListBulkImportJobsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + const errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalFailureException": + case "com.amazonaws.iotsitewise#InternalFailureException": + throw await deserializeAws_restJson1InternalFailureExceptionResponse(parsedOutput, context); + case "InvalidRequestException": + case 
"com.amazonaws.iotsitewise#InvalidRequestException": + throw await deserializeAws_restJson1InvalidRequestExceptionResponse(parsedOutput, context); + case "ResourceNotFoundException": + case "com.amazonaws.iotsitewise#ResourceNotFoundException": + throw await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context); + case "ThrottlingException": + case "com.amazonaws.iotsitewise#ThrottlingException": + throw await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + const $metadata = deserializeMetadata(output); + const statusCode = $metadata.httpStatusCode ? $metadata.httpStatusCode + "" : undefined; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode || statusCode || "UnknowError", + $fault: "client", + $metadata, + }); + throw __decorateServiceException(response, parsedBody); + } +}; + export const deserializeAws_restJson1ListDashboardsCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -7596,6 +7933,23 @@ const serializeAws_restJson1BatchGetAssetPropertyValueHistoryEntry = ( }; }; +const serializeAws_restJson1ColumnNames = (input: (ColumnName | string)[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + +const serializeAws_restJson1Csv = (input: Csv, context: __SerdeContext): any => { + return { + ...(input.columnNames != null && { columnNames: serializeAws_restJson1ColumnNames(input.columnNames, context) }), + }; +}; + const serializeAws_restJson1CustomerManagedS3Storage = ( input: CustomerManagedS3Storage, context: __SerdeContext @@ -7606,6 +7960,13 @@ const serializeAws_restJson1CustomerManagedS3Storage = ( }; }; +const serializeAws_restJson1ErrorReportLocation = (input: ErrorReportLocation, context: __SerdeContext): any => { + return { + ...(input.bucket != null && { bucket: 
input.bucket }), + ...(input.prefix != null && { prefix: input.prefix }), + }; +}; + const serializeAws_restJson1ExpressionVariable = (input: ExpressionVariable, context: __SerdeContext): any => { return { ...(input.name != null && { name: input.name }), @@ -7624,6 +7985,31 @@ const serializeAws_restJson1ExpressionVariables = (input: ExpressionVariable[], }); }; +const serializeAws_restJson1File = (input: File, context: __SerdeContext): any => { + return { + ...(input.bucket != null && { bucket: input.bucket }), + ...(input.key != null && { key: input.key }), + ...(input.versionId != null && { versionId: input.versionId }), + }; +}; + +const serializeAws_restJson1FileFormat = (input: FileFormat, context: __SerdeContext): any => { + return { + ...(input.csv != null && { csv: serializeAws_restJson1Csv(input.csv, context) }), + }; +}; + +const serializeAws_restJson1Files = (input: File[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_restJson1File(entry, context); + }); +}; + const serializeAws_restJson1ForwardingConfig = (input: ForwardingConfig, context: __SerdeContext): any => { return { ...(input.state != null && { state: input.state }), @@ -7703,6 +8089,12 @@ const serializeAws_restJson1ImageFile = (input: ImageFile, context: __SerdeConte }; }; +const serializeAws_restJson1JobConfiguration = (input: JobConfiguration, context: __SerdeContext): any => { + return { + ...(input.fileFormat != null && { fileFormat: serializeAws_restJson1FileFormat(input.fileFormat, context) }), + }; +}; + const serializeAws_restJson1LoggingOptions = (input: LoggingOptions, context: __SerdeContext): any => { return { ...(input.level != null && { level: input.level }), @@ -8681,6 +9073,18 @@ const deserializeAws_restJson1BatchPutAssetPropertyErrors = ( return retVal; }; +const deserializeAws_restJson1ColumnNames = (output: any, context: __SerdeContext): 
(ColumnName | string)[] => { + const retVal = (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return __expectString(entry) as any; + }); + return retVal; +}; + const deserializeAws_restJson1CompositeModelProperty = ( output: any, context: __SerdeContext @@ -8710,6 +9114,13 @@ const deserializeAws_restJson1ConfigurationStatus = (output: any, context: __Ser } as any; }; +const deserializeAws_restJson1Csv = (output: any, context: __SerdeContext): Csv => { + return { + columnNames: + output.columnNames != null ? deserializeAws_restJson1ColumnNames(output.columnNames, context) : undefined, + } as any; +}; + const deserializeAws_restJson1CustomerManagedS3Storage = ( output: any, context: __SerdeContext @@ -8775,6 +9186,13 @@ const deserializeAws_restJson1ErrorDetails = (output: any, context: __SerdeConte } as any; }; +const deserializeAws_restJson1ErrorReportLocation = (output: any, context: __SerdeContext): ErrorReportLocation => { + return { + bucket: __expectString(output.bucket), + prefix: __expectString(output.prefix), + } as any; +}; + const deserializeAws_restJson1ExpressionVariable = (output: any, context: __SerdeContext): ExpressionVariable => { return { name: __expectString(output.name), @@ -8794,6 +9212,32 @@ const deserializeAws_restJson1ExpressionVariables = (output: any, context: __Ser return retVal; }; +const deserializeAws_restJson1File = (output: any, context: __SerdeContext): File => { + return { + bucket: __expectString(output.bucket), + key: __expectString(output.key), + versionId: __expectString(output.versionId), + } as any; +}; + +const deserializeAws_restJson1FileFormat = (output: any, context: __SerdeContext): FileFormat => { + return { + csv: output.csv != null ? 
deserializeAws_restJson1Csv(output.csv, context) : undefined, + } as any; +}; + +const deserializeAws_restJson1Files = (output: any, context: __SerdeContext): File[] => { + const retVal = (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1File(entry, context); + }); + return retVal; +}; + const deserializeAws_restJson1ForwardingConfig = (output: any, context: __SerdeContext): ForwardingConfig => { return { state: __expectString(output.state), @@ -8939,6 +9383,32 @@ const deserializeAws_restJson1InterpolatedAssetPropertyValues = ( return retVal; }; +const deserializeAws_restJson1JobConfiguration = (output: any, context: __SerdeContext): JobConfiguration => { + return { + fileFormat: output.fileFormat != null ? deserializeAws_restJson1FileFormat(output.fileFormat, context) : undefined, + } as any; +}; + +const deserializeAws_restJson1JobSummaries = (output: any, context: __SerdeContext): JobSummary[] => { + const retVal = (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1JobSummary(entry, context); + }); + return retVal; +}; + +const deserializeAws_restJson1JobSummary = (output: any, context: __SerdeContext): JobSummary => { + return { + id: __expectString(output.id), + name: __expectString(output.name), + status: __expectString(output.status), + } as any; +}; + const deserializeAws_restJson1LoggingOptions = (output: any, context: __SerdeContext): LoggingOptions => { return { level: __expectString(output.level), diff --git a/codegen/sdk-codegen/aws-models/iotsitewise.json b/codegen/sdk-codegen/aws-models/iotsitewise.json index bf602728746b6..96e36780f23bb 100644 --- a/codegen/sdk-codegen/aws-models/iotsitewise.json +++ b/codegen/sdk-codegen/aws-models/iotsitewise.json @@ -91,6 +91,9 @@ { "target": "com.amazonaws.iotsitewise#CreateAssetModel" }, + { + "target": 
"com.amazonaws.iotsitewise#CreateBulkImportJob" + }, { "target": "com.amazonaws.iotsitewise#CreateDashboard" }, @@ -139,6 +142,9 @@ { "target": "com.amazonaws.iotsitewise#DescribeAssetProperty" }, + { + "target": "com.amazonaws.iotsitewise#DescribeBulkImportJob" + }, { "target": "com.amazonaws.iotsitewise#DescribeDashboard" }, @@ -199,6 +205,9 @@ { "target": "com.amazonaws.iotsitewise#ListAssociatedAssets" }, + { + "target": "com.amazonaws.iotsitewise#ListBulkImportJobs" + }, { "target": "com.amazonaws.iotsitewise#ListDashboards" }, @@ -2725,6 +2734,15 @@ } } }, + "com.amazonaws.iotsitewise#Bucket": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 3, + "max": 63 + } + } + }, "com.amazonaws.iotsitewise#CapabilityConfiguration": { "type": "string", "traits": { @@ -2777,6 +2795,51 @@ "smithy.api#pattern": "^\\S{36,64}$" } }, + "com.amazonaws.iotsitewise#ColumnName": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "ALIAS", + "name": "ALIAS" + }, + { + "value": "ASSET_ID", + "name": "ASSET_ID" + }, + { + "value": "PROPERTY_ID", + "name": "PROPERTY_ID" + }, + { + "value": "DATA_TYPE", + "name": "DATA_TYPE" + }, + { + "value": "TIMESTAMP_SECONDS", + "name": "TIMESTAMP_SECONDS" + }, + { + "value": "TIMESTAMP_NANO_OFFSET", + "name": "TIMESTAMP_NANO_OFFSET" + }, + { + "value": "QUALITY", + "name": "QUALITY" + }, + { + "value": "VALUE", + "name": "VALUE" + } + ] + } + }, + "com.amazonaws.iotsitewise#ColumnNames": { + "type": "list", + "member": { + "target": "com.amazonaws.iotsitewise#ColumnName" + } + }, "com.amazonaws.iotsitewise#CompositeModelProperty": { "type": "structure", "members": { @@ -3241,6 +3304,115 @@ } } }, + "com.amazonaws.iotsitewise#CreateBulkImportJob": { + "type": "operation", + "input": { + "target": "com.amazonaws.iotsitewise#CreateBulkImportJobRequest" + }, + "output": { + "target": "com.amazonaws.iotsitewise#CreateBulkImportJobResponse" + }, + "errors": [ + { + "target": 
"com.amazonaws.iotsitewise#ConflictingOperationException" + }, + { + "target": "com.amazonaws.iotsitewise#InternalFailureException" + }, + { + "target": "com.amazonaws.iotsitewise#InvalidRequestException" + }, + { + "target": "com.amazonaws.iotsitewise#LimitExceededException" + }, + { + "target": "com.amazonaws.iotsitewise#ResourceAlreadyExistsException" + }, + { + "target": "com.amazonaws.iotsitewise#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.iotsitewise#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "\n

          This API operation is in preview release for IoT SiteWise and is subject to change. \n We recommend that you use this operation only with test data, and not in production environments.

          \n
          \n

          Defines a job to ingest data to IoT SiteWise from Amazon S3. For more information, \n see Create a bulk import job (CLI) \n in the IoT SiteWise User Guide.

          \n \n

          You must enable IoT SiteWise to export data to Amazon S3 before you create a bulk import job. \n For more information about how to configure storage settings, \n see PutStorageConfiguration.

          \n
          ", + "smithy.api#endpoint": { + "hostPrefix": "data." + }, + "smithy.api#http": { + "method": "POST", + "uri": "/jobs", + "code": 202 + } + } + }, + "com.amazonaws.iotsitewise#CreateBulkImportJobRequest": { + "type": "structure", + "members": { + "jobName": { + "target": "com.amazonaws.iotsitewise#Name", + "traits": { + "smithy.api#documentation": "

          The unique name that helps identify the job request.

          ", + "smithy.api#required": {} + } + }, + "jobRoleArn": { + "target": "com.amazonaws.iotsitewise#ARN", + "traits": { + "smithy.api#documentation": "

          The ARN of the IAM role that allows IoT SiteWise to read Amazon S3 data.

          ", + "smithy.api#required": {} + } + }, + "files": { + "target": "com.amazonaws.iotsitewise#Files", + "traits": { + "smithy.api#documentation": "

          The files in the specified Amazon S3 bucket that contain your data.

          ", + "smithy.api#required": {} + } + }, + "errorReportLocation": { + "target": "com.amazonaws.iotsitewise#ErrorReportLocation", + "traits": { + "smithy.api#documentation": "

          The Amazon S3 destination where errors associated with the job creation request are saved.

          ", + "smithy.api#required": {} + } + }, + "jobConfiguration": { + "target": "com.amazonaws.iotsitewise#JobConfiguration", + "traits": { + "smithy.api#documentation": "

          Contains the configuration information of a job, such as the file format used to save data in Amazon S3.

          ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.iotsitewise#CreateBulkImportJobResponse": { + "type": "structure", + "members": { + "jobId": { + "target": "com.amazonaws.iotsitewise#ID", + "traits": { + "smithy.api#documentation": "

          The ID of the job.

          ", + "smithy.api#required": {} + } + }, + "jobName": { + "target": "com.amazonaws.iotsitewise#Name", + "traits": { + "smithy.api#documentation": "

          The unique name that helps identify the job request.

          ", + "smithy.api#required": {} + } + }, + "jobStatus": { + "target": "com.amazonaws.iotsitewise#JobStatus", + "traits": { + "smithy.api#documentation": "

          The status of the bulk import job can be one of following values.

          \n
            \n
          • \n

            \n PENDING – IoT SiteWise is waiting for the current bulk import job to finish.

            \n
          • \n
          • \n

            \n CANCELLED – The bulk import job has been canceled.

            \n
          • \n
          • \n

            \n RUNNING – IoT SiteWise is processing your request to import your data from Amazon S3.

            \n
          • \n
          • \n

            \n COMPLETED – IoT SiteWise successfully completed your request to import data from Amazon S3.

            \n
          • \n
          • \n

            \n FAILED – IoT SiteWise couldn't process your request to import data from Amazon S3. \n You can use logs saved in the specified error report location in Amazon S3 to troubleshoot issues.

            \n
          • \n
          • \n

            \n COMPLETED_WITH_FAILURES – IoT SiteWise completed your request to import data from Amazon S3 with errors.\n You can use logs saved in the specified error report location in Amazon S3 to troubleshoot issues.

            \n
          • \n
          ", + "smithy.api#required": {} + } + } + } + }, "com.amazonaws.iotsitewise#CreateDashboard": { "type": "operation", "input": { @@ -3663,6 +3835,20 @@ } } }, + "com.amazonaws.iotsitewise#Csv": { + "type": "structure", + "members": { + "columnNames": { + "target": "com.amazonaws.iotsitewise#ColumnNames", + "traits": { + "smithy.api#documentation": "

          The column names specified in the .csv file.

          " + } + } + }, + "traits": { + "smithy.api#documentation": "

          A .csv file.

          " + } + }, "com.amazonaws.iotsitewise#CustomerManagedS3Storage": { "type": "structure", "members": { @@ -4773,6 +4959,121 @@ } } }, + "com.amazonaws.iotsitewise#DescribeBulkImportJob": { + "type": "operation", + "input": { + "target": "com.amazonaws.iotsitewise#DescribeBulkImportJobRequest" + }, + "output": { + "target": "com.amazonaws.iotsitewise#DescribeBulkImportJobResponse" + }, + "errors": [ + { + "target": "com.amazonaws.iotsitewise#InternalFailureException" + }, + { + "target": "com.amazonaws.iotsitewise#InvalidRequestException" + }, + { + "target": "com.amazonaws.iotsitewise#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.iotsitewise#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "\n

          This API operation is in preview release for IoT SiteWise and is subject to change. \n We recommend that you use this operation only with test data, and not in production environments.

          \n
          \n

          Retrieves information about a bulk import job request. For more information, \n see Describe a bulk import job (CLI) \n in the IoT SiteWise User Guide.

          ", + "smithy.api#endpoint": { + "hostPrefix": "data." + }, + "smithy.api#http": { + "method": "GET", + "uri": "/jobs/{jobId}", + "code": 200 + } + } + }, + "com.amazonaws.iotsitewise#DescribeBulkImportJobRequest": { + "type": "structure", + "members": { + "jobId": { + "target": "com.amazonaws.iotsitewise#ID", + "traits": { + "smithy.api#documentation": "

          The ID of the job.

          ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.iotsitewise#DescribeBulkImportJobResponse": { + "type": "structure", + "members": { + "jobId": { + "target": "com.amazonaws.iotsitewise#ID", + "traits": { + "smithy.api#documentation": "

          The ID of the job.

          ", + "smithy.api#required": {} + } + }, + "jobName": { + "target": "com.amazonaws.iotsitewise#Name", + "traits": { + "smithy.api#documentation": "

          The unique name that helps identify the job request.

          ", + "smithy.api#required": {} + } + }, + "jobStatus": { + "target": "com.amazonaws.iotsitewise#JobStatus", + "traits": { + "smithy.api#documentation": "

          The status of the bulk import job can be one of following values.

          \n
            \n
          • \n

            \n PENDING – IoT SiteWise is waiting for the current bulk import job to finish.

            \n
          • \n
          • \n

            \n CANCELLED – The bulk import job has been canceled.

            \n
          • \n
          • \n

            \n RUNNING – IoT SiteWise is processing your request to import your data from Amazon S3.

            \n
          • \n
          • \n

            \n COMPLETED – IoT SiteWise successfully completed your request to import data from Amazon S3.

            \n
          • \n
          • \n

            \n FAILED – IoT SiteWise couldn't process your request to import data from Amazon S3. \n You can use logs saved in the specified error report location in Amazon S3 to troubleshoot issues.

            \n
          • \n
          • \n

            \n COMPLETED_WITH_FAILURES – IoT SiteWise completed your request to import data from Amazon S3 with errors.\n You can use logs saved in the specified error report location in Amazon S3 to troubleshoot issues.

            \n
          • \n
          ", + "smithy.api#required": {} + } + }, + "jobRoleArn": { + "target": "com.amazonaws.iotsitewise#ARN", + "traits": { + "smithy.api#documentation": "

          The ARN of the IAM role that allows IoT SiteWise to read Amazon S3 data.

          ", + "smithy.api#required": {} + } + }, + "files": { + "target": "com.amazonaws.iotsitewise#Files", + "traits": { + "smithy.api#documentation": "

          The files in the specified Amazon S3 bucket that contain your data.

          ", + "smithy.api#required": {} + } + }, + "errorReportLocation": { + "target": "com.amazonaws.iotsitewise#ErrorReportLocation", + "traits": { + "smithy.api#documentation": "

          The Amazon S3 destination where errors associated with the job creation request are saved.

          ", + "smithy.api#required": {} + } + }, + "jobConfiguration": { + "target": "com.amazonaws.iotsitewise#JobConfiguration", + "traits": { + "smithy.api#documentation": "

          Contains the configuration information of a job, such as the file format used to save data in Amazon S3.

          ", + "smithy.api#required": {} + } + }, + "jobCreationDate": { + "target": "com.amazonaws.iotsitewise#Timestamp", + "traits": { + "smithy.api#documentation": "

          The date the job was created, in Unix epoch time.

          ", + "smithy.api#required": {} + } + }, + "jobLastUpdateDate": { + "target": "com.amazonaws.iotsitewise#Timestamp", + "traits": { + "smithy.api#documentation": "

          The date the job was last updated, in Unix epoch time.

          ", + "smithy.api#required": {} + } + } + } + }, "com.amazonaws.iotsitewise#DescribeDashboard": { "type": "operation", "input": { @@ -5956,6 +6257,28 @@ "com.amazonaws.iotsitewise#ErrorMessage": { "type": "string" }, + "com.amazonaws.iotsitewise#ErrorReportLocation": { + "type": "structure", + "members": { + "bucket": { + "target": "com.amazonaws.iotsitewise#Bucket", + "traits": { + "smithy.api#documentation": "

          The name of the Amazon S3 bucket to which errors associated with the bulk import job are sent.

          ", + "smithy.api#required": {} + } + }, + "prefix": { + "target": "com.amazonaws.iotsitewise#String", + "traits": { + "smithy.api#documentation": "

          Amazon S3 uses the prefix as a folder name to organize data in the bucket. \n Each Amazon S3 object has a key that is its unique identifier in the bucket. \n Each object in a bucket has exactly one key. The prefix must end with a forward slash (/). \n For more information, see Organizing objects using prefixes \n in the Amazon Simple Storage Service User Guide.

          ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

          The Amazon S3 destination where errors associated with the job creation request are saved.

          " + } + }, "com.amazonaws.iotsitewise#ExceptionMessage": { "type": "string" }, @@ -5996,6 +6319,54 @@ "target": "com.amazonaws.iotsitewise#ExpressionVariable" } }, + "com.amazonaws.iotsitewise#File": { + "type": "structure", + "members": { + "bucket": { + "target": "com.amazonaws.iotsitewise#Bucket", + "traits": { + "smithy.api#documentation": "

          The name of the Amazon S3 bucket from which data is imported.

          ", + "smithy.api#required": {} + } + }, + "key": { + "target": "com.amazonaws.iotsitewise#String", + "traits": { + "smithy.api#documentation": "

          The key of the Amazon S3 object that contains your data. Each object has a key that is a\n unique identifier. Each object has exactly one key.

          ", + "smithy.api#required": {} + } + }, + "versionId": { + "target": "com.amazonaws.iotsitewise#String", + "traits": { + "smithy.api#documentation": "

          The version ID to identify a specific version of the Amazon S3 object that contains your data.

          " + } + } + }, + "traits": { + "smithy.api#documentation": "

          The file in Amazon S3 where your data is saved.

          " + } + }, + "com.amazonaws.iotsitewise#FileFormat": { + "type": "structure", + "members": { + "csv": { + "target": "com.amazonaws.iotsitewise#Csv", + "traits": { + "smithy.api#documentation": "

          The .csv file format.

          " + } + } + }, + "traits": { + "smithy.api#documentation": "

          The file format of the data.

          " + } + }, + "com.amazonaws.iotsitewise#Files": { + "type": "list", + "member": { + "target": "com.amazonaws.iotsitewise#File" + } + }, "com.amazonaws.iotsitewise#ForwardingConfig": { "type": "structure", "members": { @@ -6972,6 +7343,87 @@ "smithy.api#httpError": 400 } }, + "com.amazonaws.iotsitewise#JobConfiguration": { + "type": "structure", + "members": { + "fileFormat": { + "target": "com.amazonaws.iotsitewise#FileFormat", + "traits": { + "smithy.api#documentation": "

          The file format of the data in Amazon S3.

          ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

          Contains the configuration information of a job, such as the file format used to save data in Amazon S3.

          " + } + }, + "com.amazonaws.iotsitewise#JobStatus": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "PENDING", + "name": "PENDING" + }, + { + "value": "CANCELLED", + "name": "CANCELLED" + }, + { + "value": "RUNNING", + "name": "RUNNING" + }, + { + "value": "COMPLETED", + "name": "COMPLETED" + }, + { + "value": "FAILED", + "name": "FAILED" + }, + { + "value": "COMPLETED_WITH_FAILURES", + "name": "COMPLETED_WITH_FAILURES" + } + ] + } + }, + "com.amazonaws.iotsitewise#JobSummaries": { + "type": "list", + "member": { + "target": "com.amazonaws.iotsitewise#JobSummary" + } + }, + "com.amazonaws.iotsitewise#JobSummary": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.iotsitewise#ID", + "traits": { + "smithy.api#documentation": "

          The ID of the job.

          ", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.iotsitewise#Name", + "traits": { + "smithy.api#documentation": "

          The unique name that helps identify the job request.

          ", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.iotsitewise#JobStatus", + "traits": { + "smithy.api#documentation": "

          The status of the bulk import job can be one of the following values.

          \n
            \n
          • \n

            \n PENDING – IoT SiteWise is waiting for the current bulk import job to finish.

            \n
          • \n
          • \n

            \n CANCELLED – The bulk import job has been canceled.

            \n
          • \n
          • \n

            \n RUNNING – IoT SiteWise is processing your request to import your data from Amazon S3.

            \n
          • \n
          • \n

            \n COMPLETED – IoT SiteWise successfully completed your request to import data from Amazon S3.

            \n
          • \n
          • \n

            \n FAILED – IoT SiteWise couldn't process your request to import data from Amazon S3. \n You can use logs saved in the specified error report location in Amazon S3 to troubleshoot issues.

            \n
          • \n
          • \n

            \n COMPLETED_WITH_FAILURES – IoT SiteWise completed your request to import data from Amazon S3 with errors.\n You can use logs saved in the specified error report location in Amazon S3 to troubleshoot issues.

            \n
          • \n
          ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

          Contains the job summary information.

          " + } + }, "com.amazonaws.iotsitewise#KmsKeyId": { "type": "string", "traits": { @@ -7478,6 +7930,125 @@ } } }, + "com.amazonaws.iotsitewise#ListBulkImportJobs": { + "type": "operation", + "input": { + "target": "com.amazonaws.iotsitewise#ListBulkImportJobsRequest" + }, + "output": { + "target": "com.amazonaws.iotsitewise#ListBulkImportJobsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.iotsitewise#InternalFailureException" + }, + { + "target": "com.amazonaws.iotsitewise#InvalidRequestException" + }, + { + "target": "com.amazonaws.iotsitewise#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.iotsitewise#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "\n

          This API operation is in preview release for IoT SiteWise and is subject to change. \n We recommend that you use this operation only with test data, and not in production environments.

          \n
          \n

          Retrieves a paginated list of bulk import job requests. For more information, \n see List bulk import jobs (CLI) \n in the IoT SiteWise User Guide.

          ", + "smithy.api#endpoint": { + "hostPrefix": "data." + }, + "smithy.api#http": { + "method": "GET", + "uri": "/jobs", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "items": "jobSummaries", + "pageSize": "maxResults" + } + } + }, + "com.amazonaws.iotsitewise#ListBulkImportJobsFilter": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "ALL", + "name": "ALL" + }, + { + "value": "PENDING", + "name": "PENDING" + }, + { + "value": "RUNNING", + "name": "RUNNING" + }, + { + "value": "CANCELLED", + "name": "CANCELLED" + }, + { + "value": "FAILED", + "name": "FAILED" + }, + { + "value": "COMPLETED_WITH_FAILURES", + "name": "COMPLETED_WITH_FAILURES" + }, + { + "value": "COMPLETED", + "name": "COMPLETED" + } + ] + } + }, + "com.amazonaws.iotsitewise#ListBulkImportJobsRequest": { + "type": "structure", + "members": { + "nextToken": { + "target": "com.amazonaws.iotsitewise#NextToken", + "traits": { + "smithy.api#documentation": "

          The token to be used for the next set of paginated results.

          ", + "smithy.api#httpQuery": "nextToken" + } + }, + "maxResults": { + "target": "com.amazonaws.iotsitewise#MaxResults", + "traits": { + "smithy.api#documentation": "

          The maximum number of results to return for each paginated request.

          ", + "smithy.api#httpQuery": "maxResults" + } + }, + "filter": { + "target": "com.amazonaws.iotsitewise#ListBulkImportJobsFilter", + "traits": { + "smithy.api#documentation": "

          You can use a filter to select the bulk import jobs that you want to retrieve.

          ", + "smithy.api#httpQuery": "filter" + } + } + } + }, + "com.amazonaws.iotsitewise#ListBulkImportJobsResponse": { + "type": "structure", + "members": { + "jobSummaries": { + "target": "com.amazonaws.iotsitewise#JobSummaries", + "traits": { + "smithy.api#documentation": "

          One or more job summaries to list.

          ", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.iotsitewise#NextToken", + "traits": { + "smithy.api#documentation": "

          The token for the next set of results, or null if there are no additional results.

          " + } + } + } + }, "com.amazonaws.iotsitewise#ListDashboards": { "type": "operation", "input": { @@ -9189,6 +9760,9 @@ ] } }, + "com.amazonaws.iotsitewise#String": { + "type": "string" + }, "com.amazonaws.iotsitewise#TagKey": { "type": "string", "traits": { From e94c855cd87e381d4cbf34685fe4f68da1031ffa Mon Sep 17 00:00:00 2001 From: awstools Date: Thu, 21 Jul 2022 18:23:02 +0000 Subject: [PATCH 22/31] feat(client-cloudwatch): Adding support for the suppression of Composite Alarm actions --- .../client-cloudwatch/src/models/models_0.ts | 161 +++++++++++++++++- .../src/protocols/Aws_query.ts | 35 ++++ .../sdk-codegen/aws-models/cloudwatch.json | 90 +++++++++- 3 files changed, 284 insertions(+), 2 deletions(-) diff --git a/clients/client-cloudwatch/src/models/models_0.ts b/clients/client-cloudwatch/src/models/models_0.ts index b65f6cdd77068..381a393b5f4a0 100644 --- a/clients/client-cloudwatch/src/models/models_0.ts +++ b/clients/client-cloudwatch/src/models/models_0.ts @@ -3,6 +3,12 @@ import { ExceptionOptionType as __ExceptionOptionType } from "@aws-sdk/smithy-cl import { CloudWatchServiceException as __BaseException } from "./CloudWatchServiceException"; +export enum ActionsSuppressedBy { + Alarm = "Alarm", + ExtensionPeriod = "ExtensionPeriod", + WaitPeriod = "WaitPeriod", +} + export type AlarmType = "CompositeAlarm" | "MetricAlarm"; export type HistoryItemType = "Action" | "ConfigurationUpdate" | "StateUpdate"; @@ -606,7 +612,7 @@ export interface CompositeAlarm { StateReasonData?: string; /** - *

          The time stamp of the last update to the alarm state.

          + *

          Tracks the timestamp of any state update, even if StateValue doesn't change.

          */ StateUpdatedTimestamp?: Date; @@ -614,6 +620,107 @@ export interface CompositeAlarm { *

          The state value for the alarm.

          */ StateValue?: StateValue | string; + + /** + *

          + * The timestamp + * of the last change + * to the alarm's StateValue. + *

          + */ + StateTransitionedTimestamp?: Date; + + /** + *

          + * When the value is ALARM, + * it means + * that the actions are suppressed + * because the suppressor alarm is + * in ALARM + * When the value is WaitPeriod, + * it means that + * the actions are suppressed + * because the composite alarm is waiting + * for the suppressor alarm + * to go + * into + * into the ALARM state. + * The maximum waiting time is as specified + * in ActionsSuppressorWaitPeriod. + * After this time, + * the composite alarm performs its actions. + * When the value is ExtensionPeriod, + * it means + * that the actions are suppressed + * because the composite alarm is waiting + * after the suppressor alarm went out + * of the ALARM state. + * The maximum waiting time is as specified + * in ActionsSuppressorExtensionPeriod. + * After this time, + * the composite alarm performs its actions. + *

          + */ + ActionsSuppressedBy?: ActionsSuppressedBy | string; + + /** + *

          + * Captures the reason for action suppression. + *

          + */ + ActionsSuppressedReason?: string; + + /** + *

          + * Actions will be suppressed + * if the suppressor alarm is + * in the ALARM state. + * ActionsSuppressor can be an AlarmName or an Amazon Resource Name (ARN) + * from an existing alarm. + *

          + */ + ActionsSuppressor?: string; + + /** + *

          + * The maximum time + * in seconds + * that the composite alarm waits + * for the suppressor alarm + * to go + * into the ALARM state. + * After this time, + * the composite alarm performs its actions. + *

          + * + *

          + * WaitPeriod + * is required only + * when ActionsSuppressor is specified. + *

          + *
          + */ + ActionsSuppressorWaitPeriod?: number; + + /** + *

          + * The maximum time + * in seconds + * that the composite alarm waits + * after suppressor alarm goes out + * of the ALARM state. + * After this time, + * the composite alarm performs its actions. + *

          + * + *

          + * ExtensionPeriod + * is required only + * when ActionsSuppressor is specified. + *

          + *
          + */ + ActionsSuppressorExtensionPeriod?: number; } export namespace CompositeAlarm { @@ -3398,6 +3505,58 @@ export interface PutCompositeAlarmInput { * certain tag values.

          */ Tags?: Tag[]; + + /** + *

          + * Actions will be suppressed + * if the suppressor alarm is + * in the ALARM state. + * ActionsSuppressor can be an AlarmName or an Amazon Resource Name (ARN) + * from an existing alarm. + *

          + */ + ActionsSuppressor?: string; + + /** + *

          + * The maximum time + * in seconds + * that the composite alarm waits + * for the suppressor alarm + * to go + * into the ALARM state. + * After this time, + * the composite alarm performs its actions. + *

          + * + *

          + * WaitPeriod + * is required only + * when ActionsSuppressor is specified. + *

          + *
          + */ + ActionsSuppressorWaitPeriod?: number; + + /** + *

          + * The maximum time + * in seconds + * that the composite alarm waits + * after suppressor alarm goes out + * of the ALARM state. + * After this time, + * the composite alarm performs its actions. + *

          + * + *

          + * ExtensionPeriod + * is required only + * when ActionsSuppressor is specified. + *

          + *
          + */ + ActionsSuppressorExtensionPeriod?: number; } export namespace PutCompositeAlarmInput { diff --git a/clients/client-cloudwatch/src/protocols/Aws_query.ts b/clients/client-cloudwatch/src/protocols/Aws_query.ts index 309776eea0333..0ed768c0be17a 100644 --- a/clients/client-cloudwatch/src/protocols/Aws_query.ts +++ b/clients/client-cloudwatch/src/protocols/Aws_query.ts @@ -3741,6 +3741,15 @@ const serializeAws_queryPutCompositeAlarmInput = (input: PutCompositeAlarmInput, entries[loc] = value; }); } + if (input.ActionsSuppressor !== undefined && input.ActionsSuppressor !== null) { + entries["ActionsSuppressor"] = input.ActionsSuppressor; + } + if (input.ActionsSuppressorWaitPeriod !== undefined && input.ActionsSuppressorWaitPeriod !== null) { + entries["ActionsSuppressorWaitPeriod"] = input.ActionsSuppressorWaitPeriod; + } + if (input.ActionsSuppressorExtensionPeriod !== undefined && input.ActionsSuppressorExtensionPeriod !== null) { + entries["ActionsSuppressorExtensionPeriod"] = input.ActionsSuppressorExtensionPeriod; + } return entries; }; @@ -4296,6 +4305,12 @@ const deserializeAws_queryCompositeAlarm = (output: any, context: __SerdeContext StateReasonData: undefined, StateUpdatedTimestamp: undefined, StateValue: undefined, + StateTransitionedTimestamp: undefined, + ActionsSuppressedBy: undefined, + ActionsSuppressedReason: undefined, + ActionsSuppressor: undefined, + ActionsSuppressorWaitPeriod: undefined, + ActionsSuppressorExtensionPeriod: undefined, }; if (output["ActionsEnabled"] !== undefined) { contents.ActionsEnabled = __parseBoolean(output["ActionsEnabled"]); @@ -4356,6 +4371,26 @@ const deserializeAws_queryCompositeAlarm = (output: any, context: __SerdeContext if (output["StateValue"] !== undefined) { contents.StateValue = __expectString(output["StateValue"]); } + if (output["StateTransitionedTimestamp"] !== undefined) { + contents.StateTransitionedTimestamp = __expectNonNull(__parseRfc3339DateTime(output["StateTransitionedTimestamp"])); + 
} + if (output["ActionsSuppressedBy"] !== undefined) { + contents.ActionsSuppressedBy = __expectString(output["ActionsSuppressedBy"]); + } + if (output["ActionsSuppressedReason"] !== undefined) { + contents.ActionsSuppressedReason = __expectString(output["ActionsSuppressedReason"]); + } + if (output["ActionsSuppressor"] !== undefined) { + contents.ActionsSuppressor = __expectString(output["ActionsSuppressor"]); + } + if (output["ActionsSuppressorWaitPeriod"] !== undefined) { + contents.ActionsSuppressorWaitPeriod = __strictParseInt32(output["ActionsSuppressorWaitPeriod"]) as number; + } + if (output["ActionsSuppressorExtensionPeriod"] !== undefined) { + contents.ActionsSuppressorExtensionPeriod = __strictParseInt32( + output["ActionsSuppressorExtensionPeriod"] + ) as number; + } return contents; }; diff --git a/codegen/sdk-codegen/aws-models/cloudwatch.json b/codegen/sdk-codegen/aws-models/cloudwatch.json index 1f63679d05135..4c5aac6c270cb 100644 --- a/codegen/sdk-codegen/aws-models/cloudwatch.json +++ b/codegen/sdk-codegen/aws-models/cloudwatch.json @@ -53,6 +53,34 @@ "smithy.api#box": {} } }, + "com.amazonaws.cloudwatch#ActionsSuppressedBy": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "WaitPeriod", + "name": "WaitPeriod" + }, + { + "value": "ExtensionPeriod", + "name": "ExtensionPeriod" + }, + { + "value": "Alarm", + "name": "Alarm" + } + ] + } + }, + "com.amazonaws.cloudwatch#ActionsSuppressedReason": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 1024 + } + } + }, "com.amazonaws.cloudwatch#AlarmArn": { "type": "string", "traits": { @@ -472,7 +500,7 @@ "StateUpdatedTimestamp": { "target": "com.amazonaws.cloudwatch#Timestamp", "traits": { - "smithy.api#documentation": "

          The time stamp of the last update to the alarm state.

          " + "smithy.api#documentation": "

          Tracks the timestamp of any state update, even if StateValue doesn't change.

          " } }, "StateValue": { @@ -480,6 +508,42 @@ "traits": { "smithy.api#documentation": "

          The state value for the alarm.

          " } + }, + "StateTransitionedTimestamp": { + "target": "com.amazonaws.cloudwatch#Timestamp", + "traits": { + "smithy.api#documentation": "

          \n\t\t\tThe timestamp \n\t\t\tof the last change \n\t\t\tto the alarm's StateValue.\n\t\t

          " + } + }, + "ActionsSuppressedBy": { + "target": "com.amazonaws.cloudwatch#ActionsSuppressedBy", + "traits": { + "smithy.api#documentation": "

          \n\t\t\tWhen the value is ALARM, \n\t\t\tit means \n\t\t\tthat the actions are suppressed\n\t\t\tbecause the suppressor alarm is \n\t\t\tin ALARM state.\n\t\t\tWhen the value is WaitPeriod, \n\t\t\tit means that \n\t\t\tthe actions are suppressed \n\t\t\tbecause the composite alarm is waiting\n\t\t\tfor the suppressor alarm \n\t\t\tto go \n\t\t\tinto the ALARM state. \n\t\t\tThe maximum waiting time is as specified\n\t\t\tin ActionsSuppressorWaitPeriod. \n\t\t\tAfter this time, \n\t\t\tthe composite alarm performs its actions.\n\t\t\tWhen the value is ExtensionPeriod,\n\t\t\tit means \n\t\t\tthat the actions are suppressed\n\t\t\tbecause the composite alarm is waiting \n\t\t\tafter the suppressor alarm went out\n\t\t\tof the ALARM state. \n\t\t\tThe maximum waiting time is as specified \n\t\t\tin ActionsSuppressorExtensionPeriod. \n\t\t\tAfter this time,\n\t\t\tthe composite alarm performs its actions.\n\t\t

          " + } + }, + "ActionsSuppressedReason": { + "target": "com.amazonaws.cloudwatch#ActionsSuppressedReason", + "traits": { + "smithy.api#documentation": "

          \n\t\t\tCaptures the reason for action suppression.\n\t\t

          " + } + }, + "ActionsSuppressor": { + "target": "com.amazonaws.cloudwatch#AlarmArn", + "traits": { + "smithy.api#documentation": "

          \n\t\t\tActions will be suppressed \n\t\t\tif the suppressor alarm is \n\t\t\tin the ALARM state. \n\t\t\tActionsSuppressor can be an AlarmName or an Amazon Resource Name (ARN) \n\t\t\tfrom an existing alarm. \n\t\t

          " + } + }, + "ActionsSuppressorWaitPeriod": { + "target": "com.amazonaws.cloudwatch#SuppressorPeriod", + "traits": { + "smithy.api#documentation": "

          \n\t\t\tThe maximum time \n\t\t\tin seconds \n\t\t\tthat the composite alarm waits\n\t\t\tfor the suppressor alarm \n\t\t\tto go \n\t\t\tinto the ALARM state. \n\t\t\tAfter this time, \n\t\t\tthe composite alarm performs its actions.\n\t\t

          \n\t\t \n\t\t\t

          \n\t\t\t\t WaitPeriod \n\t\t\t\tis required only \n\t\t\t\twhen ActionsSuppressor is specified.\n\t\t\t

          \n\t\t
          " + } + }, + "ActionsSuppressorExtensionPeriod": { + "target": "com.amazonaws.cloudwatch#SuppressorPeriod", + "traits": { + "smithy.api#documentation": "

          \n\t\t\tThe maximum time \n\t\t\tin seconds\n\t\t\tthat the composite alarm waits \n\t\t\tafter suppressor alarm goes out \n\t\t\tof the ALARM state. \n\t\t\tAfter this time, \n\t\t\tthe composite alarm performs its actions.\n\t\t

          \n\t\t \n\t\t\t

          \n\t\t\t\t ExtensionPeriod \n\t\t\t\tis required only \n\t\t\t\twhen ActionsSuppressor is specified.\n\t\t\t

          \n\t\t
          " + } } }, "traits": { @@ -4029,6 +4093,24 @@ "traits": { "smithy.api#documentation": "

          A list of key-value pairs to associate with the composite alarm. You can associate as many as 50 tags with an alarm.

          \n\t\t

          Tags can help you organize and categorize your\n\t\t\tresources. You can also use them to scope user permissions, by granting a user permission to access or change only resources with\n\t\t\tcertain tag values.

          " } + }, + "ActionsSuppressor": { + "target": "com.amazonaws.cloudwatch#AlarmArn", + "traits": { + "smithy.api#documentation": "

          \n\t\t\tActions will be suppressed \n\t\t\tif the suppressor alarm is \n\t\t\tin the ALARM state.\n\t\t\tActionsSuppressor can be an AlarmName or an Amazon Resource Name (ARN) \n\t\t\tfrom an existing alarm.\n\t\t

          " + } + }, + "ActionsSuppressorWaitPeriod": { + "target": "com.amazonaws.cloudwatch#SuppressorPeriod", + "traits": { + "smithy.api#documentation": "

          \n\t\t\tThe maximum time \n\t\t\tin seconds \n\t\t\tthat the composite alarm waits\n\t\t\tfor the suppressor alarm \n\t\t\tto go \n\t\t\tinto the ALARM state. \n\t\t\tAfter this time, \n\t\t\tthe composite alarm performs its actions.\n\t\t

          \n\t\t \n\t\t\t

          \n\t\t\t\t WaitPeriod \n\t\t\t\tis required only \n\t\t\t\twhen ActionsSuppressor is specified.\n\t\t\t

          \n\t\t
          " + } + }, + "ActionsSuppressorExtensionPeriod": { + "target": "com.amazonaws.cloudwatch#SuppressorPeriod", + "traits": { + "smithy.api#documentation": "

          \n\t\t\tThe maximum time \n\t\t\tin seconds\n\t\t\tthat the composite alarm waits \n\t\t\tafter suppressor alarm goes out \n\t\t\tof the ALARM state. \n\t\t\tAfter this time, \n\t\t\tthe composite alarm performs its actions.\n\t\t

          \n\t\t \n\t\t\t

          \n\t\t\t\t ExtensionPeriod \n\t\t\t\tis required only \n\t\t\t\twhen ActionsSuppressor is specified.\n\t\t\t

          \n\t\t
          " + } } } }, @@ -4986,6 +5068,12 @@ } } }, + "com.amazonaws.cloudwatch#SuppressorPeriod": { + "type": "integer", + "traits": { + "smithy.api#box": {} + } + }, "com.amazonaws.cloudwatch#Tag": { "type": "structure", "members": { From a2aef86781ea0833877ada3ed3dcdbaab5d700ec Mon Sep 17 00:00:00 2001 From: awstools Date: Thu, 21 Jul 2022 18:23:02 +0000 Subject: [PATCH 23/31] feat(client-ec2-instance-connect): This release includes a new exception type "EC2InstanceUnavailableException" for SendSSHPublicKey and SendSerialConsoleSSHPublicKey APIs. --- .../src/models/models_0.ts | 21 ++++++++++++++ .../src/protocols/Aws_json1_1.ts | 29 +++++++++++++++++++ .../aws-models/ec2-instance-connect.json | 23 +++++++++++++++ 3 files changed, 73 insertions(+) diff --git a/clients/client-ec2-instance-connect/src/models/models_0.ts b/clients/client-ec2-instance-connect/src/models/models_0.ts index 705135210a20a..6e8f25d2ae34d 100644 --- a/clients/client-ec2-instance-connect/src/models/models_0.ts +++ b/clients/client-ec2-instance-connect/src/models/models_0.ts @@ -90,6 +90,27 @@ export class EC2InstanceTypeInvalidException extends __BaseException { } } +/** + *

          The instance is currently unavailable. Wait a few minutes and try again.

          + */ +export class EC2InstanceUnavailableException extends __BaseException { + readonly name: "EC2InstanceUnavailableException" = "EC2InstanceUnavailableException"; + readonly $fault: "server" = "server"; + Message?: string; + /** + * @internal + */ + constructor(opts: __ExceptionOptionType) { + super({ + name: "EC2InstanceUnavailableException", + $fault: "server", + ...opts, + }); + Object.setPrototypeOf(this, EC2InstanceUnavailableException.prototype); + this.Message = opts.Message; + } +} + /** *

          One of the parameters is not valid.

          */ diff --git a/clients/client-ec2-instance-connect/src/protocols/Aws_json1_1.ts b/clients/client-ec2-instance-connect/src/protocols/Aws_json1_1.ts index 287c7db997dae..c677abb234266 100644 --- a/clients/client-ec2-instance-connect/src/protocols/Aws_json1_1.ts +++ b/clients/client-ec2-instance-connect/src/protocols/Aws_json1_1.ts @@ -23,6 +23,7 @@ import { EC2InstanceNotFoundException, EC2InstanceStateInvalidException, EC2InstanceTypeInvalidException, + EC2InstanceUnavailableException, InvalidArgsException, SendSerialConsoleSSHPublicKeyRequest, SendSerialConsoleSSHPublicKeyResponse, @@ -101,6 +102,9 @@ const deserializeAws_json1_1SendSerialConsoleSSHPublicKeyCommandError = async ( case "EC2InstanceTypeInvalidException": case "com.amazonaws.ec2instanceconnect#EC2InstanceTypeInvalidException": throw await deserializeAws_json1_1EC2InstanceTypeInvalidExceptionResponse(parsedOutput, context); + case "EC2InstanceUnavailableException": + case "com.amazonaws.ec2instanceconnect#EC2InstanceUnavailableException": + throw await deserializeAws_json1_1EC2InstanceUnavailableExceptionResponse(parsedOutput, context); case "InvalidArgsException": case "com.amazonaws.ec2instanceconnect#InvalidArgsException": throw await deserializeAws_json1_1InvalidArgsExceptionResponse(parsedOutput, context); @@ -169,6 +173,9 @@ const deserializeAws_json1_1SendSSHPublicKeyCommandError = async ( case "EC2InstanceStateInvalidException": case "com.amazonaws.ec2instanceconnect#EC2InstanceStateInvalidException": throw await deserializeAws_json1_1EC2InstanceStateInvalidExceptionResponse(parsedOutput, context); + case "EC2InstanceUnavailableException": + case "com.amazonaws.ec2instanceconnect#EC2InstanceUnavailableException": + throw await deserializeAws_json1_1EC2InstanceUnavailableExceptionResponse(parsedOutput, context); case "InvalidArgsException": case "com.amazonaws.ec2instanceconnect#InvalidArgsException": throw await deserializeAws_json1_1InvalidArgsExceptionResponse(parsedOutput, 
context); @@ -243,6 +250,19 @@ const deserializeAws_json1_1EC2InstanceTypeInvalidExceptionResponse = async ( return __decorateServiceException(exception, body); }; +const deserializeAws_json1_1EC2InstanceUnavailableExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const body = parsedOutput.body; + const deserialized: any = deserializeAws_json1_1EC2InstanceUnavailableException(body, context); + const exception = new EC2InstanceUnavailableException({ + $metadata: deserializeMetadata(parsedOutput), + ...deserialized, + }); + return __decorateServiceException(exception, body); +}; + const deserializeAws_json1_1InvalidArgsExceptionResponse = async ( parsedOutput: any, context: __SerdeContext @@ -374,6 +394,15 @@ const deserializeAws_json1_1EC2InstanceTypeInvalidException = ( } as any; }; +const deserializeAws_json1_1EC2InstanceUnavailableException = ( + output: any, + context: __SerdeContext +): EC2InstanceUnavailableException => { + return { + Message: __expectString(output.Message), + } as any; +}; + const deserializeAws_json1_1InvalidArgsException = (output: any, context: __SerdeContext): InvalidArgsException => { return { Message: __expectString(output.Message), diff --git a/codegen/sdk-codegen/aws-models/ec2-instance-connect.json b/codegen/sdk-codegen/aws-models/ec2-instance-connect.json index f3d1624d48ee3..d425938d90e02 100644 --- a/codegen/sdk-codegen/aws-models/ec2-instance-connect.json +++ b/codegen/sdk-codegen/aws-models/ec2-instance-connect.json @@ -134,6 +134,23 @@ "smithy.api#httpError": 400 } }, + "com.amazonaws.ec2instanceconnect#EC2InstanceUnavailableException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.ec2instanceconnect#String" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "EC2InstanceUnavailable", + "httpResponseCode": 503 + }, + "smithy.api#documentation": "

          The instance is currently unavailable. Wait a few minutes and try again.

          ", + "smithy.api#error": "server", + "smithy.api#httpError": 503 + } + }, "com.amazonaws.ec2instanceconnect#InstanceId": { "type": "string", "traits": { @@ -201,6 +218,9 @@ { "target": "com.amazonaws.ec2instanceconnect#EC2InstanceStateInvalidException" }, + { + "target": "com.amazonaws.ec2instanceconnect#EC2InstanceUnavailableException" + }, { "target": "com.amazonaws.ec2instanceconnect#InvalidArgsException" }, @@ -285,6 +305,9 @@ { "target": "com.amazonaws.ec2instanceconnect#EC2InstanceTypeInvalidException" }, + { + "target": "com.amazonaws.ec2instanceconnect#EC2InstanceUnavailableException" + }, { "target": "com.amazonaws.ec2instanceconnect#InvalidArgsException" }, From 6fe9621621e896fa31ad115c3865a35f00b12d3a Mon Sep 17 00:00:00 2001 From: awstools Date: Thu, 21 Jul 2022 18:23:02 +0000 Subject: [PATCH 24/31] feat(client-athena): This feature allows customers to retrieve runtime statistics for completed queries --- clients/client-athena/src/Athena.ts | 40 +++ clients/client-athena/src/AthenaClient.ts | 6 + .../GetQueryRuntimeStatisticsCommand.ts | 102 +++++++ clients/client-athena/src/commands/index.ts | 1 + clients/client-athena/src/models/models_0.ts | 241 +++++++++++++++++ .../pagination/ListEngineVersionsPaginator.ts | 61 +++++ clients/client-athena/src/pagination/index.ts | 1 + .../src/protocols/Aws_json1_1.ts | 191 +++++++++++++ codegen/sdk-codegen/aws-models/athena.json | 255 +++++++++++++++++- 9 files changed, 897 insertions(+), 1 deletion(-) create mode 100644 clients/client-athena/src/commands/GetQueryRuntimeStatisticsCommand.ts create mode 100644 clients/client-athena/src/pagination/ListEngineVersionsPaginator.ts diff --git a/clients/client-athena/src/Athena.ts b/clients/client-athena/src/Athena.ts index 3cb12228fb926..3392c337a5c53 100644 --- a/clients/client-athena/src/Athena.ts +++ b/clients/client-athena/src/Athena.ts @@ -83,6 +83,11 @@ import { GetQueryResultsCommandInput, GetQueryResultsCommandOutput, } from 
"./commands/GetQueryResultsCommand"; +import { + GetQueryRuntimeStatisticsCommand, + GetQueryRuntimeStatisticsCommandInput, + GetQueryRuntimeStatisticsCommandOutput, +} from "./commands/GetQueryRuntimeStatisticsCommand"; import { GetTableMetadataCommand, GetTableMetadataCommandInput, @@ -772,6 +777,41 @@ export class Athena extends AthenaClient { } } + /** + *

          Returns query execution runtime statistics related to a single execution of a query if you + * have access to the workgroup in which the query ran. The query execution runtime statistics + * is returned only when QueryExecutionStatus$State is in a SUCCEEDED + * or FAILED state.

          + */ + public getQueryRuntimeStatistics( + args: GetQueryRuntimeStatisticsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public getQueryRuntimeStatistics( + args: GetQueryRuntimeStatisticsCommandInput, + cb: (err: any, data?: GetQueryRuntimeStatisticsCommandOutput) => void + ): void; + public getQueryRuntimeStatistics( + args: GetQueryRuntimeStatisticsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetQueryRuntimeStatisticsCommandOutput) => void + ): void; + public getQueryRuntimeStatistics( + args: GetQueryRuntimeStatisticsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: GetQueryRuntimeStatisticsCommandOutput) => void), + cb?: (err: any, data?: GetQueryRuntimeStatisticsCommandOutput) => void + ): Promise | void { + const command = new GetQueryRuntimeStatisticsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

          Returns table metadata for the specified catalog, database, and table.

          */ diff --git a/clients/client-athena/src/AthenaClient.ts b/clients/client-athena/src/AthenaClient.ts index 00fe431ef2d17..0254e0b16fd99 100644 --- a/clients/client-athena/src/AthenaClient.ts +++ b/clients/client-athena/src/AthenaClient.ts @@ -85,6 +85,10 @@ import { } from "./commands/GetPreparedStatementCommand"; import { GetQueryExecutionCommandInput, GetQueryExecutionCommandOutput } from "./commands/GetQueryExecutionCommand"; import { GetQueryResultsCommandInput, GetQueryResultsCommandOutput } from "./commands/GetQueryResultsCommand"; +import { + GetQueryRuntimeStatisticsCommandInput, + GetQueryRuntimeStatisticsCommandOutput, +} from "./commands/GetQueryRuntimeStatisticsCommand"; import { GetTableMetadataCommandInput, GetTableMetadataCommandOutput } from "./commands/GetTableMetadataCommand"; import { GetWorkGroupCommandInput, GetWorkGroupCommandOutput } from "./commands/GetWorkGroupCommand"; import { ListDatabasesCommandInput, ListDatabasesCommandOutput } from "./commands/ListDatabasesCommand"; @@ -139,6 +143,7 @@ export type ServiceInputTypes = | GetPreparedStatementCommandInput | GetQueryExecutionCommandInput | GetQueryResultsCommandInput + | GetQueryRuntimeStatisticsCommandInput | GetTableMetadataCommandInput | GetWorkGroupCommandInput | ListDataCatalogsCommandInput @@ -177,6 +182,7 @@ export type ServiceOutputTypes = | GetPreparedStatementCommandOutput | GetQueryExecutionCommandOutput | GetQueryResultsCommandOutput + | GetQueryRuntimeStatisticsCommandOutput | GetTableMetadataCommandOutput | GetWorkGroupCommandOutput | ListDataCatalogsCommandOutput diff --git a/clients/client-athena/src/commands/GetQueryRuntimeStatisticsCommand.ts b/clients/client-athena/src/commands/GetQueryRuntimeStatisticsCommand.ts new file mode 100644 index 0000000000000..1d8a4ebd78864 --- /dev/null +++ b/clients/client-athena/src/commands/GetQueryRuntimeStatisticsCommand.ts @@ -0,0 +1,102 @@ +// smithy-typescript generated code +import { getSerdePlugin } from 
"@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { AthenaClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../AthenaClient"; +import { GetQueryRuntimeStatisticsInput, GetQueryRuntimeStatisticsOutput } from "../models/models_0"; +import { + deserializeAws_json1_1GetQueryRuntimeStatisticsCommand, + serializeAws_json1_1GetQueryRuntimeStatisticsCommand, +} from "../protocols/Aws_json1_1"; + +export interface GetQueryRuntimeStatisticsCommandInput extends GetQueryRuntimeStatisticsInput {} +export interface GetQueryRuntimeStatisticsCommandOutput extends GetQueryRuntimeStatisticsOutput, __MetadataBearer {} + +/** + *

          Returns query execution runtime statistics related to a single execution of a query if you + * have access to the workgroup in which the query ran. The query execution runtime statistics + * is returned only when QueryExecutionStatus$State is in a SUCCEEDED + * or FAILED state.

          + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { AthenaClient, GetQueryRuntimeStatisticsCommand } from "@aws-sdk/client-athena"; // ES Modules import + * // const { AthenaClient, GetQueryRuntimeStatisticsCommand } = require("@aws-sdk/client-athena"); // CommonJS import + * const client = new AthenaClient(config); + * const command = new GetQueryRuntimeStatisticsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetQueryRuntimeStatisticsCommandInput} for command's `input` shape. + * @see {@link GetQueryRuntimeStatisticsCommandOutput} for command's `response` shape. + * @see {@link AthenaClientResolvedConfig | config} for AthenaClient's `config` shape. + * + */ +export class GetQueryRuntimeStatisticsCommand extends $Command< + GetQueryRuntimeStatisticsCommandInput, + GetQueryRuntimeStatisticsCommandOutput, + AthenaClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetQueryRuntimeStatisticsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: AthenaClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "AthenaClient"; + const commandName = "GetQueryRuntimeStatisticsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: GetQueryRuntimeStatisticsInput.filterSensitiveLog, + outputFilterSensitiveLog: GetQueryRuntimeStatisticsOutput.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return 
stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: GetQueryRuntimeStatisticsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_1GetQueryRuntimeStatisticsCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_json1_1GetQueryRuntimeStatisticsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-athena/src/commands/index.ts b/clients/client-athena/src/commands/index.ts index ff12196434bb3..302a165516561 100644 --- a/clients/client-athena/src/commands/index.ts +++ b/clients/client-athena/src/commands/index.ts @@ -16,6 +16,7 @@ export * from "./GetNamedQueryCommand"; export * from "./GetPreparedStatementCommand"; export * from "./GetQueryExecutionCommand"; export * from "./GetQueryResultsCommand"; +export * from "./GetQueryRuntimeStatisticsCommand"; export * from "./GetTableMetadataCommand"; export * from "./GetWorkGroupCommand"; export * from "./ListDataCatalogsCommand"; diff --git a/clients/client-athena/src/models/models_0.ts b/clients/client-athena/src/models/models_0.ts index 7ef1da6309e78..6f75b62d1dbfc 100644 --- a/clients/client-athena/src/models/models_0.ts +++ b/clients/client-athena/src/models/models_0.ts @@ -1873,6 +1873,103 @@ export namespace GetQueryResultsOutput { }); } +export interface GetQueryRuntimeStatisticsInput { + /** + *

          The unique ID of the query execution.

          + */ + QueryExecutionId: string | undefined; +} + +export namespace GetQueryRuntimeStatisticsInput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetQueryRuntimeStatisticsInput): any => ({ + ...obj, + }); +} + +/** + *

          Statistics such as input rows and bytes read by the query, rows and bytes output by the query, + * and the number of rows written by the query.

          + */ +export interface QueryRuntimeStatisticsRows { + /** + *

          The number of rows read to execute the query.

          + */ + InputRows?: number; + + /** + *

          The number of bytes read to execute the query.

          + */ + InputBytes?: number; + + /** + *

          The number of bytes returned by the query.

          + */ + OutputBytes?: number; + + /** + *

          The number of rows returned by the query.

          + */ + OutputRows?: number; +} + +export namespace QueryRuntimeStatisticsRows { + /** + * @internal + */ + export const filterSensitiveLog = (obj: QueryRuntimeStatisticsRows): any => ({ + ...obj, + }); +} + +/** + *

          Timeline statistics such as query queue time, planning time, execution time, service processing + * time, and total execution time.

          + */ +export interface QueryRuntimeStatisticsTimeline { + /** + *

          The number of milliseconds that the query was in your query queue waiting for + * resources. Note that if transient errors occur, Athena might automatically + * add the query back to the queue.

          + */ + QueryQueueTimeInMillis?: number; + + /** + *

          The number of milliseconds that Athena took to plan the query processing + * flow. This includes the time spent retrieving table partitions from the data source. + * Note that because the query engine performs the query planning, query planning time is a + * subset of engine processing time.

          + */ + QueryPlanningTimeInMillis?: number; + + /** + *

          The number of milliseconds that the query took to execute.

          + */ + EngineExecutionTimeInMillis?: number; + + /** + *

          The number of milliseconds that Athena took to finalize and publish the + * query results after the query engine finished running the query.

          + */ + ServiceProcessingTimeInMillis?: number; + + /** + *

          The number of milliseconds that Athena took to run the query.

          + */ + TotalExecutionTimeInMillis?: number; +} + +export namespace QueryRuntimeStatisticsTimeline { + /** + * @internal + */ + export const filterSensitiveLog = (obj: QueryRuntimeStatisticsTimeline): any => ({ + ...obj, + }); +} + export interface GetTableMetadataInput { /** *

          The name of the data catalog that contains the database and table metadata to @@ -3198,3 +3295,147 @@ export namespace UpdateWorkGroupOutput { ...obj, }); } + +/** + *

          Stage plan information such as name, identifier, sub plans, and remote sources.

          + */ +export interface QueryStagePlanNode { + /** + *

          Name of the query stage plan that describes the operation this stage is performing as part of + * query execution.

          + */ + Name?: string; + + /** + *

          Information about the operation this query stage plan node is performing.

          + */ + Identifier?: string; + + /** + *

          Stage plan information such as name, identifier, sub plans, and remote sources of child plan nodes/

          + */ + Children?: QueryStagePlanNode[]; + + /** + *

          Source plan node IDs.

          + */ + RemoteSources?: string[]; +} + +export namespace QueryStagePlanNode { + /** + * @internal + */ + export const filterSensitiveLog = (obj: QueryStagePlanNode): any => ({ + ...obj, + }); +} + +/** + *

          Stage statistics such as input and output rows and bytes, execution time and stage state. This + * information also includes substages and the query stage plan.

          + */ +export interface QueryStage { + /** + *

          The identifier for a stage.

          + */ + StageId?: number; + + /** + *

          State of the stage after query execution.

          + */ + State?: string; + + /** + *

          The number of bytes output from the stage after execution.

          + */ + OutputBytes?: number; + + /** + *

          The number of rows output from the stage after execution.

          + */ + OutputRows?: number; + + /** + *

          The number of bytes input into the stage for execution.

          + */ + InputBytes?: number; + + /** + *

          The number of rows input into the stage for execution.

          + */ + InputRows?: number; + + /** + *

          Time taken to execute this stage.

          + */ + ExecutionTime?: number; + + /** + *

          Stage plan information such as name, identifier, sub plans, and source stages.

          + */ + QueryStagePlan?: QueryStagePlanNode; + + /** + *

          List of sub query stages that form this stage execution plan.

          + */ + SubStages?: QueryStage[]; +} + +export namespace QueryStage { + /** + * @internal + */ + export const filterSensitiveLog = (obj: QueryStage): any => ({ + ...obj, + }); +} + +/** + *

          The query execution timeline, statistics on input and output rows and bytes, and the different + * query stages that form the query execution plan.

          + */ +export interface QueryRuntimeStatistics { + /** + *

          Timeline statistics such as query queue time, planning time, execution time, service processing + * time, and total execution time.

          + */ + Timeline?: QueryRuntimeStatisticsTimeline; + + /** + *

          Statistics such as input rows and bytes read by the query, rows and bytes output by the query, + * and the number of rows written by the query.

          + */ + Rows?: QueryRuntimeStatisticsRows; + + /** + *

          Stage statistics such as input and output rows and bytes, execution time, and stage state. This + * information also includes substages and the query stage plan.

          + */ + OutputStage?: QueryStage; +} + +export namespace QueryRuntimeStatistics { + /** + * @internal + */ + export const filterSensitiveLog = (obj: QueryRuntimeStatistics): any => ({ + ...obj, + }); +} + +export interface GetQueryRuntimeStatisticsOutput { + /** + *

          Runtime statistics about the query execution.

          + */ + QueryRuntimeStatistics?: QueryRuntimeStatistics; +} + +export namespace GetQueryRuntimeStatisticsOutput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetQueryRuntimeStatisticsOutput): any => ({ + ...obj, + }); +} diff --git a/clients/client-athena/src/pagination/ListEngineVersionsPaginator.ts b/clients/client-athena/src/pagination/ListEngineVersionsPaginator.ts new file mode 100644 index 0000000000000..52f6913a5ba9b --- /dev/null +++ b/clients/client-athena/src/pagination/ListEngineVersionsPaginator.ts @@ -0,0 +1,61 @@ +// smithy-typescript generated code +import { Paginator } from "@aws-sdk/types"; + +import { Athena } from "../Athena"; +import { AthenaClient } from "../AthenaClient"; +import { + ListEngineVersionsCommand, + ListEngineVersionsCommandInput, + ListEngineVersionsCommandOutput, +} from "../commands/ListEngineVersionsCommand"; +import { AthenaPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: AthenaClient, + input: ListEngineVersionsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListEngineVersionsCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: Athena, + input: ListEngineVersionsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listEngineVersions(input, ...args); +}; +export async function* paginateListEngineVersions( + config: AthenaPaginationConfiguration, + input: ListEngineVersionsCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.NextToken + let token: typeof input.NextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListEngineVersionsCommandOutput; + while (hasNext) { + input.NextToken = token; + input["MaxResults"] = config.pageSize; + if (config.client instanceof Athena) { + page = await 
makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof AthenaClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected Athena | AthenaClient"); + } + yield page; + const prevToken = token; + token = page.NextToken; + hasNext = !!(token && (!config.stopOnSameToken || token !== prevToken)); + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-athena/src/pagination/index.ts b/clients/client-athena/src/pagination/index.ts index 1c3e2fd9b64e3..3ab8ca4d7f75d 100644 --- a/clients/client-athena/src/pagination/index.ts +++ b/clients/client-athena/src/pagination/index.ts @@ -3,6 +3,7 @@ export * from "./GetQueryResultsPaginator"; export * from "./Interfaces"; export * from "./ListDataCatalogsPaginator"; export * from "./ListDatabasesPaginator"; +export * from "./ListEngineVersionsPaginator"; export * from "./ListNamedQueriesPaginator"; export * from "./ListPreparedStatementsPaginator"; export * from "./ListQueryExecutionsPaginator"; diff --git a/clients/client-athena/src/protocols/Aws_json1_1.ts b/clients/client-athena/src/protocols/Aws_json1_1.ts index 3e337e71e993f..35fb43b856ba3 100644 --- a/clients/client-athena/src/protocols/Aws_json1_1.ts +++ b/clients/client-athena/src/protocols/Aws_json1_1.ts @@ -50,6 +50,10 @@ import { } from "../commands/GetPreparedStatementCommand"; import { GetQueryExecutionCommandInput, GetQueryExecutionCommandOutput } from "../commands/GetQueryExecutionCommand"; import { GetQueryResultsCommandInput, GetQueryResultsCommandOutput } from "../commands/GetQueryResultsCommand"; +import { + GetQueryRuntimeStatisticsCommandInput, + GetQueryRuntimeStatisticsCommandOutput, +} from "../commands/GetQueryRuntimeStatisticsCommand"; import { GetTableMetadataCommandInput, GetTableMetadataCommandOutput } from "../commands/GetTableMetadataCommand"; import { GetWorkGroupCommandInput, GetWorkGroupCommandOutput 
} from "../commands/GetWorkGroupCommand"; import { ListDatabasesCommandInput, ListDatabasesCommandOutput } from "../commands/ListDatabasesCommand"; @@ -130,6 +134,8 @@ import { GetQueryExecutionOutput, GetQueryResultsInput, GetQueryResultsOutput, + GetQueryRuntimeStatisticsInput, + GetQueryRuntimeStatisticsOutput, GetTableMetadataInput, GetTableMetadataOutput, GetWorkGroupInput, @@ -162,6 +168,11 @@ import { QueryExecutionContext, QueryExecutionStatistics, QueryExecutionStatus, + QueryRuntimeStatistics, + QueryRuntimeStatisticsRows, + QueryRuntimeStatisticsTimeline, + QueryStage, + QueryStagePlanNode, ResourceNotFoundException, ResultConfiguration, ResultConfigurationUpdates, @@ -417,6 +428,19 @@ export const serializeAws_json1_1GetQueryResultsCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1GetQueryRuntimeStatisticsCommand = async ( + input: GetQueryRuntimeStatisticsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "AmazonAthena.GetQueryRuntimeStatistics", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1GetQueryRuntimeStatisticsInput(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1GetTableMetadataCommand = async ( input: GetTableMetadataCommandInput, context: __SerdeContext @@ -1472,6 +1496,53 @@ const deserializeAws_json1_1GetQueryResultsCommandError = async ( } }; +export const deserializeAws_json1_1GetQueryRuntimeStatisticsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1GetQueryRuntimeStatisticsCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = 
deserializeAws_json1_1GetQueryRuntimeStatisticsOutput(data, context); + const response: GetQueryRuntimeStatisticsCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1GetQueryRuntimeStatisticsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __BaseException; + const errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServerException": + case "com.amazonaws.athena#InternalServerException": + throw await deserializeAws_json1_1InternalServerExceptionResponse(parsedOutput, context); + case "InvalidRequestException": + case "com.amazonaws.athena#InvalidRequestException": + throw await deserializeAws_json1_1InvalidRequestExceptionResponse(parsedOutput, context); + default: + const parsedBody = parsedOutput.body; + const $metadata = deserializeMetadata(output); + const statusCode = $metadata.httpStatusCode ? 
$metadata.httpStatusCode + "" : undefined; + response = new __BaseException({ + name: parsedBody.code || parsedBody.Code || errorCode || statusCode || "UnknowError", + $fault: "client", + $metadata, + }); + throw __decorateServiceException(response, parsedBody); + } +}; + export const deserializeAws_json1_1GetTableMetadataCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -2632,6 +2703,15 @@ const serializeAws_json1_1GetQueryResultsInput = (input: GetQueryResultsInput, c }; }; +const serializeAws_json1_1GetQueryRuntimeStatisticsInput = ( + input: GetQueryRuntimeStatisticsInput, + context: __SerdeContext +): any => { + return { + ...(input.QueryExecutionId != null && { QueryExecutionId: input.QueryExecutionId }), + }; +}; + const serializeAws_json1_1GetTableMetadataInput = (input: GetTableMetadataInput, context: __SerdeContext): any => { return { ...(input.CatalogName != null && { CatalogName: input.CatalogName }), @@ -3280,6 +3360,18 @@ const deserializeAws_json1_1GetQueryResultsOutput = (output: any, context: __Ser } as any; }; +const deserializeAws_json1_1GetQueryRuntimeStatisticsOutput = ( + output: any, + context: __SerdeContext +): GetQueryRuntimeStatisticsOutput => { + return { + QueryRuntimeStatistics: + output.QueryRuntimeStatistics != null + ? deserializeAws_json1_1QueryRuntimeStatistics(output.QueryRuntimeStatistics, context) + : undefined, + } as any; +}; + const deserializeAws_json1_1GetTableMetadataOutput = (output: any, context: __SerdeContext): GetTableMetadataOutput => { return { TableMetadata: @@ -3608,6 +3700,93 @@ const deserializeAws_json1_1QueryExecutionStatus = (output: any, context: __Serd } as any; }; +const deserializeAws_json1_1QueryRuntimeStatistics = (output: any, context: __SerdeContext): QueryRuntimeStatistics => { + return { + OutputStage: output.OutputStage != null ? deserializeAws_json1_1QueryStage(output.OutputStage, context) : undefined, + Rows: output.Rows != null ? 
deserializeAws_json1_1QueryRuntimeStatisticsRows(output.Rows, context) : undefined, + Timeline: + output.Timeline != null + ? deserializeAws_json1_1QueryRuntimeStatisticsTimeline(output.Timeline, context) + : undefined, + } as any; +}; + +const deserializeAws_json1_1QueryRuntimeStatisticsRows = ( + output: any, + context: __SerdeContext +): QueryRuntimeStatisticsRows => { + return { + InputBytes: __expectLong(output.InputBytes), + InputRows: __expectLong(output.InputRows), + OutputBytes: __expectLong(output.OutputBytes), + OutputRows: __expectLong(output.OutputRows), + } as any; +}; + +const deserializeAws_json1_1QueryRuntimeStatisticsTimeline = ( + output: any, + context: __SerdeContext +): QueryRuntimeStatisticsTimeline => { + return { + EngineExecutionTimeInMillis: __expectLong(output.EngineExecutionTimeInMillis), + QueryPlanningTimeInMillis: __expectLong(output.QueryPlanningTimeInMillis), + QueryQueueTimeInMillis: __expectLong(output.QueryQueueTimeInMillis), + ServiceProcessingTimeInMillis: __expectLong(output.ServiceProcessingTimeInMillis), + TotalExecutionTimeInMillis: __expectLong(output.TotalExecutionTimeInMillis), + } as any; +}; + +const deserializeAws_json1_1QueryStage = (output: any, context: __SerdeContext): QueryStage => { + return { + ExecutionTime: __expectLong(output.ExecutionTime), + InputBytes: __expectLong(output.InputBytes), + InputRows: __expectLong(output.InputRows), + OutputBytes: __expectLong(output.OutputBytes), + OutputRows: __expectLong(output.OutputRows), + QueryStagePlan: + output.QueryStagePlan != null + ? deserializeAws_json1_1QueryStagePlanNode(output.QueryStagePlan, context) + : undefined, + StageId: __expectLong(output.StageId), + State: __expectString(output.State), + SubStages: output.SubStages != null ? 
deserializeAws_json1_1QueryStages(output.SubStages, context) : undefined, + } as any; +}; + +const deserializeAws_json1_1QueryStagePlanNode = (output: any, context: __SerdeContext): QueryStagePlanNode => { + return { + Children: output.Children != null ? deserializeAws_json1_1QueryStagePlanNodes(output.Children, context) : undefined, + Identifier: __expectString(output.Identifier), + Name: __expectString(output.Name), + RemoteSources: + output.RemoteSources != null ? deserializeAws_json1_1StringList(output.RemoteSources, context) : undefined, + } as any; +}; + +const deserializeAws_json1_1QueryStagePlanNodes = (output: any, context: __SerdeContext): QueryStagePlanNode[] => { + const retVal = (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_1QueryStagePlanNode(entry, context); + }); + return retVal; +}; + +const deserializeAws_json1_1QueryStages = (output: any, context: __SerdeContext): QueryStage[] => { + const retVal = (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_1QueryStage(entry, context); + }); + return retVal; +}; + const deserializeAws_json1_1ResourceNotFoundException = ( output: any, context: __SerdeContext @@ -3684,6 +3863,18 @@ const deserializeAws_json1_1StopQueryExecutionOutput = ( return {} as any; }; +const deserializeAws_json1_1StringList = (output: any, context: __SerdeContext): string[] => { + const retVal = (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return __expectString(entry) as any; + }); + return retVal; +}; + const deserializeAws_json1_1TableMetadata = (output: any, context: __SerdeContext): TableMetadata => { return { Columns: output.Columns != null ? 
deserializeAws_json1_1ColumnList(output.Columns, context) : undefined, diff --git a/codegen/sdk-codegen/aws-models/athena.json b/codegen/sdk-codegen/aws-models/athena.json index cd6952f0c53dc..815897e19f806 100644 --- a/codegen/sdk-codegen/aws-models/athena.json +++ b/codegen/sdk-codegen/aws-models/athena.json @@ -114,6 +114,9 @@ { "target": "com.amazonaws.athena#GetQueryResults" }, + { + "target": "com.amazonaws.athena#GetQueryRuntimeStatistics" + }, { "target": "com.amazonaws.athena#GetTableMetadata" }, @@ -1549,6 +1552,49 @@ } } }, + "com.amazonaws.athena#GetQueryRuntimeStatistics": { + "type": "operation", + "input": { + "target": "com.amazonaws.athena#GetQueryRuntimeStatisticsInput" + }, + "output": { + "target": "com.amazonaws.athena#GetQueryRuntimeStatisticsOutput" + }, + "errors": [ + { + "target": "com.amazonaws.athena#InternalServerException" + }, + { + "target": "com.amazonaws.athena#InvalidRequestException" + } + ], + "traits": { + "smithy.api#documentation": "

          Returns query execution runtime statistics related to a single execution of a query if you\n have access to the workgroup in which the query ran. The query execution runtime statistics\n is returned only when QueryExecutionStatus$State is in a SUCCEEDED\n or FAILED state.

          " + } + }, + "com.amazonaws.athena#GetQueryRuntimeStatisticsInput": { + "type": "structure", + "members": { + "QueryExecutionId": { + "target": "com.amazonaws.athena#QueryExecutionId", + "traits": { + "smithy.api#documentation": "

          The unique ID of the query execution.

          ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.athena#GetQueryRuntimeStatisticsOutput": { + "type": "structure", + "members": { + "QueryRuntimeStatistics": { + "target": "com.amazonaws.athena#QueryRuntimeStatistics", + "traits": { + "smithy.api#documentation": "

          Runtime statistics about the query execution.

          " + } + } + } + }, "com.amazonaws.athena#GetTableMetadata": { "type": "operation", "input": { @@ -1848,7 +1894,12 @@ } ], "traits": { - "smithy.api#documentation": "

          Returns a list of engine versions that are available to choose from, including the\n Auto option.

          " + "smithy.api#documentation": "

          Returns a list of engine versions that are available to choose from, including the\n Auto option.

          ", + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "MaxResults" + } } }, "com.amazonaws.athena#ListEngineVersionsInput": { @@ -2834,6 +2885,202 @@ "smithy.api#documentation": "

          The completion date, current state, submission time, and state change reason (if\n applicable) for the query execution.

          " } }, + "com.amazonaws.athena#QueryRuntimeStatistics": { + "type": "structure", + "members": { + "Timeline": { + "target": "com.amazonaws.athena#QueryRuntimeStatisticsTimeline" + }, + "Rows": { + "target": "com.amazonaws.athena#QueryRuntimeStatisticsRows" + }, + "OutputStage": { + "target": "com.amazonaws.athena#QueryStage", + "traits": { + "smithy.api#documentation": "

          Stage statistics such as input and output rows and bytes, execution time, and stage state. This \n information also includes substages and the query stage plan.

          " + } + } + }, + "traits": { + "smithy.api#documentation": "

          The query execution timeline, statistics on input and output rows and bytes, and the different\n query stages that form the query execution plan.

          " + } + }, + "com.amazonaws.athena#QueryRuntimeStatisticsRows": { + "type": "structure", + "members": { + "InputRows": { + "target": "com.amazonaws.athena#Long", + "traits": { + "smithy.api#documentation": "

          The number of rows read to execute the query.

          " + } + }, + "InputBytes": { + "target": "com.amazonaws.athena#Long", + "traits": { + "smithy.api#documentation": "

          The number of bytes read to execute the query.

          " + } + }, + "OutputBytes": { + "target": "com.amazonaws.athena#Long", + "traits": { + "smithy.api#documentation": "

          The number of bytes returned by the query.

          " + } + }, + "OutputRows": { + "target": "com.amazonaws.athena#Long", + "traits": { + "smithy.api#documentation": "

          The number of rows returned by the query.

          " + } + } + }, + "traits": { + "smithy.api#documentation": "

          Statistics such as input rows and bytes read by the query, rows and bytes output by the query,\n and the number of rows written by the query.

          " + } + }, + "com.amazonaws.athena#QueryRuntimeStatisticsTimeline": { + "type": "structure", + "members": { + "QueryQueueTimeInMillis": { + "target": "com.amazonaws.athena#Long", + "traits": { + "smithy.api#documentation": "

          The number of milliseconds that the query was in your query queue waiting for\n resources. Note that if transient errors occur, Athena might automatically\n add the query back to the queue.

          " + } + }, + "QueryPlanningTimeInMillis": { + "target": "com.amazonaws.athena#Long", + "traits": { + "smithy.api#documentation": "

          The number of milliseconds that Athena took to plan the query processing\n flow. This includes the time spent retrieving table partitions from the data source.\n Note that because the query engine performs the query planning, query planning time is a\n subset of engine processing time.

          " + } + }, + "EngineExecutionTimeInMillis": { + "target": "com.amazonaws.athena#Long", + "traits": { + "smithy.api#documentation": "

          The number of milliseconds that the query took to execute.

          " + } + }, + "ServiceProcessingTimeInMillis": { + "target": "com.amazonaws.athena#Long", + "traits": { + "smithy.api#documentation": "

          The number of milliseconds that Athena took to finalize and publish the\n query results after the query engine finished running the query.

          " + } + }, + "TotalExecutionTimeInMillis": { + "target": "com.amazonaws.athena#Long", + "traits": { + "smithy.api#documentation": "

          The number of milliseconds that Athena took to run the query.

          " + } + } + }, + "traits": { + "smithy.api#documentation": "

          Timeline statistics such as query queue time, planning time, execution time, service processing\n time, and total execution time.

          " + } + }, + "com.amazonaws.athena#QueryStage": { + "type": "structure", + "members": { + "StageId": { + "target": "com.amazonaws.athena#Long", + "traits": { + "smithy.api#documentation": "

          The identifier for a stage.

          " + } + }, + "State": { + "target": "com.amazonaws.athena#String", + "traits": { + "smithy.api#documentation": "

          State of the stage after query execution.

          " + } + }, + "OutputBytes": { + "target": "com.amazonaws.athena#Long", + "traits": { + "smithy.api#documentation": "

          The number of bytes output from the stage after execution.

          " + } + }, + "OutputRows": { + "target": "com.amazonaws.athena#Long", + "traits": { + "smithy.api#documentation": "

          The number of rows output from the stage after execution.

          " + } + }, + "InputBytes": { + "target": "com.amazonaws.athena#Long", + "traits": { + "smithy.api#documentation": "

          The number of bytes input into the stage for execution.

          " + } + }, + "InputRows": { + "target": "com.amazonaws.athena#Long", + "traits": { + "smithy.api#documentation": "

          The number of rows input into the stage for execution.

          " + } + }, + "ExecutionTime": { + "target": "com.amazonaws.athena#Long", + "traits": { + "smithy.api#documentation": "

          Time taken to execute this stage.

          " + } + }, + "QueryStagePlan": { + "target": "com.amazonaws.athena#QueryStagePlanNode", + "traits": { + "smithy.api#documentation": "

          Stage plan information such as name, identifier, sub plans, and source stages.

          " + } + }, + "SubStages": { + "target": "com.amazonaws.athena#QueryStages", + "traits": { + "smithy.api#documentation": "

          List of sub query stages that form this stage execution plan.

          " + } + } + }, + "traits": { + "smithy.api#documentation": "

          Stage statistics such as input and output rows and bytes, execution time and stage state. This \n information also includes substages and the query stage plan.

          " + } + }, + "com.amazonaws.athena#QueryStagePlanNode": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.athena#String", + "traits": { + "smithy.api#documentation": "

          Name of the query stage plan that describes the operation this stage is performing as part of\n query execution.

          " + } + }, + "Identifier": { + "target": "com.amazonaws.athena#String", + "traits": { + "smithy.api#documentation": "

          Information about the operation this query stage plan node is performing.

          " + } + }, + "Children": { + "target": "com.amazonaws.athena#QueryStagePlanNodes", + "traits": { + "smithy.api#documentation": "

          Stage plan information such as name, identifier, sub plans, and remote sources of child plan nodes/

          " + } + }, + "RemoteSources": { + "target": "com.amazonaws.athena#StringList", + "traits": { + "smithy.api#documentation": "

          Source plan node IDs.

          " + } + } + }, + "traits": { + "smithy.api#documentation": "

          Stage plan information such as name, identifier, sub plans, and remote sources.

          " + } + }, + "com.amazonaws.athena#QueryStagePlanNodes": { + "type": "list", + "member": { + "target": "com.amazonaws.athena#QueryStagePlanNode" + } + }, + "com.amazonaws.athena#QueryStages": { + "type": "list", + "member": { + "target": "com.amazonaws.athena#QueryStage" + } + }, "com.amazonaws.athena#QueryString": { "type": "string", "traits": { @@ -3165,6 +3412,12 @@ "com.amazonaws.athena#String": { "type": "string" }, + "com.amazonaws.athena#StringList": { + "type": "list", + "member": { + "target": "com.amazonaws.athena#String" + } + }, "com.amazonaws.athena#TableMetadata": { "type": "structure", "members": { From 3c22d7a670a19b887b04249ac5b47633efe83046 Mon Sep 17 00:00:00 2001 From: awstools Date: Thu, 21 Jul 2022 18:23:03 +0000 Subject: [PATCH 25/31] docs(client-database-migration-service): Documentation updates for Database Migration Service (DMS). --- .../src/models/models_0.ts | 6 +++--- .../sdk-codegen/aws-models/database-migration-service.json | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/clients/client-database-migration-service/src/models/models_0.ts b/clients/client-database-migration-service/src/models/models_0.ts index b1d1e22c0f186..6df6546bce3eb 100644 --- a/clients/client-database-migration-service/src/models/models_0.ts +++ b/clients/client-database-migration-service/src/models/models_0.ts @@ -3288,9 +3288,9 @@ export interface CreateEndpointMessage { * EndpointType value, include "mysql", "oracle", * "postgres", "mariadb", "aurora", * "aurora-postgresql", "opensearch", "redshift", "s3", - * "db2", "azuredb", "sybase", "dynamodb", "mongodb", + * "db2", db2-zos, "azuredb", "sybase", "dynamodb", "mongodb", * "kinesis", "kafka", "elasticsearch", "docdb", - * "sqlserver", and "neptune".

          + * "sqlserver", "neptune", and babelfish.

          */ EngineName: string | undefined; @@ -8885,7 +8885,7 @@ export interface TableStatistics { /** *

          The state of the tables described.

          *

          Valid states: Table does not exist | Before load | Full load | Table completed | Table - * cancelled | Table error | Table all | Table updates | Table is being reloaded

          + * cancelled | Table error | Table is being reloaded

          */ TableState?: string; diff --git a/codegen/sdk-codegen/aws-models/database-migration-service.json b/codegen/sdk-codegen/aws-models/database-migration-service.json index c3fc817717640..0db86e9d0b13b 100644 --- a/codegen/sdk-codegen/aws-models/database-migration-service.json +++ b/codegen/sdk-codegen/aws-models/database-migration-service.json @@ -958,7 +958,7 @@ "EngineName": { "target": "com.amazonaws.databasemigrationservice#String", "traits": { - "smithy.api#documentation": "

          The type of engine for the endpoint. Valid values, depending on the\n EndpointType value, include \"mysql\", \"oracle\",\n \"postgres\", \"mariadb\", \"aurora\", \n \"aurora-postgresql\", \"opensearch\", \"redshift\", \"s3\",\n \"db2\", \"azuredb\", \"sybase\", \"dynamodb\", \"mongodb\",\n \"kinesis\", \"kafka\", \"elasticsearch\", \"docdb\",\n \"sqlserver\", and \"neptune\".

          ", + "smithy.api#documentation": "

          The type of engine for the endpoint. Valid values, depending on the\n EndpointType value, include \"mysql\", \"oracle\",\n \"postgres\", \"mariadb\", \"aurora\", \n \"aurora-postgresql\", \"opensearch\", \"redshift\", \"s3\",\n \"db2\", db2-zos, \"azuredb\", \"sybase\", \"dynamodb\", \"mongodb\",\n \"kinesis\", \"kafka\", \"elasticsearch\", \"docdb\",\n \"sqlserver\", \"neptune\", and babelfish.

          ", "smithy.api#required": {} } }, @@ -10333,7 +10333,7 @@ "TableState": { "target": "com.amazonaws.databasemigrationservice#String", "traits": { - "smithy.api#documentation": "

          The state of the tables described.

          \n

          Valid states: Table does not exist | Before load | Full load | Table completed | Table\n cancelled | Table error | Table all | Table updates | Table is being reloaded

          " + "smithy.api#documentation": "

          The state of the tables described.

          \n

          Valid states: Table does not exist | Before load | Full load | Table completed | Table\n cancelled | Table error | Table is being reloaded

          " } }, "ValidationPendingRecords": { From de0697ad65ae98358ea534d9c2c90eadf4111bce Mon Sep 17 00:00:00 2001 From: awstools Date: Thu, 21 Jul 2022 18:23:03 +0000 Subject: [PATCH 26/31] feat(client-kendra): Amazon Kendra now provides Oauth2 support for SharePoint Online. For more information, see https://docs.aws.amazon.com/kendra/latest/dg/data-source-sharepoint.html --- clients/client-kendra/src/Kendra.ts | 30 +++---- ...CreateAccessControlConfigurationCommand.ts | 22 +++-- .../src/commands/QueryCommand.ts | 3 +- ...UpdateAccessControlConfigurationCommand.ts | 8 +- clients/client-kendra/src/models/models_0.ts | 88 ++++++------------- clients/client-kendra/src/models/models_1.ts | 54 ++++++++++++ .../src/protocols/Aws_json1_1.ts | 5 +- codegen/sdk-codegen/aws-models/kendra.json | 35 ++++++-- 8 files changed, 142 insertions(+), 103 deletions(-) diff --git a/clients/client-kendra/src/Kendra.ts b/clients/client-kendra/src/Kendra.ts index 5695ae3d91f6e..9de8aceb01256 100644 --- a/clients/client-kendra/src/Kendra.ts +++ b/clients/client-kendra/src/Kendra.ts @@ -508,18 +508,16 @@ export class Kendra extends KendraClient { * user and group access information for your documents. This is useful * for user context filtering, where search results are filtered based * on the user or their group access to documents.

          - *

          You can use this to re-configure your existing document level access - * control without indexing all of your documents again. For example, your - * index contains top-secret company documents that only certain employees - * or users should access. One of these users leaves the company or switches - * to a team that should be blocked from access to top-secret documents. - * Your documents in your index still give this user access to top-secret - * documents due to the user having access at the time your documents were - * indexed. You can create a specific access control configuration for this - * user with deny access. You can later update the access control - * configuration to allow access in the case the user returns to the company - * and re-joins the 'top-secret' team. You can re-configure access control - * for your documents circumstances change.

          + *

          You can use this to re-configure your existing document level access control without + * indexing all of your documents again. For example, your index contains top-secret + * company documents that only certain employees or users should access. One of these users + * leaves the company or switches to a team that should be blocked from accessing + * top-secret documents. The user still has access to top-secret documents because the user + * had access when your documents were previously indexed. You + * can create a specific access control configuration for the user with deny + * access. You can later update the access control configuration to allow access if the + * user returns to the company and re-joins the 'top-secret' team. You can re-configure + * access control for your documents as circumstances change.

          *

          To apply your access control configuration to certain documents, you call * the BatchPutDocument * API with the AccessControlConfigurationId included in the @@ -2176,12 +2174,12 @@ export class Kendra extends KendraClient { * suddenly returns to their previous team and should no longer have access to top secret * documents. You can update the access control configuration to re-configure access * control for your documents as circumstances change.

          - *

          You call the BatchPutDocument - * API to apply the updated access control configuration, with the - * AccessControlConfigurationId included in the + *

          You call the BatchPutDocument API to + * apply the updated access control configuration, with the + * AccessControlConfigurationId included in the * Document * object. If you use an S3 bucket as a data source, you synchronize your data source to - * apply the the AccessControlConfigurationId in the .metadata.json file. + * apply the AccessControlConfigurationId in the .metadata.json file. * Amazon Kendra currently only supports access control configuration for S3 data * sources and documents indexed using the BatchPutDocument API.

          */ diff --git a/clients/client-kendra/src/commands/CreateAccessControlConfigurationCommand.ts b/clients/client-kendra/src/commands/CreateAccessControlConfigurationCommand.ts index 9e434b25d6ddd..8be5940e293b5 100644 --- a/clients/client-kendra/src/commands/CreateAccessControlConfigurationCommand.ts +++ b/clients/client-kendra/src/commands/CreateAccessControlConfigurationCommand.ts @@ -29,18 +29,16 @@ export interface CreateAccessControlConfigurationCommandOutput * user and group access information for your documents. This is useful * for user context filtering, where search results are filtered based * on the user or their group access to documents.

          - *

          You can use this to re-configure your existing document level access - * control without indexing all of your documents again. For example, your - * index contains top-secret company documents that only certain employees - * or users should access. One of these users leaves the company or switches - * to a team that should be blocked from access to top-secret documents. - * Your documents in your index still give this user access to top-secret - * documents due to the user having access at the time your documents were - * indexed. You can create a specific access control configuration for this - * user with deny access. You can later update the access control - * configuration to allow access in the case the user returns to the company - * and re-joins the 'top-secret' team. You can re-configure access control - * for your documents circumstances change.

          + *

          You can use this to re-configure your existing document level access control without + * indexing all of your documents again. For example, your index contains top-secret + * company documents that only certain employees or users should access. One of these users + * leaves the company or switches to a team that should be blocked from accessing + * top-secret documents. The user still has access to top-secret documents because the user + * had access when your documents were previously indexed. You + * can create a specific access control configuration for the user with deny + * access. You can later update the access control configuration to allow access if the + * user returns to the company and re-joins the 'top-secret' team. You can re-configure + * access control for your documents as circumstances change.

          *

          To apply your access control configuration to certain documents, you call * the BatchPutDocument * API with the AccessControlConfigurationId included in the diff --git a/clients/client-kendra/src/commands/QueryCommand.ts b/clients/client-kendra/src/commands/QueryCommand.ts index a43409f1e7746..7211c37bc04b5 100644 --- a/clients/client-kendra/src/commands/QueryCommand.ts +++ b/clients/client-kendra/src/commands/QueryCommand.ts @@ -13,8 +13,7 @@ import { } from "@aws-sdk/types"; import { KendraClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../KendraClient"; -import { QueryResult } from "../models/models_0"; -import { QueryRequest } from "../models/models_1"; +import { QueryRequest, QueryResult } from "../models/models_1"; import { deserializeAws_json1_1QueryCommand, serializeAws_json1_1QueryCommand } from "../protocols/Aws_json1_1"; export interface QueryCommandInput extends QueryRequest {} diff --git a/clients/client-kendra/src/commands/UpdateAccessControlConfigurationCommand.ts b/clients/client-kendra/src/commands/UpdateAccessControlConfigurationCommand.ts index 2248110a4e519..825a9425d5ed9 100644 --- a/clients/client-kendra/src/commands/UpdateAccessControlConfigurationCommand.ts +++ b/clients/client-kendra/src/commands/UpdateAccessControlConfigurationCommand.ts @@ -37,12 +37,12 @@ export interface UpdateAccessControlConfigurationCommandOutput * suddenly returns to their previous team and should no longer have access to top secret * documents. You can update the access control configuration to re-configure access * control for your documents as circumstances change.

          - *

          You call the BatchPutDocument - * API to apply the updated access control configuration, with the - * AccessControlConfigurationId included in the + *

          You call the BatchPutDocument API to + * apply the updated access control configuration, with the + * AccessControlConfigurationId included in the * Document * object. If you use an S3 bucket as a data source, you synchronize your data source to - * apply the the AccessControlConfigurationId in the .metadata.json file. + * apply the AccessControlConfigurationId in the .metadata.json file. * Amazon Kendra currently only supports access control configuration for S3 data * sources and documents indexed using the BatchPutDocument API.

          * @example diff --git a/clients/client-kendra/src/models/models_0.ts b/clients/client-kendra/src/models/models_0.ts index b0e68b71fea60..3f4466deee090 100644 --- a/clients/client-kendra/src/models/models_0.ts +++ b/clients/client-kendra/src/models/models_0.ts @@ -4104,6 +4104,11 @@ export namespace ServiceNowConfiguration { }); } +export enum SharePointOnlineAuthenticationType { + HTTP_BASIC = "HTTP_BASIC", + OAUTH2 = "OAUTH2", +} + export enum SharePointVersion { SHAREPOINT_2013 = "SHAREPOINT_2013", SHAREPOINT_2016 = "SHAREPOINT_2016", @@ -4133,6 +4138,10 @@ export interface SharePointConfiguration { * domain name as part of the credentials. For * more information, see Using a * Microsoft SharePoint Data Source.

          + *

          You can also provide OAuth authentication credentials of user name, + * password, client ID, and client secret. For more information, see + * Authentication + * for a SharePoint data source.

          */ SecretArn: string | undefined; @@ -4207,6 +4216,14 @@ export interface SharePointConfiguration { * this to connect to SharePoint.

          */ SslCertificateS3Path?: S3Path; + + /** + *

          Whether you want to connect to SharePoint using basic authentication of + * user name and password, or OAuth authentication of user name, password, + * client ID, and client secret. You can use OAuth authentication for + * SharePoint Online.

          + */ + AuthenticationType?: SharePointOnlineAuthenticationType | string; } export namespace SharePointConfiguration { @@ -5906,7 +5923,8 @@ export interface DescribeAccessControlConfigurationRequest { IndexId: string | undefined; /** - *

          The identifier of the access control configuration you want to get information on.

          + *

          The identifier of the access control configuration + * you want to get information on.

          */ Id: string | undefined; } @@ -5945,8 +5963,8 @@ export interface DescribeAccessControlConfigurationResponse { AccessControlList?: Principal[]; /** - *

          The list of principal lists that - * define the hierarchy for which documents users should have access to.

          + *

          The list of principal + * lists that define the hierarchy for which documents users should have access to.

          */ HierarchicalAccessControlList?: HierarchicalPrincipal[]; } @@ -7654,10 +7672,10 @@ export interface ListAccessControlConfigurationsRequest { IndexId: string | undefined; /** - *

          If the previous response was incomplete (because there is more data - * to retrieve), Amazon Kendra returns a pagination token in the response. - * You can use this pagination token to retrieve the next set of access - * control configurations.

          + *

          If the previous response was incomplete (because + * there's more data to retrieve), Amazon Kendra returns a pagination token + * in the response. You can use this pagination token to retrieve the next set + * of access control configurations.

          */ NextToken?: string; @@ -7678,9 +7696,9 @@ export namespace ListAccessControlConfigurationsRequest { export interface ListAccessControlConfigurationsResponse { /** - *

          If the response is truncated, Amazon Kendra returns this token - * that you can use in the subsequent request to retrieve the next set of - * access control configurations.

          + *

          If the response is truncated, Amazon Kendra returns this + * token, which you can use in the subsequent request to retrieve the + * next set of access control configurations.

          */ NextToken?: string; @@ -10328,53 +10346,3 @@ export namespace AttributeFilter { ...obj, }); } - -export interface QueryResult { - /** - *

          The unique identifier for the search. You use QueryId - * to identify the search when using the feedback API.

          - */ - QueryId?: string; - - /** - *

          The results of the search.

          - */ - ResultItems?: QueryResultItem[]; - - /** - *

          Contains the facet results. A FacetResult contains the - * counts for each attribute key that was specified in the - * Facets input parameter.

          - */ - FacetResults?: FacetResult[]; - - /** - *

          The total number of items found by the search; however, you can only - * retrieve up to 100 items. For example, if the search found 192 items, - * you can only retrieve the first 100 of the items.

          - */ - TotalNumberOfResults?: number; - - /** - *

          A list of warning codes and their messages on problems with your query.

          - *

          Amazon Kendra currently only supports one type of warning, which is a warning - * on invalid syntax used in the query. For examples of invalid query syntax, - * see Searching - * with advanced query syntax.

          - */ - Warnings?: Warning[]; - - /** - *

          A list of information related to suggested spell corrections for a query.

          - */ - SpellCorrectedQueries?: SpellCorrectedQuery[]; -} - -export namespace QueryResult { - /** - * @internal - */ - export const filterSensitiveLog = (obj: QueryResult): any => ({ - ...obj, - }); -} diff --git a/clients/client-kendra/src/models/models_1.ts b/clients/client-kendra/src/models/models_1.ts index e6379c5c9fc40..9dfdd7ad900b7 100644 --- a/clients/client-kendra/src/models/models_1.ts +++ b/clients/client-kendra/src/models/models_1.ts @@ -3,12 +3,66 @@ import { AttributeFilter, DocumentRelevanceConfiguration, Facet, + FacetResult, + QueryResultItem, QueryResultType, SortingConfiguration, + SpellCorrectedQuery, SpellCorrectionConfiguration, UserContext, + Warning, } from "./models_0"; +export interface QueryResult { + /** + *

          The unique identifier for the search. You use QueryId + * to identify the search when using the feedback API.

          + */ + QueryId?: string; + + /** + *

          The results of the search.

          + */ + ResultItems?: QueryResultItem[]; + + /** + *

          Contains the facet results. A FacetResult contains the + * counts for each attribute key that was specified in the + * Facets input parameter.

          + */ + FacetResults?: FacetResult[]; + + /** + *

          The total number of items found by the search; however, you can only + * retrieve up to 100 items. For example, if the search found 192 items, + * you can only retrieve the first 100 of the items.

          + */ + TotalNumberOfResults?: number; + + /** + *

          A list of warning codes and their messages on problems with your query.

          + *

          Amazon Kendra currently only supports one type of warning, which is a warning + * on invalid syntax used in the query. For examples of invalid query syntax, + * see Searching + * with advanced query syntax.

          + */ + Warnings?: Warning[]; + + /** + *

          A list of information related to suggested spell corrections for a query.

          + */ + SpellCorrectedQueries?: SpellCorrectedQuery[]; +} + +export namespace QueryResult { + /** + * @internal + */ + export const filterSensitiveLog = (obj: QueryResult): any => ({ + ...obj, + }); +} + export interface QueryRequest { /** *

          The unique identifier of the index to search. The identifier is diff --git a/clients/client-kendra/src/protocols/Aws_json1_1.ts b/clients/client-kendra/src/protocols/Aws_json1_1.ts index 2e07cef676f0b..53736bc9acc89 100644 --- a/clients/client-kendra/src/protocols/Aws_json1_1.ts +++ b/clients/client-kendra/src/protocols/Aws_json1_1.ts @@ -345,7 +345,6 @@ import { Principal, ProxyConfiguration, PutPrincipalMappingRequest, - QueryResult, QueryResultItem, QuerySuggestionsBlockListSummary, QuipConfiguration, @@ -420,7 +419,7 @@ import { WebCrawlerConfiguration, WorkDocsConfiguration, } from "../models/models_0"; -import { QueryRequest } from "../models/models_1"; +import { QueryRequest, QueryResult } from "../models/models_1"; export const serializeAws_json1_1AssociateEntitiesToExperienceCommand = async ( input: AssociateEntitiesToExperienceCommandInput, @@ -7290,6 +7289,7 @@ const serializeAws_json1_1ServiceNowServiceCatalogConfiguration = ( const serializeAws_json1_1SharePointConfiguration = (input: SharePointConfiguration, context: __SerdeContext): any => { return { + ...(input.AuthenticationType != null && { AuthenticationType: input.AuthenticationType }), ...(input.CrawlAttachments != null && { CrawlAttachments: input.CrawlAttachments }), ...(input.DisableLocalGroups != null && { DisableLocalGroups: input.DisableLocalGroups }), ...(input.DocumentTitleFieldName != null && { DocumentTitleFieldName: input.DocumentTitleFieldName }), @@ -10761,6 +10761,7 @@ const deserializeAws_json1_1SharePointConfiguration = ( context: __SerdeContext ): SharePointConfiguration => { return { + AuthenticationType: __expectString(output.AuthenticationType), CrawlAttachments: __expectBoolean(output.CrawlAttachments), DisableLocalGroups: __expectBoolean(output.DisableLocalGroups), DocumentTitleFieldName: __expectString(output.DocumentTitleFieldName), diff --git a/codegen/sdk-codegen/aws-models/kendra.json b/codegen/sdk-codegen/aws-models/kendra.json index 
332f0c7b4b792..8ba98de8f3432 100644 --- a/codegen/sdk-codegen/aws-models/kendra.json +++ b/codegen/sdk-codegen/aws-models/kendra.json @@ -2115,7 +2115,7 @@ } ], "traits": { - "smithy.api#documentation": "

          Creates an access configuration for your documents. This includes \n user and group access information for your documents. This is useful \n for user context filtering, where search results are filtered based \n on the user or their group access to documents.

          \n

          You can use this to re-configure your existing document level access \n control without indexing all of your documents again. For example, your \n index contains top-secret company documents that only certain employees \n or users should access. One of these users leaves the company or switches \n to a team that should be blocked from access to top-secret documents. \n Your documents in your index still give this user access to top-secret \n documents due to the user having access at the time your documents were \n indexed. You can create a specific access control configuration for this \n user with deny access. You can later update the access control \n configuration to allow access in the case the user returns to the company \n and re-joins the 'top-secret' team. You can re-configure access control \n for your documents circumstances change.

          \n

          To apply your access control configuration to certain documents, you call \n the BatchPutDocument \n API with the AccessControlConfigurationId included in the \n Document \n object. If you use an S3 bucket as a data source, you update the \n .metadata.json with the AccessControlConfigurationId \n and synchronize your data source. Amazon Kendra currently only supports \n access control configuration for S3 data sources and documents indexed using the \n BatchPutDocument API.

          " + "smithy.api#documentation": "

          Creates an access configuration for your documents. This includes \n user and group access information for your documents. This is useful \n for user context filtering, where search results are filtered based \n on the user or their group access to documents.

          \n

          You can use this to re-configure your existing document level access control without\n indexing all of your documents again. For example, your index contains top-secret\n company documents that only certain employees or users should access. One of these users\n leaves the company or switches to a team that should be blocked from accessing \n top-secret documents. The user still has access to top-secret documents because the user \n had access when your documents were previously indexed. You \n can create a specific access control configuration for the user with deny \n access. You can later update the access control configuration to allow access if the \n user returns to the company and re-joins the 'top-secret' team. You can re-configure \n access control for your documents as circumstances change.

          \n

          To apply your access control configuration to certain documents, you call \n the BatchPutDocument \n API with the AccessControlConfigurationId included in the \n Document \n object. If you use an S3 bucket as a data source, you update the \n .metadata.json with the AccessControlConfigurationId \n and synchronize your data source. Amazon Kendra currently only supports \n access control configuration for S3 data sources and documents indexed using the \n BatchPutDocument API.

          " } }, "com.amazonaws.kendra#CreateAccessControlConfigurationRequest": { @@ -4005,7 +4005,7 @@ "Id": { "target": "com.amazonaws.kendra#AccessControlConfigurationId", "traits": { - "smithy.api#documentation": "

          The identifier of the access control configuration you want to get information on.

          ", + "smithy.api#documentation": "

          The identifier of the access control configuration\n you want to get information on.

          ", "smithy.api#required": {} } } @@ -4042,7 +4042,7 @@ "HierarchicalAccessControlList": { "target": "com.amazonaws.kendra#HierarchicalPrincipalList", "traits": { - "smithy.api#documentation": "

          The list of principal lists that \n define the hierarchy for which documents users should have access to.

          " + "smithy.api#documentation": "

          The list of principal \n lists that define the hierarchy for which documents users should have access to.

          " } } } @@ -7706,7 +7706,7 @@ "NextToken": { "target": "com.amazonaws.kendra#String", "traits": { - "smithy.api#documentation": "

          If the previous response was incomplete (because there is more data \n to retrieve), Amazon Kendra returns a pagination token in the response. \n You can use this pagination token to retrieve the next set of access \n control configurations.

          " + "smithy.api#documentation": "

          If the previous response was incomplete (because\n there's more data to retrieve), Amazon Kendra returns a pagination token \n in the response. You can use this pagination token to retrieve the next set \n of access control configurations.

          " } }, "MaxResults": { @@ -7723,7 +7723,7 @@ "NextToken": { "target": "com.amazonaws.kendra#String", "traits": { - "smithy.api#documentation": "

          If the response is truncated, Amazon Kendra returns this token \n that you can use in the subsequent request to retrieve the next set of \n access control configurations.

          " + "smithy.api#documentation": "

          If the response is truncated, Amazon Kendra returns this\n token, which you can use in the subsequent request to retrieve the \n next set of access control configurations.

          " } }, "AccessControlConfigurations": { @@ -10921,7 +10921,7 @@ "SecretArn": { "target": "com.amazonaws.kendra#SecretArn", "traits": { - "smithy.api#documentation": "

          The Amazon Resource Name (ARN) of an Secrets Manager \n secret that contains the user name and password required to \n connect to the SharePoint instance. \n If you use SharePoint Server, you also need to provide the sever \n domain name as part of the credentials. For\n more information, see Using a\n Microsoft SharePoint Data Source.

          ", + "smithy.api#documentation": "

          The Amazon Resource Name (ARN) of a Secrets Manager \n secret that contains the user name and password required to \n connect to the SharePoint instance. \n If you use SharePoint Server, you also need to provide the server \n domain name as part of the credentials. For\n more information, see Using a\n Microsoft SharePoint Data Source.

          \n

          You can also provide OAuth authentication credentials of user name, \n password, client ID, and client secret. For more information, see \n Authentication \n for a SharePoint data source.

          ", "smithy.api#required": {} } }, @@ -10978,12 +10978,33 @@ "traits": { "smithy.api#documentation": "

          The path to the SSL certificate stored in an Amazon S3 bucket. You use \n this to connect to SharePoint.

          " } + }, + "AuthenticationType": { + "target": "com.amazonaws.kendra#SharePointOnlineAuthenticationType", + "traits": { + "smithy.api#documentation": "

          Whether you want to connect to SharePoint using basic authentication of \n user name and password, or OAuth authentication of user name, password, \n client ID, and client secret. You can use OAuth authentication for \n SharePoint Online.

          " + } } }, "traits": { "smithy.api#documentation": "

          Provides the configuration information to connect to Microsoft\n SharePoint as your data source.

          " } }, + "com.amazonaws.kendra#SharePointOnlineAuthenticationType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "HTTP_BASIC", + "name": "HTTP_BASIC" + }, + { + "value": "OAUTH2", + "name": "OAUTH2" + } + ] + } + }, "com.amazonaws.kendra#SharePointUrlList": { "type": "list", "member": { @@ -12128,7 +12149,7 @@ } ], "traits": { - "smithy.api#documentation": "

          Updates an access control configuration for your documents in an index. This \n includes user and group access information for your documents. This is useful \n for user context filtering, where search results are filtered based on the user \n or their group access to documents.

          \n

          You can update an access control configuration you created without indexing all \n of your documents again. For example, your index contains top-secret company \n documents that only certain employees or users should access. You created an 'allow' \n access control configuration for one user who recently joined the 'top-secret' team, \n switching from a team with 'deny' access to top-secret documents. However, the user \n suddenly returns to their previous team and should no longer have access to top secret \n documents. You can update the access control configuration to re-configure access \n control for your documents as circumstances change.

          \n

          You call the BatchPutDocument \n API to apply the updated access control configuration, with the \n AccessControlConfigurationId included in the \n Document \n object. If you use an S3 bucket as a data source, you synchronize your data source to \n apply the the AccessControlConfigurationId in the .metadata.json file. \n Amazon Kendra currently only supports access control configuration for S3 data \n sources and documents indexed using the BatchPutDocument API.

          " + "smithy.api#documentation": "

          Updates an access control configuration for your documents in an index. This \n includes user and group access information for your documents. This is useful \n for user context filtering, where search results are filtered based on the user \n or their group access to documents.

          \n

          You can update an access control configuration you created without indexing all \n of your documents again. For example, your index contains top-secret company \n documents that only certain employees or users should access. You created an 'allow' \n access control configuration for one user who recently joined the 'top-secret' team, \n switching from a team with 'deny' access to top-secret documents. However, the user \n suddenly returns to their previous team and should no longer have access to top secret \n documents. You can update the access control configuration to re-configure access \n control for your documents as circumstances change.

          \n

          You call the BatchPutDocument API to\n apply the updated access control configuration, with the\n AccessControlConfigurationId included in the \n Document\n object. If you use an S3 bucket as a data source, you synchronize your data source to\n apply the AccessControlConfigurationId in the .metadata.json file. \n Amazon Kendra currently only supports access control configuration for S3 data\n sources and documents indexed using the BatchPutDocument API.

          " } }, "com.amazonaws.kendra#UpdateAccessControlConfigurationRequest": { From 64613bcf4ee61211072c89b351e932ac680caca7 Mon Sep 17 00:00:00 2001 From: awstools Date: Thu, 21 Jul 2022 18:23:03 +0000 Subject: [PATCH 27/31] docs(client-rds): Adds support for creating an RDS Proxy for an RDS for MariaDB database. --- clients/client-rds/src/RDS.ts | 112 ++--- .../src/commands/BacktrackDBClusterCommand.ts | 2 +- .../commands/CopyDBClusterSnapshotCommand.ts | 51 +- .../src/commands/CopyDBSnapshotCommand.ts | 6 +- .../src/commands/CreateDBClusterCommand.ts | 4 +- .../CreateDBClusterEndpointCommand.ts | 2 +- .../CreateDBClusterParameterGroupCommand.ts | 18 +- .../src/commands/CreateDBInstanceCommand.ts | 8 + .../CreateDBInstanceReadReplicaCommand.ts | 4 +- .../CreateEventSubscriptionCommand.ts | 2 +- .../commands/CreateGlobalClusterCommand.ts | 2 +- .../src/commands/DescribeEventsCommand.ts | 2 + .../src/commands/FailoverDBClusterCommand.ts | 2 +- .../ModifyDBClusterParameterGroupCommand.ts | 2 +- ...ModifyDBClusterSnapshotAttributeCommand.ts | 2 +- .../ModifyDBSnapshotAttributeCommand.ts | 2 +- .../src/commands/RebootDBInstanceCommand.ts | 1 + ...tanceAutomatedBackupsReplicationCommand.ts | 2 +- clients/client-rds/src/models/models_0.ts | 435 +++++++++++++----- clients/client-rds/src/models/models_1.ts | 169 +++++-- codegen/sdk-codegen/aws-models/rds.json | 144 +++--- 21 files changed, 623 insertions(+), 349 deletions(-) diff --git a/clients/client-rds/src/RDS.ts b/clients/client-rds/src/RDS.ts index d1fc0bf56d1f5..859ccfad7f914 100644 --- a/clients/client-rds/src/RDS.ts +++ b/clients/client-rds/src/RDS.ts @@ -950,7 +950,7 @@ export class RDS extends RDSClient { * Backtracking an Aurora DB Cluster in the * Amazon Aurora User Guide.

          * - *

          This action only applies to Aurora MySQL DB clusters.

          + *

          This action applies only to Aurora MySQL DB clusters.

          *
          */ public backtrackDBCluster( @@ -1051,9 +1051,11 @@ export class RDS extends RDSClient { *

          Copies a snapshot of a DB cluster.

          *

          To copy a DB cluster snapshot from a shared manual DB cluster snapshot, SourceDBClusterSnapshotIdentifier * must be the Amazon Resource Name (ARN) of the shared DB cluster snapshot.

          - *

          You can copy an encrypted DB cluster snapshot from another Amazon Web Services Region. In that case, the Amazon Web Services Region where you call the CopyDBClusterSnapshot action - * is the destination Amazon Web Services Region for the encrypted DB cluster snapshot to be copied to. To copy an encrypted DB cluster snapshot from another Amazon Web Services Region, - * you must provide the following values:

          + *

          You can copy an encrypted DB cluster snapshot from another Amazon Web Services Region. In that case, + * the Amazon Web Services Region where you call the CopyDBClusterSnapshot operation is the + * destination Amazon Web Services Region for the encrypted DB cluster snapshot to be copied to. To copy + * an encrypted DB cluster snapshot from another Amazon Web Services Region, you must provide the + * following values:

          *
            *
          • *

            @@ -1062,49 +1064,14 @@ export class RDS extends RDSClient { *

          • *
          • *

            - * PreSignedUrl - A URL that contains a Signature Version 4 signed request for the - * CopyDBClusterSnapshot action to be called in the source Amazon Web Services Region where the DB cluster snapshot is copied from. - * The pre-signed URL must be a valid request for the CopyDBClusterSnapshot API action that can be executed in the - * source Amazon Web Services Region that contains the encrypted DB cluster snapshot to be copied.

            - *

            The pre-signed URL request must contain the following parameter values:

            - *
              - *
            • - *

              - * KmsKeyId - The Amazon Web Services KMS key identifier for the KMS key to use to encrypt the copy of the DB - * cluster snapshot in the destination Amazon Web Services Region. This is the same identifier for both the CopyDBClusterSnapshot - * action that is called in the destination Amazon Web Services Region, and the action contained in the pre-signed URL.

              - *
            • - *
            • - *

              - * DestinationRegion - The name of the Amazon Web Services Region that the DB cluster snapshot is to be created in.

              - *
            • - *
            • - *

              - * SourceDBClusterSnapshotIdentifier - The DB cluster snapshot identifier for the encrypted DB cluster - * snapshot to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source Amazon Web Services Region. For example, - * if you are copying an encrypted DB cluster snapshot from the us-west-2 Amazon Web Services Region, then your SourceDBClusterSnapshotIdentifier - * looks like the following example: arn:aws:rds:us-west-2:123456789012:cluster-snapshot:aurora-cluster1-snapshot-20161115.

              - *
            • - *
            - *

            To learn how to generate a Signature Version 4 signed request, see - * - * Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and - * - * Signature Version 4 Signing Process.

            - * - *

            If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion (or --source-region for the CLI) - * instead of specifying PreSignedUrl manually. Specifying SourceRegion autogenerates a pre-signed URL that is a valid - * request for the operation that can be executed in the source Amazon Web Services Region.

            - *
            - *
          • - *
          • - *

            * TargetDBClusterSnapshotIdentifier - The identifier for the new copy of the DB cluster snapshot in the destination Amazon Web Services Region.

            *
          • *
          • *

            - * SourceDBClusterSnapshotIdentifier - The DB cluster snapshot identifier for the encrypted DB cluster snapshot to be copied. - * This identifier must be in the ARN format for the source Amazon Web Services Region and is the same value as the SourceDBClusterSnapshotIdentifier in the pre-signed URL.

            + * SourceDBClusterSnapshotIdentifier - The DB cluster snapshot + * identifier for the encrypted DB cluster snapshot to be copied. This identifier + * must be in the ARN format for the source Amazon Web Services Region and is the same value as + * the SourceDBClusterSnapshotIdentifier in the presigned URL.

            *
          • *
          *

          To cancel the copy operation once it is in progress, delete the target DB cluster snapshot identified @@ -1182,9 +1149,9 @@ export class RDS extends RDSClient { /** *

          Copies the specified DB snapshot. The source DB snapshot must be in the available state.

          - *

          You can copy a snapshot from one Amazon Web Services Region to another. In that case, the Amazon Web Services Region - * where you call the CopyDBSnapshot action is the destination Amazon Web Services Region for the - * DB snapshot copy.

          + *

          You can copy a snapshot from one Amazon Web Services Region to another. In that case, the + * Amazon Web Services Region where you call the CopyDBSnapshot operation is the destination + * Amazon Web Services Region for the DB snapshot copy.

          *

          This command doesn't apply to RDS Custom.

          *

          For more information about copying snapshots, see * Copying a DB Snapshot in the Amazon RDS User Guide.

          @@ -1316,9 +1283,7 @@ export class RDS extends RDSClient { *

          Creates a new Amazon Aurora DB cluster or Multi-AZ DB cluster.

          *

          You can use the ReplicationSourceIdentifier parameter to create an Amazon * Aurora DB cluster as a read replica of another DB cluster or Amazon RDS MySQL or - * PostgreSQL DB instance. For cross-Region replication where the DB cluster identified by - * ReplicationSourceIdentifier is encrypted, also specify the - * PreSignedUrl parameter.

          + * PostgreSQL DB instance.

          *

          For more information on Amazon Aurora, see * * What is Amazon Aurora? in the Amazon Aurora User Guide.

          @@ -1358,7 +1323,7 @@ export class RDS extends RDSClient { /** *

          Creates a new custom endpoint and associates it with an Amazon Aurora DB cluster.

          * - *

          This action only applies to Aurora DB clusters.

          + *

          This action applies only to Aurora DB clusters.

          *
          */ public createDBClusterEndpoint( @@ -1405,15 +1370,15 @@ export class RDS extends RDSClient { * cluster without failover for the new DB cluster parameter group and associated settings to take effect.

          * *

          After you create a DB cluster parameter group, you should wait at least 5 minutes - * before creating your first DB cluster - * that uses that DB cluster parameter group as the default parameter - * group. This allows Amazon RDS to fully complete the create action before the DB cluster parameter - * group is used as the default for a new DB cluster. This is especially important for parameters - * that are critical when creating the default database for a DB cluster, such as the character set - * for the default database defined by the character_set_database parameter. You can use the - * Parameter Groups option of the Amazon RDS console or the - * DescribeDBClusterParameters action to verify - * that your DB cluster parameter group has been created or modified.

          + * before creating your first DB cluster that uses that DB cluster parameter group as + * the default parameter group. This allows Amazon RDS to fully complete the create + * action before the DB cluster parameter group is used as the default for a new DB + * cluster. This is especially important for parameters that are critical when creating + * the default database for a DB cluster, such as the character set for the default + * database defined by the character_set_database parameter. You can use + * the Parameter Groups option of the Amazon RDS console or the + * DescribeDBClusterParameters operation to verify that your DB + * cluster parameter group has been created or modified.

          *
          *

          For more information on Amazon Aurora, see * @@ -1490,6 +1455,14 @@ export class RDS extends RDSClient { /** *

          Creates a new DB instance.

          + *

          The new DB instance can be an RDS DB instance, or it can be a DB instance in an Aurora DB cluster. + * For an Aurora DB cluster, you can call this operation multiple times to add more than one DB instance + * to the cluster.

          + *

          For more information about creating an RDS DB instance, see + * Creating an Amazon RDS DB instance in the Amazon RDS User Guide.

          + *

          For more information about creating a DB instance in an Aurora DB cluster, see + * + * Creating an Amazon Aurora DB cluster in the Amazon Aurora User Guide.

          */ public createDBInstance( args: CreateDBInstanceCommandInput, @@ -1525,8 +1498,8 @@ export class RDS extends RDSClient { * instance. You can create a read replica for a DB instance running MySQL, MariaDB, * Oracle, PostgreSQL, or SQL Server. For more information, see Working with Read * Replicas in the Amazon RDS User Guide.

          - *

          Amazon Aurora doesn't support this action. Call the CreateDBInstance - * action to create a DB instance for an Aurora DB cluster.

          + *

          Amazon Aurora doesn't support this operation. Call the CreateDBInstance + * operation to create a DB instance for an Aurora DB cluster.

          *

          All read replica DB instances are created with backups disabled. All other DB * instance attributes (including DB security groups and DB parameter groups) are inherited * from the source DB instance, except as specified.

          @@ -1781,7 +1754,7 @@ export class RDS extends RDSClient { } /** - *

          Creates an RDS event notification subscription. This action requires a topic Amazon + *

          Creates an RDS event notification subscription. This operation requires a topic Amazon * Resource Name (ARN) created by either the RDS console, the SNS console, or the SNS API. * To obtain an ARN with SNS, you must create a topic in Amazon SNS and subscribe to the * topic. The ARN is displayed in the SNS console.

          @@ -1845,7 +1818,7 @@ export class RDS extends RDSClient { * Or you can specify an existing Aurora cluster during the create operation, * and this cluster becomes the primary cluster of the global database.

          * - *

          This action only applies to Aurora DB clusters.

          + *

          This action applies only to Aurora DB clusters.

          *
          */ public createGlobalCluster( @@ -3451,6 +3424,8 @@ export class RDS extends RDSClient { *

          Returns events related to DB instances, DB clusters, DB parameter groups, DB security groups, DB snapshots, DB cluster snapshots, and RDS Proxies for the past 14 days. * Events specific to a particular DB instance, DB cluster, DB parameter group, DB security group, DB snapshot, DB cluster snapshot group, or RDS Proxy can be * obtained by providing the name as a parameter.

          + *

          For more information on working with events, see Monitoring Amazon RDS events in the Amazon RDS User Guide and Monitoring Amazon Aurora + * events in the Amazon Aurora User Guide.

          * *

          By default, RDS returns events that were generated in the past hour.

          *
          @@ -3887,7 +3862,7 @@ export class RDS extends RDSClient { *

          For a Multi-AZ DB cluster, failover for a DB cluster promotes one of the readable standby DB instances (read-only instances) * in the DB cluster to be the primary DB instance (the cluster writer).

          *

          An Amazon Aurora DB cluster automatically fails over to an Aurora Replica, if one exists, - * when the primary DB instance fails. A Multi-AZ DB cluster automatically fails over to a readbable standby + * when the primary DB instance fails. A Multi-AZ DB cluster automatically fails over to a readable standby * DB instance when the primary DB instance fails.

          *

          To simulate a failure of a primary instance for testing, you can force a failover. * Because each instance in a DB cluster has its own endpoint address, make sure to clean up and re-establish any existing @@ -4242,7 +4217,7 @@ export class RDS extends RDSClient { * that are critical when creating the default database for a DB cluster, such as the character set * for the default database defined by the character_set_database parameter. You can use the * Parameter Groups option of the Amazon RDS console or the - * DescribeDBClusterParameters action to verify + * DescribeDBClusterParameters operation to verify * that your DB cluster parameter group has been created or modified.

          *

          If the modified DB cluster parameter group is used by an Aurora Serverless v1 cluster, Aurora * applies the update immediately. The cluster restart might interrupt your workload. In that case, @@ -4304,7 +4279,7 @@ export class RDS extends RDSClient { * parameter. You can't use all as a value for that parameter in this * case.

          *

          To view which Amazon Web Services accounts have access to copy or restore a manual DB cluster - * snapshot, or whether a manual DB cluster snapshot is public or private, use the DescribeDBClusterSnapshotAttributes API action. The accounts are + * snapshot, or whether a manual DB cluster snapshot is public or private, use the DescribeDBClusterSnapshotAttributes API operation. The accounts are * returned as values for the restore attribute.

          */ public modifyDBClusterSnapshotAttribute( @@ -4566,7 +4541,7 @@ export class RDS extends RDSClient { * list of authorized Amazon Web Services account IDs for the ValuesToAdd parameter. You * can't use all as a value for that parameter in this case.

          *

          To view which Amazon Web Services accounts have access to copy or restore a manual DB snapshot, or - * whether a manual DB snapshot public or private, use the DescribeDBSnapshotAttributes API action. The accounts are returned as + * whether a manual DB snapshot public or private, use the DescribeDBSnapshotAttributes API operation. The accounts are returned as * values for the restore attribute.

          */ public modifyDBSnapshotAttribute( @@ -4902,6 +4877,7 @@ export class RDS extends RDSClient { *

          For more information about rebooting, see Rebooting a DB Instance in the Amazon RDS User Guide. *

          *

          This command doesn't apply to RDS Custom.

          + *

          If your DB instance is part of a Multi-AZ DB cluster, you can reboot the DB cluster with the RebootDBCluster operation.

          */ public rebootDBInstance( args: RebootDBInstanceCommandInput, @@ -5845,7 +5821,7 @@ export class RDS extends RDSClient { /** *

          Stops automated backup replication for a DB instance.

          - *

          This command doesn't apply to RDS Custom.

          + *

          This command doesn't apply to RDS Custom, Aurora MySQL, and Aurora PostgreSQL.

          *

          For more information, see * Replicating Automated Backups to Another Amazon Web Services Region in the Amazon RDS User Guide. *

          diff --git a/clients/client-rds/src/commands/BacktrackDBClusterCommand.ts b/clients/client-rds/src/commands/BacktrackDBClusterCommand.ts index 77d8876d1516b..e370d060d1985 100644 --- a/clients/client-rds/src/commands/BacktrackDBClusterCommand.ts +++ b/clients/client-rds/src/commands/BacktrackDBClusterCommand.ts @@ -29,7 +29,7 @@ export interface BacktrackDBClusterCommandOutput extends DBClusterBacktrack, __M * Backtracking an Aurora DB Cluster in the * Amazon Aurora User Guide.

          * - *

          This action only applies to Aurora MySQL DB clusters.

          + *

          This action applies only to Aurora MySQL DB clusters.

          *
          * @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-rds/src/commands/CopyDBClusterSnapshotCommand.ts b/clients/client-rds/src/commands/CopyDBClusterSnapshotCommand.ts index e6560f28a8bff..9f07487b29c7b 100644 --- a/clients/client-rds/src/commands/CopyDBClusterSnapshotCommand.ts +++ b/clients/client-rds/src/commands/CopyDBClusterSnapshotCommand.ts @@ -27,9 +27,11 @@ export interface CopyDBClusterSnapshotCommandOutput extends CopyDBClusterSnapsho *

          Copies a snapshot of a DB cluster.

          *

          To copy a DB cluster snapshot from a shared manual DB cluster snapshot, SourceDBClusterSnapshotIdentifier * must be the Amazon Resource Name (ARN) of the shared DB cluster snapshot.

          - *

          You can copy an encrypted DB cluster snapshot from another Amazon Web Services Region. In that case, the Amazon Web Services Region where you call the CopyDBClusterSnapshot action - * is the destination Amazon Web Services Region for the encrypted DB cluster snapshot to be copied to. To copy an encrypted DB cluster snapshot from another Amazon Web Services Region, - * you must provide the following values:

          + *

          You can copy an encrypted DB cluster snapshot from another Amazon Web Services Region. In that case, + * the Amazon Web Services Region where you call the CopyDBClusterSnapshot operation is the + * destination Amazon Web Services Region for the encrypted DB cluster snapshot to be copied to. To copy + * an encrypted DB cluster snapshot from another Amazon Web Services Region, you must provide the + * following values:

          *
            *
          • *

            @@ -38,49 +40,14 @@ export interface CopyDBClusterSnapshotCommandOutput extends CopyDBClusterSnapsho *

          • *
          • *

            - * PreSignedUrl - A URL that contains a Signature Version 4 signed request for the - * CopyDBClusterSnapshot action to be called in the source Amazon Web Services Region where the DB cluster snapshot is copied from. - * The pre-signed URL must be a valid request for the CopyDBClusterSnapshot API action that can be executed in the - * source Amazon Web Services Region that contains the encrypted DB cluster snapshot to be copied.

            - *

            The pre-signed URL request must contain the following parameter values:

            - *
              - *
            • - *

              - * KmsKeyId - The Amazon Web Services KMS key identifier for the KMS key to use to encrypt the copy of the DB - * cluster snapshot in the destination Amazon Web Services Region. This is the same identifier for both the CopyDBClusterSnapshot - * action that is called in the destination Amazon Web Services Region, and the action contained in the pre-signed URL.

              - *
            • - *
            • - *

              - * DestinationRegion - The name of the Amazon Web Services Region that the DB cluster snapshot is to be created in.

              - *
            • - *
            • - *

              - * SourceDBClusterSnapshotIdentifier - The DB cluster snapshot identifier for the encrypted DB cluster - * snapshot to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source Amazon Web Services Region. For example, - * if you are copying an encrypted DB cluster snapshot from the us-west-2 Amazon Web Services Region, then your SourceDBClusterSnapshotIdentifier - * looks like the following example: arn:aws:rds:us-west-2:123456789012:cluster-snapshot:aurora-cluster1-snapshot-20161115.

              - *
            • - *
            - *

            To learn how to generate a Signature Version 4 signed request, see - * - * Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and - * - * Signature Version 4 Signing Process.

            - * - *

            If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion (or --source-region for the CLI) - * instead of specifying PreSignedUrl manually. Specifying SourceRegion autogenerates a pre-signed URL that is a valid - * request for the operation that can be executed in the source Amazon Web Services Region.

            - *
            - *
          • - *
          • - *

            * TargetDBClusterSnapshotIdentifier - The identifier for the new copy of the DB cluster snapshot in the destination Amazon Web Services Region.

            *
          • *
          • *

            - * SourceDBClusterSnapshotIdentifier - The DB cluster snapshot identifier for the encrypted DB cluster snapshot to be copied. - * This identifier must be in the ARN format for the source Amazon Web Services Region and is the same value as the SourceDBClusterSnapshotIdentifier in the pre-signed URL.

            + * SourceDBClusterSnapshotIdentifier - The DB cluster snapshot + * identifier for the encrypted DB cluster snapshot to be copied. This identifier + * must be in the ARN format for the source Amazon Web Services Region and is the same value as + * the SourceDBClusterSnapshotIdentifier in the presigned URL.

            *
          • *
          *

          To cancel the copy operation once it is in progress, delete the target DB cluster snapshot identified diff --git a/clients/client-rds/src/commands/CopyDBSnapshotCommand.ts b/clients/client-rds/src/commands/CopyDBSnapshotCommand.ts index 5b0b9989906ab..354da5bb7264a 100644 --- a/clients/client-rds/src/commands/CopyDBSnapshotCommand.ts +++ b/clients/client-rds/src/commands/CopyDBSnapshotCommand.ts @@ -25,9 +25,9 @@ export interface CopyDBSnapshotCommandOutput extends CopyDBSnapshotResult, __Met /** *

          Copies the specified DB snapshot. The source DB snapshot must be in the available state.

          - *

          You can copy a snapshot from one Amazon Web Services Region to another. In that case, the Amazon Web Services Region - * where you call the CopyDBSnapshot action is the destination Amazon Web Services Region for the - * DB snapshot copy.

          + *

          You can copy a snapshot from one Amazon Web Services Region to another. In that case, the + * Amazon Web Services Region where you call the CopyDBSnapshot operation is the destination + * Amazon Web Services Region for the DB snapshot copy.

          *

          This command doesn't apply to RDS Custom.

          *

          For more information about copying snapshots, see * Copying a DB Snapshot in the Amazon RDS User Guide.

          diff --git a/clients/client-rds/src/commands/CreateDBClusterCommand.ts b/clients/client-rds/src/commands/CreateDBClusterCommand.ts index a21220879534a..1e3004de5e061 100644 --- a/clients/client-rds/src/commands/CreateDBClusterCommand.ts +++ b/clients/client-rds/src/commands/CreateDBClusterCommand.ts @@ -27,9 +27,7 @@ export interface CreateDBClusterCommandOutput extends CreateDBClusterResult, __M *

          Creates a new Amazon Aurora DB cluster or Multi-AZ DB cluster.

          *

          You can use the ReplicationSourceIdentifier parameter to create an Amazon * Aurora DB cluster as a read replica of another DB cluster or Amazon RDS MySQL or - * PostgreSQL DB instance. For cross-Region replication where the DB cluster identified by - * ReplicationSourceIdentifier is encrypted, also specify the - * PreSignedUrl parameter.

          + * PostgreSQL DB instance.

          *

          For more information on Amazon Aurora, see * * What is Amazon Aurora? in the Amazon Aurora User Guide.

          diff --git a/clients/client-rds/src/commands/CreateDBClusterEndpointCommand.ts b/clients/client-rds/src/commands/CreateDBClusterEndpointCommand.ts index 77e3008cb4de0..2404b4edf0f20 100644 --- a/clients/client-rds/src/commands/CreateDBClusterEndpointCommand.ts +++ b/clients/client-rds/src/commands/CreateDBClusterEndpointCommand.ts @@ -25,7 +25,7 @@ export interface CreateDBClusterEndpointCommandOutput extends DBClusterEndpoint, /** *

          Creates a new custom endpoint and associates it with an Amazon Aurora DB cluster.

          * - *

          This action only applies to Aurora DB clusters.

          + *

          This action applies only to Aurora DB clusters.

          *
          * @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-rds/src/commands/CreateDBClusterParameterGroupCommand.ts b/clients/client-rds/src/commands/CreateDBClusterParameterGroupCommand.ts index 75183c21c654e..65aaf2b244360 100644 --- a/clients/client-rds/src/commands/CreateDBClusterParameterGroupCommand.ts +++ b/clients/client-rds/src/commands/CreateDBClusterParameterGroupCommand.ts @@ -39,15 +39,15 @@ export interface CreateDBClusterParameterGroupCommandOutput * cluster without failover for the new DB cluster parameter group and associated settings to take effect.

          * *

          After you create a DB cluster parameter group, you should wait at least 5 minutes - * before creating your first DB cluster - * that uses that DB cluster parameter group as the default parameter - * group. This allows Amazon RDS to fully complete the create action before the DB cluster parameter - * group is used as the default for a new DB cluster. This is especially important for parameters - * that are critical when creating the default database for a DB cluster, such as the character set - * for the default database defined by the character_set_database parameter. You can use the - * Parameter Groups option of the Amazon RDS console or the - * DescribeDBClusterParameters action to verify - * that your DB cluster parameter group has been created or modified.

          + * before creating your first DB cluster that uses that DB cluster parameter group as + * the default parameter group. This allows Amazon RDS to fully complete the create + * action before the DB cluster parameter group is used as the default for a new DB + * cluster. This is especially important for parameters that are critical when creating + * the default database for a DB cluster, such as the character set for the default + * database defined by the character_set_database parameter. You can use + * the Parameter Groups option of the Amazon RDS console or the + * DescribeDBClusterParameters operation to verify that your DB + * cluster parameter group has been created or modified.

          *
          *

          For more information on Amazon Aurora, see * diff --git a/clients/client-rds/src/commands/CreateDBInstanceCommand.ts b/clients/client-rds/src/commands/CreateDBInstanceCommand.ts index 447a1f1307277..2d232eec34b53 100644 --- a/clients/client-rds/src/commands/CreateDBInstanceCommand.ts +++ b/clients/client-rds/src/commands/CreateDBInstanceCommand.ts @@ -24,6 +24,14 @@ export interface CreateDBInstanceCommandOutput extends CreateDBInstanceResult, _ /** *

          Creates a new DB instance.

          + *

          The new DB instance can be an RDS DB instance, or it can be a DB instance in an Aurora DB cluster. + * For an Aurora DB cluster, you can call this operation multiple times to add more than one DB instance + * to the cluster.

          + *

          For more information about creating an RDS DB instance, see + * Creating an Amazon RDS DB instance in the Amazon RDS User Guide.

          + *

          For more information about creating a DB instance in an Aurora DB cluster, see + * + * Creating an Amazon Aurora DB cluster in the Amazon Aurora User Guide.

          * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-rds/src/commands/CreateDBInstanceReadReplicaCommand.ts b/clients/client-rds/src/commands/CreateDBInstanceReadReplicaCommand.ts index 9868bddbb24c2..84bac761b2a57 100644 --- a/clients/client-rds/src/commands/CreateDBInstanceReadReplicaCommand.ts +++ b/clients/client-rds/src/commands/CreateDBInstanceReadReplicaCommand.ts @@ -28,8 +28,8 @@ export interface CreateDBInstanceReadReplicaCommandOutput extends CreateDBInstan * instance. You can create a read replica for a DB instance running MySQL, MariaDB, * Oracle, PostgreSQL, or SQL Server. For more information, see Working with Read * Replicas in the Amazon RDS User Guide.

          - *

          Amazon Aurora doesn't support this action. Call the CreateDBInstance - * action to create a DB instance for an Aurora DB cluster.

          + *

          Amazon Aurora doesn't support this operation. Call the CreateDBInstance + * operation to create a DB instance for an Aurora DB cluster.

          *

          All read replica DB instances are created with backups disabled. All other DB * instance attributes (including DB security groups and DB parameter groups) are inherited * from the source DB instance, except as specified.

          diff --git a/clients/client-rds/src/commands/CreateEventSubscriptionCommand.ts b/clients/client-rds/src/commands/CreateEventSubscriptionCommand.ts index 65e45dc947df6..c12ea4af2652a 100644 --- a/clients/client-rds/src/commands/CreateEventSubscriptionCommand.ts +++ b/clients/client-rds/src/commands/CreateEventSubscriptionCommand.ts @@ -23,7 +23,7 @@ export interface CreateEventSubscriptionCommandInput extends CreateEventSubscrip export interface CreateEventSubscriptionCommandOutput extends CreateEventSubscriptionResult, __MetadataBearer {} /** - *

          Creates an RDS event notification subscription. This action requires a topic Amazon + *

          Creates an RDS event notification subscription. This operation requires a topic Amazon * Resource Name (ARN) created by either the RDS console, the SNS console, or the SNS API. * To obtain an ARN with SNS, you must create a topic in Amazon SNS and subscribe to the * topic. The ARN is displayed in the SNS console.

          diff --git a/clients/client-rds/src/commands/CreateGlobalClusterCommand.ts b/clients/client-rds/src/commands/CreateGlobalClusterCommand.ts index d025ff7872542..a74eca4ec68f2 100644 --- a/clients/client-rds/src/commands/CreateGlobalClusterCommand.ts +++ b/clients/client-rds/src/commands/CreateGlobalClusterCommand.ts @@ -34,7 +34,7 @@ export interface CreateGlobalClusterCommandOutput extends CreateGlobalClusterRes * Or you can specify an existing Aurora cluster during the create operation, * and this cluster becomes the primary cluster of the global database.

          * - *

          This action only applies to Aurora DB clusters.

          + *

          This action applies only to Aurora DB clusters.

          *
          * @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-rds/src/commands/DescribeEventsCommand.ts b/clients/client-rds/src/commands/DescribeEventsCommand.ts index 424071c4409a6..e6f95851e402a 100644 --- a/clients/client-rds/src/commands/DescribeEventsCommand.ts +++ b/clients/client-rds/src/commands/DescribeEventsCommand.ts @@ -26,6 +26,8 @@ export interface DescribeEventsCommandOutput extends EventsMessage, __MetadataBe *

          Returns events related to DB instances, DB clusters, DB parameter groups, DB security groups, DB snapshots, DB cluster snapshots, and RDS Proxies for the past 14 days. * Events specific to a particular DB instance, DB cluster, DB parameter group, DB security group, DB snapshot, DB cluster snapshot group, or RDS Proxy can be * obtained by providing the name as a parameter.

          + *

          For more information on working with events, see Monitoring Amazon RDS events in the Amazon RDS User Guide and Monitoring Amazon Aurora + * events in the Amazon Aurora User Guide.

          * *

          By default, RDS returns events that were generated in the past hour.

          *
          diff --git a/clients/client-rds/src/commands/FailoverDBClusterCommand.ts b/clients/client-rds/src/commands/FailoverDBClusterCommand.ts index 2e39dab37d3f2..a787587f2606b 100644 --- a/clients/client-rds/src/commands/FailoverDBClusterCommand.ts +++ b/clients/client-rds/src/commands/FailoverDBClusterCommand.ts @@ -29,7 +29,7 @@ export interface FailoverDBClusterCommandOutput extends FailoverDBClusterResult, *

          For a Multi-AZ DB cluster, failover for a DB cluster promotes one of the readable standby DB instances (read-only instances) * in the DB cluster to be the primary DB instance (the cluster writer).

          *

          An Amazon Aurora DB cluster automatically fails over to an Aurora Replica, if one exists, - * when the primary DB instance fails. A Multi-AZ DB cluster automatically fails over to a readbable standby + * when the primary DB instance fails. A Multi-AZ DB cluster automatically fails over to a readable standby * DB instance when the primary DB instance fails.

          *

          To simulate a failure of a primary instance for testing, you can force a failover. * Because each instance in a DB cluster has its own endpoint address, make sure to clean up and re-establish any existing diff --git a/clients/client-rds/src/commands/ModifyDBClusterParameterGroupCommand.ts b/clients/client-rds/src/commands/ModifyDBClusterParameterGroupCommand.ts index 39aa859d1b31e..fd31289a20316 100644 --- a/clients/client-rds/src/commands/ModifyDBClusterParameterGroupCommand.ts +++ b/clients/client-rds/src/commands/ModifyDBClusterParameterGroupCommand.ts @@ -37,7 +37,7 @@ export interface ModifyDBClusterParameterGroupCommandOutput * that are critical when creating the default database for a DB cluster, such as the character set * for the default database defined by the character_set_database parameter. You can use the * Parameter Groups option of the Amazon RDS console or the - * DescribeDBClusterParameters action to verify + * DescribeDBClusterParameters operation to verify * that your DB cluster parameter group has been created or modified.

          *

          If the modified DB cluster parameter group is used by an Aurora Serverless v1 cluster, Aurora * applies the update immediately. The cluster restart might interrupt your workload. In that case, diff --git a/clients/client-rds/src/commands/ModifyDBClusterSnapshotAttributeCommand.ts b/clients/client-rds/src/commands/ModifyDBClusterSnapshotAttributeCommand.ts index 6ad6af00a36f2..07291f8cf7ce0 100644 --- a/clients/client-rds/src/commands/ModifyDBClusterSnapshotAttributeCommand.ts +++ b/clients/client-rds/src/commands/ModifyDBClusterSnapshotAttributeCommand.ts @@ -42,7 +42,7 @@ export interface ModifyDBClusterSnapshotAttributeCommandOutput * parameter. You can't use all as a value for that parameter in this * case.

          *

          To view which Amazon Web Services accounts have access to copy or restore a manual DB cluster - * snapshot, or whether a manual DB cluster snapshot is public or private, use the DescribeDBClusterSnapshotAttributes API action. The accounts are + * snapshot, or whether a manual DB cluster snapshot is public or private, use the DescribeDBClusterSnapshotAttributes API operation. The accounts are * returned as values for the restore attribute.

          * @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-rds/src/commands/ModifyDBSnapshotAttributeCommand.ts b/clients/client-rds/src/commands/ModifyDBSnapshotAttributeCommand.ts index 73cf8aaa5998d..27df601ab68f1 100644 --- a/clients/client-rds/src/commands/ModifyDBSnapshotAttributeCommand.ts +++ b/clients/client-rds/src/commands/ModifyDBSnapshotAttributeCommand.ts @@ -38,7 +38,7 @@ export interface ModifyDBSnapshotAttributeCommandOutput extends ModifyDBSnapshot * list of authorized Amazon Web Services account IDs for the ValuesToAdd parameter. You * can't use all as a value for that parameter in this case.

          *

          To view which Amazon Web Services accounts have access to copy or restore a manual DB snapshot, or - * whether a manual DB snapshot public or private, use the DescribeDBSnapshotAttributes API action. The accounts are returned as + * whether a manual DB snapshot is public or private, use the DescribeDBSnapshotAttributes API operation. The accounts are returned as * values for the restore attribute.

          * @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-rds/src/commands/RebootDBInstanceCommand.ts b/clients/client-rds/src/commands/RebootDBInstanceCommand.ts index 509a3afe99fc7..3b1f083e94d83 100644 --- a/clients/client-rds/src/commands/RebootDBInstanceCommand.ts +++ b/clients/client-rds/src/commands/RebootDBInstanceCommand.ts @@ -32,6 +32,7 @@ export interface RebootDBInstanceCommandOutput extends RebootDBInstanceResult, _ *

          For more information about rebooting, see Rebooting a DB Instance in the Amazon RDS User Guide. *

          *

          This command doesn't apply to RDS Custom.

          + *

          If your DB instance is part of a Multi-AZ DB cluster, you can reboot the DB cluster with the RebootDBCluster operation.

          * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-rds/src/commands/StopDBInstanceAutomatedBackupsReplicationCommand.ts b/clients/client-rds/src/commands/StopDBInstanceAutomatedBackupsReplicationCommand.ts index a39f9034e56d1..49071dae6d8d6 100644 --- a/clients/client-rds/src/commands/StopDBInstanceAutomatedBackupsReplicationCommand.ts +++ b/clients/client-rds/src/commands/StopDBInstanceAutomatedBackupsReplicationCommand.ts @@ -30,7 +30,7 @@ export interface StopDBInstanceAutomatedBackupsReplicationCommandOutput /** *

          Stops automated backup replication for a DB instance.

          - *

          This command doesn't apply to RDS Custom.

          + *

          This command doesn't apply to RDS Custom, Aurora MySQL, and Aurora PostgreSQL.

          *

          For more information, see * Replicating Automated Backups to Another Amazon Web Services Region in the Amazon RDS User Guide. *

          diff --git a/clients/client-rds/src/models/models_0.ts b/clients/client-rds/src/models/models_0.ts index d5d05c96e71dc..8d40e4ce37a49 100644 --- a/clients/client-rds/src/models/models_0.ts +++ b/clients/client-rds/src/models/models_0.ts @@ -1632,22 +1632,33 @@ export interface CopyDBClusterSnapshotMessage { KmsKeyId?: string; /** - *

          The URL that contains a Signature Version 4 signed request for the CopyDBClusterSnapshot API action in the Amazon Web Services Region that contains the - * source DB cluster snapshot to copy. The PreSignedUrl parameter must be used when copying an encrypted DB cluster snapshot from another Amazon Web Services Region. - * Don't specify PreSignedUrl when you are copying an encrypted DB cluster snapshot in the same Amazon Web Services Region.

          - *

          The pre-signed URL must be a valid request for the CopyDBClusterSnapshot API action that can be - * executed in the source Amazon Web Services Region that contains the encrypted DB cluster snapshot to be copied. - * The pre-signed URL request must contain the following parameter values:

          + *

          When you are copying a DB cluster snapshot from one Amazon Web Services GovCloud (US) Region + * to another, the URL that contains a Signature Version 4 signed request for the + * CopyDBClusterSnapshot API operation in the Amazon Web Services Region that contains + * the source DB cluster snapshot to copy. Use the PreSignedUrl parameter when + * copying an encrypted DB cluster snapshot from another Amazon Web Services Region. Don't specify + * PreSignedUrl when copying an encrypted DB cluster snapshot in the same + * Amazon Web Services Region.

          + *

          This setting applies only to Amazon Web Services GovCloud (US) Regions. It's ignored in other + * Amazon Web Services Regions.

          + *

          The presigned URL must be a valid request for the + * CopyDBClusterSnapshot API operation that can run in the source + * Amazon Web Services Region that contains the encrypted DB cluster snapshot to copy. The presigned URL request + * must contain the following parameter values:

          *
            *
          • *

            - * KmsKeyId - The Amazon Web Services KMS key identifier for the KMS key to use to encrypt the copy of the DB - * cluster snapshot in the destination Amazon Web Services Region. This is the same identifier for both the CopyDBClusterSnapshot - * action that is called in the destination Amazon Web Services Region, and the action contained in the pre-signed URL.

            + * KmsKeyId - The KMS key identifier for the KMS key + * to use to encrypt the copy of the DB cluster snapshot in the destination + * Amazon Web Services Region. This is the same identifier for both the + * CopyDBClusterSnapshot operation that is called in the + * destination Amazon Web Services Region, and the operation contained in the presigned + * URL.

            *
          • *
          • *

            - * DestinationRegion - The name of the Amazon Web Services Region that the DB cluster snapshot is to be created in.

            + * DestinationRegion - The name of the Amazon Web Services Region + * that the DB cluster snapshot is to be created in.

            *
          • *
          • *

            @@ -1658,15 +1669,16 @@ export interface CopyDBClusterSnapshotMessage { *

          • *
          *

          To learn how to generate a Signature Version 4 signed request, see - * * * Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and * * Signature Version 4 Signing Process.

          * - *

          If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion (or --source-region for the CLI) - * instead of specifying PreSignedUrl manually. Specifying SourceRegion autogenerates a pre-signed URL that is a valid - * request for the operation that can be executed in the source Amazon Web Services Region.

          + *

          If you are using an Amazon Web Services SDK tool or the CLI, you can specify + * SourceRegion (or --source-region for the CLI) + * instead of specifying PreSignedUrl manually. Specifying + * SourceRegion autogenerates a presigned URL that is a valid request + * for the operation that can run in the source Amazon Web Services Region.

          *
          */ PreSignedUrl?: string; @@ -2085,9 +2097,7 @@ export interface CopyDBSnapshotMessage { * arn:aws:rds:us-west-2:123456789012:snapshot:mysql-instance1-snapshot-20130805.

          *

          If you are copying from a shared manual DB snapshot, * this parameter must be the Amazon Resource Name (ARN) of the shared DB snapshot.

          - *

          If you are copying an encrypted snapshot - * this parameter must be in the ARN format for the source Amazon Web Services Region, - * and must match the SourceDBSnapshotIdentifier in the PreSignedUrl parameter.

          + *

          If you are copying an encrypted snapshot this parameter must be in the ARN format for the source Amazon Web Services Region.

          *

          Constraints:

          *
            *
          • @@ -2154,32 +2164,40 @@ export interface CopyDBSnapshotMessage { CopyTags?: boolean; /** - *

            The URL that contains a Signature Version 4 signed request for the - * CopyDBSnapshot API action in the source Amazon Web Services Region that contains the - * source DB snapshot to copy.

            + *

            When you are copying a snapshot from one Amazon Web Services GovCloud (US) Region to another, + * the URL that contains a Signature Version 4 signed request for the CopyDBSnapshot API + * operation in the source Amazon Web Services Region that contains the source DB snapshot to copy.

            + *

            This setting applies only to Amazon Web Services GovCloud (US) Regions. It's ignored in other + * Amazon Web Services Regions.

            *

            You must specify this parameter when you copy an encrypted DB snapshot from another * Amazon Web Services Region by using the Amazon RDS API. Don't specify PreSignedUrl when you are * copying an encrypted DB snapshot in the same Amazon Web Services Region.

            - *

            The presigned URL must be a valid request for the CopyDBSnapshot API action - * that can be executed in the source Amazon Web Services Region that contains the encrypted DB snapshot to be copied. - * The presigned URL request must contain the following parameter values:

            + *

          The presigned URL must be a valid request for the + * CopyDBSnapshot API operation that can run in the source + * Amazon Web Services Region that contains the encrypted DB snapshot to copy. The presigned URL request + * must contain the following parameter values:

            *
              *
            • *

              - * DestinationRegion - The Amazon Web Services Region that the encrypted DB snapshot is copied to. - * This Amazon Web Services Region is the same one where the CopyDBSnapshot action is called that contains this presigned URL.

              - *

              For example, if you copy an encrypted DB snapshot from the us-west-2 Amazon Web Services Region - * to the us-east-1 Amazon Web Services Region, then you call the CopyDBSnapshot action in - * the us-east-1 Amazon Web Services Region and provide a presigned URL that contains a call to the - * CopyDBSnapshot action in the us-west-2 Amazon Web Services Region. For this - * example, the DestinationRegion in the presigned URL must be set to - * the us-east-1 Amazon Web Services Region.

              + * DestinationRegion - The Amazon Web Services Region that the encrypted DB + * snapshot is copied to. This Amazon Web Services Region is the same one where the + * CopyDBSnapshot operation is called that contains this presigned + * URL.

              + *

              For example, if you copy an encrypted DB snapshot from the us-west-2 + * Amazon Web Services Region to the us-east-1 Amazon Web Services Region, then you call the + * CopyDBSnapshot operation in the us-east-1 Amazon Web Services Region and + * provide a presigned URL that contains a call to the CopyDBSnapshot + * operation in the us-west-2 Amazon Web Services Region. For this example, the + * DestinationRegion in the presigned URL must be set to the + * us-east-1 Amazon Web Services Region.

              *
            • *
            • *

              - * KmsKeyId - The Amazon Web Services KMS key identifier for the KMS key to use to encrypt the copy of the DB snapshot in the destination Amazon Web Services Region. - * This is the same identifier for both the CopyDBSnapshot action that is called in the destination Amazon Web Services Region, - * and the action contained in the presigned URL.

              + * KmsKeyId - The KMS key identifier for the KMS key to use to + * encrypt the copy of the DB snapshot in the destination Amazon Web Services Region. This is the + * same identifier for both the CopyDBSnapshot operation that is + * called in the destination Amazon Web Services Region, and the operation contained in the + * presigned URL.

              *
            • *
            • *

              @@ -2193,9 +2211,11 @@ export interface CopyDBSnapshotMessage { * Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and * Signature Version 4 Signing Process.

              * - *

              If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion (or --source-region for the CLI) - * instead of specifying PreSignedUrl manually. Specifying SourceRegion autogenerates a pre-signed URL that is a valid - * request for the operation that can be executed in the source Amazon Web Services Region.

              + *

              If you are using an Amazon Web Services SDK tool or the CLI, you can specify + * SourceRegion (or --source-region for the CLI) + * instead of specifying PreSignedUrl manually. Specifying + * SourceRegion autogenerates a presigned URL that is a valid request + * for the operation that can run in the source Amazon Web Services Region.

              *
              */ PreSignedUrl?: string; @@ -2755,7 +2775,8 @@ export interface VpcSecurityGroupMembership { VpcSecurityGroupId?: string; /** - *

              The status of the VPC security group.

              + *

              The membership status of the VPC security group.

              + *

              Currently, the only valid status is active.

              */ Status?: string; } @@ -3773,18 +3794,24 @@ export interface CreateDBClusterMessage { KmsKeyId?: string; /** - *

              A URL that contains a Signature Version 4 signed request for - * the CreateDBCluster action to be called in the source Amazon Web Services Region where the DB cluster is replicated from. - * Specify PreSignedUrl only when you are performing cross-Region replication from an encrypted DB cluster.

              - *

              The pre-signed URL must be a valid request for the CreateDBCluster API action - * that can be executed in the source Amazon Web Services Region that contains the encrypted DB cluster to be copied.

              - *

              The pre-signed URL request must contain the following parameter values:

              + *

          When you are replicating a DB cluster from one Amazon Web Services GovCloud (US) Region to another, + * a URL that contains a Signature Version 4 signed request for the + * CreateDBCluster operation to be called in the source Amazon Web Services Region where + * the DB cluster is replicated from. Specify PreSignedUrl only when you are + * performing cross-Region replication from an encrypted DB cluster.

              + * + *

              The presigned URL must be a valid request for the CreateDBCluster API + * operation that can run in the source Amazon Web Services Region that contains the encrypted DB + * cluster to copy.

              + *

              The presigned URL request must contain the following parameter values:

              *
                *
              • *

                - * KmsKeyId - The Amazon Web Services KMS key identifier for the KMS key to use to encrypt the copy of - * the DB cluster in the destination Amazon Web Services Region. This should refer to the same KMS key for both the CreateDBCluster - * action that is called in the destination Amazon Web Services Region, and the action contained in the pre-signed URL.

                + * KmsKeyId - The KMS key identifier for the KMS key to use to + * encrypt the copy of the DB cluster in the destination Amazon Web Services Region. This should + * refer to the same KMS key for both the CreateDBCluster operation + * that is called in the destination Amazon Web Services Region, and the operation contained in + * the presigned URL.

                *
              • *
              • *

                @@ -3805,9 +3832,11 @@ export interface CreateDBClusterMessage { * * Signature Version 4 Signing Process.

                * - *

                If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion (or --source-region for the CLI) - * instead of specifying PreSignedUrl manually. Specifying SourceRegion autogenerates a pre-signed URL that is a valid - * request for the operation that can be executed in the source Amazon Web Services Region.

                + *

                If you are using an Amazon Web Services SDK tool or the CLI, you can specify + * SourceRegion (or --source-region for the CLI) + * instead of specifying PreSignedUrl manually. Specifying + * SourceRegion autogenerates a presigned URL that is a valid request + * for the operation that can run in the source Amazon Web Services Region.

                *
                *

                Valid for: Aurora DB clusters only

                */ @@ -3819,7 +3848,7 @@ export interface CreateDBClusterMessage { * enabled.

                *

                For more information, see * - * IAM Database Authentication in the Amazon Aurora User Guide..

                + * IAM Database Authentication in the Amazon Aurora User Guide.

                *

                Valid for: Aurora DB clusters only

                */ EnableIAMDatabaseAuthentication?: boolean; @@ -3871,6 +3900,7 @@ export interface CreateDBClusterMessage { *

                The global engine mode isn't required for Aurora MySQL version 1.22 and higher 1.x versions, * and global engine mode isn't required for any 2.x versions.

                *

                The multimaster engine mode only applies for DB clusters created with Aurora MySQL version 5.6.10a.

                + *

                The serverless engine mode only applies for Aurora Serverless v1 DB clusters.

                *

                For Aurora PostgreSQL, the global engine mode isn't required, and both the parallelquery * and the multimaster engine modes currently aren't supported.

                *

                Limitations and requirements apply to some DB engine modes. For more information, see the @@ -3878,26 +3908,31 @@ export interface CreateDBClusterMessage { *

                @@ -4089,7 +4124,35 @@ export interface CreateDBClusterMessage { PerformanceInsightsKMSKeyId?: string; /** - *

                The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).

                + *

                The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:

                + *
                  + *
                • + *

                  7

                  + *
                • + *
                • + *

                  + * month * 31, where month is a number of months from 1-23

                  + *
                • + *
                • + *

                  731

                  + *
                • + *
                + *

                For example, the following values are valid:

                + *
                  + *
                • + *

                  93 (3 months * 31)

                  + *
                • + *
                • + *

                  341 (11 months * 31)

                  + *
                • + *
                • + *

                  589 (19 months * 31)

                  + *
                • + *
                • + *

                  731

                  + *
                • + *
                + *

                If you specify a retention period such as 94, which isn't a valid value, RDS issues an error.

                *

                Valid for: Multi-AZ DB clusters only

                */ PerformanceInsightsRetentionPeriod?: number; @@ -4332,8 +4395,7 @@ export namespace ClusterPendingModifiedValues { */ export interface ScalingConfigurationInfo { /** - *

                The maximum capacity for the Aurora DB cluster in serverless DB engine - * mode.

                + *

                The minimum capacity for an Aurora DB cluster in serverless DB engine mode.

                */ MinCapacity?: number; @@ -4828,7 +4890,34 @@ export interface DBCluster { PerformanceInsightsKMSKeyId?: string; /** - *

                The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).

                + *

                The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:

                + *
                  + *
                • + *

                  7

                  + *
                • + *
                • + *

                  + * month * 31, where month is a number of months from 1-23

                  + *
                • + *
                • + *

                  731

                  + *
                • + *
                + *

                For example, the following values are valid:

                + *
                  + *
                • + *

                  93 (3 months * 31)

                  + *
                • + *
                • + *

                  341 (11 months * 31)

                  + *
                • + *
                • + *

                  589 (19 months * 31)

                  + *
                • + *
                • + *

                  731

                  + *
                • + *
                *

                This setting is only for non-Aurora Multi-AZ DB clusters.

                */ PerformanceInsightsRetentionPeriod?: number; @@ -5819,11 +5908,13 @@ export interface CreateDBInstanceMessage { AllocatedStorage?: number; /** - *

                The compute and memory capacity of the DB instance, for example db.m4.large. + *

                The compute and memory capacity of the DB instance, for example db.m5.large. * Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. - * For the full list of DB instance classes, - * and availability for your engine, see - * DB Instance Class in the Amazon RDS User Guide.

                + * For the full list of DB instance classes, and availability for your engine, see + * DB instance + * classes in the Amazon RDS User Guide or + * Aurora + * DB instance classes in the Amazon Aurora User Guide.

                */ DBInstanceClass: string | undefined; @@ -5983,7 +6074,8 @@ export interface CreateDBInstanceMessage { /** *

                A list of DB security groups to associate with this DB instance.

                - *

                Default: The default DB security group for the database engine.

                + *

                This setting applies to the legacy EC2-Classic platform, which is no longer used to create + * new DB instances. Use the VpcSecurityGroupIds setting instead.

                */ DBSecurityGroups?: string[]; @@ -6074,7 +6166,7 @@ export interface CreateDBInstanceMessage { *

                Can't be set to 0 if the DB instance is a source to read replicas

                *
              • *
              • - *

                Can't be set to 0 or 35 for an RDS Custom for Oracle DB instance

                + *

                Can't be set to 0 for an RDS Custom for Oracle DB instance

                *
              • *
              */ @@ -6165,12 +6257,17 @@ export interface CreateDBInstanceMessage { *

              A value that indicates whether the DB instance is a Multi-AZ deployment. You can't set * the AvailabilityZone parameter if the DB instance is a Multi-AZ deployment.

              *

              This setting doesn't apply to RDS Custom.

              + *

              + * Amazon Aurora + *

              + *

              Not applicable. DB instance Availability Zones (AZs) are managed by the DB cluster.

              */ MultiAZ?: boolean; /** *

              The version number of the database engine to use.

              - *

              For a list of valid engine versions, use the DescribeDBEngineVersions action.

              + *

              For a list of valid engine versions, use the DescribeDBEngineVersions + * operation.

              *

              The following are the database engines and links to information about the major and minor versions that are available with * Amazon RDS. Not every database engine is available for every Amazon Web Services Region.

              *

              @@ -6232,6 +6329,10 @@ export interface CreateDBInstanceMessage { *

              Valid values: license-included | bring-your-own-license | general-public-license *

              *

              This setting doesn't apply to RDS Custom.

              + *

              + * Amazon Aurora + *

              + *

              Not applicable.

              */ LicenseModel?: string; @@ -6241,6 +6342,10 @@ export interface CreateDBInstanceMessage { *

              Constraints: For MariaDB, MySQL, Oracle, and PostgreSQL DB instances, must be a multiple between .5 and 50 * of the storage amount for the DB instance. For SQL Server DB instances, must be a multiple between 1 and 50 * of the storage amount for the DB instance.

              + *

              + * Amazon Aurora + *

              + *

              Not applicable. Storage is managed by the DB cluster.

              */ Iops?: number; @@ -6250,6 +6355,10 @@ export interface CreateDBInstanceMessage { * from an option group. Also, that option group can't be removed from a DB instance after it is * associated with a DB instance.

              *

              This setting doesn't apply to RDS Custom.

              + *

              + * Amazon Aurora + *

              + *

              Not applicable.

              */ OptionGroupName?: string; @@ -6321,12 +6430,20 @@ export interface CreateDBInstanceMessage { *

              Default: io1 if the Iops parameter * is specified, otherwise gp2 *

              + *

              + * Amazon Aurora + *

              + *

              Not applicable. Storage is managed by the DB cluster.

              */ StorageType?: string; /** *

              The ARN from the key store with which to associate the instance for TDE encryption.

              *

              This setting doesn't apply to RDS Custom.

              + *

              + * Amazon Aurora + *

              + *

              Not applicable.

              */ TdeCredentialArn?: string; @@ -6376,6 +6493,10 @@ export interface CreateDBInstanceMessage { *

              For more information, see * Kerberos Authentication in the Amazon RDS User Guide.

              *

              This setting doesn't apply to RDS Custom.

              + *

              + * Amazon Aurora + *

              + *

              Not applicable. The domain is managed by the DB cluster.

              */ Domain?: string; @@ -6413,6 +6534,10 @@ export interface CreateDBInstanceMessage { /** *

              Specify the name of the IAM role to be used when making API calls to the Directory Service.

              *

              This setting doesn't apply to RDS Custom.

              + *

              + * Amazon Aurora + *

              + *

              Not applicable. The domain is managed by the DB cluster.

              */ DomainIAMRoleName?: string; @@ -6437,11 +6562,14 @@ export interface CreateDBInstanceMessage { /** *

              A value that indicates whether to enable mapping of Amazon Web Services Identity and Access Management * (IAM) accounts to database accounts. By default, mapping isn't enabled.

              - *

              This setting doesn't apply to RDS Custom or Amazon Aurora. In Aurora, mapping Amazon Web Services IAM accounts - * to database accounts is managed by the DB cluster.

              *

              For more information, see - * - * IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.

              + * + * IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.

              + *

              This setting doesn't apply to RDS Custom.

              + *

              + * Amazon Aurora + *

              + *

              Not applicable. Mapping Amazon Web Services IAM accounts to database accounts is managed by the DB cluster.

              */ EnableIAMDatabaseAuthentication?: boolean; @@ -6463,7 +6591,35 @@ export interface CreateDBInstanceMessage { PerformanceInsightsKMSKeyId?: string; /** - *

              The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).

              + *

              The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:

              + *
                + *
              • + *

                7

                + *
              • + *
              • + *

                + * month * 31, where month is a number of months from 1-23

                + *
              • + *
              • + *

                731

                + *
              • + *
              + *

              For example, the following values are valid:

              + *
                + *
              • + *

                93 (3 months * 31)

                + *
              • + *
              • + *

                341 (11 months * 31)

                + *
              • + *
              • + *

                589 (19 months * 31)

                + *
              • + *
              • + *

                731

                + *
              • + *
              + *

              If you specify a retention period such as 94, which isn't a valid value, RDS issues an error.

              *

              This setting doesn't apply to RDS Custom.

              */ PerformanceInsightsRetentionPeriod?: number; @@ -6508,6 +6664,10 @@ export interface CreateDBInstanceMessage { /** *

              The number of CPU cores and the number of threads per core for the DB instance class of the DB instance.

              *

              This setting doesn't apply to RDS Custom.

              + *

              + * Amazon Aurora + *

              + *

              Not applicable.

              */ ProcessorFeatures?: ProcessorFeature[]; @@ -6533,6 +6693,10 @@ export interface CreateDBInstanceMessage { * Managing capacity automatically with Amazon RDS storage autoscaling * in the Amazon RDS User Guide.

              *

              This setting doesn't apply to RDS Custom.

              + *

              + * Amazon Aurora + *

              + *

              Not applicable. Storage is managed by the DB cluster.

              */ MaxAllocatedStorage?: number; @@ -7482,7 +7646,34 @@ export interface DBInstance { PerformanceInsightsKMSKeyId?: string; /** - *

              The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).

              + *

              The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:

              + *
                + *
              • + *

                7

                + *
              • + *
              • + *

                + * month * 31, where month is a number of months from 1-23

                + *
              • + *
              • + *

                731

                + *
              • + *
              + *

              For example, the following values are valid:

              + *
                + *
              • + *

                93 (3 months * 31)

                + *
              • + *
              • + *

                341 (11 months * 31)

                + *
              • + *
              • + *

                589 (19 months * 31)

                + *
              • + *
              • + *

                731

                + *
              • + *
              */ PerformanceInsightsRetentionPeriod?: number; @@ -7910,8 +8101,8 @@ export interface CreateDBInstanceReadReplicaMessage { * uses the DBParameterGroup of source DB instance for a same Region read * replica, or the default DBParameterGroup for the specified DB engine for a * cross-Region read replica.

              - *

              Specifying a parameter group for this operation is only supported for Oracle DB instances. It - * isn't supported for RDS Custom.

              + *

              Specifying a parameter group for this operation is only supported for MySQL and Oracle DB instances. + * It isn't supported for RDS Custom.

              *

              Constraints:

              *
                *
              • @@ -8045,36 +8236,42 @@ export interface CreateDBInstanceReadReplicaMessage { KmsKeyId?: string; /** - *

                The URL that contains a Signature Version 4 signed request for the CreateDBInstanceReadReplica API action - * in the source Amazon Web Services Region that contains the source DB instance.

                + *

                When you are creating a read replica from one Amazon Web Services GovCloud (US) Region to another or + * from one China Amazon Web Services Region to another, the URL that contains a Signature Version 4 + * signed request for the CreateDBInstanceReadReplica API operation in the + * source Amazon Web Services Region that contains the source DB instance.

                + *

                This setting applies only to Amazon Web Services GovCloud (US) Regions and + * China Amazon Web Services Regions. It's ignored in other Amazon Web Services Regions.

                *

                You must specify this parameter when you create an encrypted read replica from * another Amazon Web Services Region by using the Amazon RDS API. Don't specify * PreSignedUrl when you are creating an encrypted read replica in the * same Amazon Web Services Region.

                - *

                The presigned URL must be a valid request for the CreateDBInstanceReadReplica API action - * that can be executed in the source Amazon Web Services Region that contains the encrypted source DB instance. - * The presigned URL request must contain the following parameter values:

                + *

                The presigned URL must be a valid request for the + * CreateDBInstanceReadReplica API operation that can run in the + * source Amazon Web Services Region that contains the encrypted source DB instance. The presigned URL + * request must contain the following parameter values:

                *
                  *
                • *

                  * DestinationRegion - The Amazon Web Services Region that the encrypted read * replica is created in. This Amazon Web Services Region is the same one where the - * CreateDBInstanceReadReplica action is called that contains this presigned URL.

                  - *

                  For example, if you create an encrypted DB instance in the us-west-1 Amazon Web Services Region, - * from a source DB instance in the us-east-2 Amazon Web Services Region, - * then you call the CreateDBInstanceReadReplica action in - * the us-east-1 Amazon Web Services Region and provide a presigned URL that contains a call to the - * CreateDBInstanceReadReplica action in the us-west-2 Amazon Web Services Region. For this - * example, the DestinationRegion in the presigned URL must be set to - * the us-east-1 Amazon Web Services Region.

                  + * CreateDBInstanceReadReplica operation is called that contains + * this presigned URL.

                  + *

                  For example, if you create an encrypted DB instance in the us-west-1 + * Amazon Web Services Region, from a source DB instance in the us-east-2 Amazon Web Services Region, then you + * call the CreateDBInstanceReadReplica operation in the us-east-1 + * Amazon Web Services Region and provide a presigned URL that contains a call to the + * CreateDBInstanceReadReplica operation in the us-west-2 + * Amazon Web Services Region. For this example, the DestinationRegion in the + * presigned URL must be set to the us-east-1 Amazon Web Services Region.

                  *
                • *
                • *

                  - * KmsKeyId - The Amazon Web Services KMS key identifier for the key to use to + * KmsKeyId - The KMS key identifier for the key to use to * encrypt the read replica in the destination Amazon Web Services Region. This is the same - * identifier for both the CreateDBInstanceReadReplica action that is - * called in the destination Amazon Web Services Region, and the action contained in the presigned - * URL.

                  + * identifier for both the CreateDBInstanceReadReplica operation that + * is called in the destination Amazon Web Services Region, and the operation contained in the + * presigned URL.

                  *
                • *
                • *

                  @@ -8095,9 +8292,9 @@ export interface CreateDBInstanceReadReplicaMessage { * SourceRegion (or --source-region for the CLI) * instead of specifying PreSignedUrl manually. Specifying * SourceRegion autogenerates a presigned URL that is a valid request - * for the operation that can be executed in the source Amazon Web Services Region.

                  + * for the operation that can run in the source Amazon Web Services Region.

                  *

                  - * SourceRegion isn't supported for SQL Server, because SQL Server on Amazon RDS + * SourceRegion isn't supported for SQL Server, because Amazon RDS for SQL Server * doesn't support cross-Region read replicas.

                  * *

                  This setting doesn't apply to RDS Custom.

                  @@ -8133,7 +8330,36 @@ export interface CreateDBInstanceReadReplicaMessage { PerformanceInsightsKMSKeyId?: string; /** - *

                  The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).

                  + *

                  The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:

                  + *
                    + *
                  • + *

                    7

                    + *
                  • + *
                  • + *

                    + * month * 31, where month is a number of months from 1-23

                    + *
                  • + *
                  • + *

                    731

                    + *
                  • + *
                  + *

                  For example, the following values are valid:

                  + *
                    + *
                  • + *

                    93 (3 months * 31)

                    + *
                  • + *
                  • + *

                    341 (11 months * 31)

                    + *
                  • + *
                  • + *

                    589 (19 months * 31)

                    + *
                  • + *
                  • + *

                    731

                    + *
                  • + *
                  + *

                  If you specify a retention period such as 94, which isn't a valid value, RDS issues an error.

                  + * *

                  This setting doesn't apply to RDS Custom.

                  */ PerformanceInsightsRetentionPeriod?: number; @@ -8538,8 +8764,8 @@ export interface CreateDBProxyRequest { /** *

                  The kinds of databases that the proxy can connect to. * This value determines which database network protocol the proxy recognizes when it interprets - * network traffic to and from the database. - * The engine family applies to MySQL and PostgreSQL for both RDS and Aurora.

                  + * network traffic to and from the database. For Aurora MySQL, RDS for MariaDB, and RDS for MySQL databases, specify MYSQL. + * For Aurora PostgreSQL and RDS for PostgreSQL databases, specify POSTGRESQL.

                  */ EngineFamily: EngineFamily | string | undefined; @@ -8675,7 +8901,9 @@ export interface DBProxy { Status?: DBProxyStatus | string; /** - *

                  The engine family applies to MySQL and PostgreSQL for both RDS and Aurora.

                  + *

                  The kinds of databases that the proxy can connect to. This value determines which database network protocol + * the proxy recognizes when it interprets network traffic to and from the database. MYSQL supports Aurora MySQL, + * RDS for MariaDB, and RDS for MySQL databases. POSTGRESQL supports Aurora PostgreSQL and RDS for PostgreSQL databases.

                  */ EngineFamily?: string; @@ -9597,8 +9825,9 @@ export interface CreateGlobalClusterMessage { DeletionProtection?: boolean; /** - *

                  The name for your database of up to 64 alpha-numeric characters. If you do not provide a name, Amazon - * Aurora will not create a database in the global database cluster you are creating.

                  + *

                  The name for your database of up to 64 alphanumeric characters. If you do not provide + * a name, Amazon Aurora will not create a database in the global database cluster you are + * creating.

                  */ DatabaseName?: string; diff --git a/clients/client-rds/src/models/models_1.ts b/clients/client-rds/src/models/models_1.ts index 3738acca2f311..02e3a869c0c33 100644 --- a/clients/client-rds/src/models/models_1.ts +++ b/clients/client-rds/src/models/models_1.ts @@ -117,7 +117,6 @@ export namespace DBSnapshotMessage { export interface DescribeDBSnapshotsMessage { /** *

                  The ID of the DB instance to retrieve the list of DB snapshots for. - * This parameter can't be used in conjunction with DBSnapshotIdentifier. * This parameter isn't case-sensitive.

                  *

                  Constraints:

                  *
                    @@ -129,7 +128,7 @@ export interface DescribeDBSnapshotsMessage { DBInstanceIdentifier?: string; /** - *

                    A specific DB snapshot identifier to describe. This parameter can't be used in conjunction with DBInstanceIdentifier. + *

                    A specific DB snapshot identifier to describe. * This value is stored as a lowercase string.

                    *

                    Constraints:

                    *
                      @@ -710,7 +709,8 @@ export namespace DescribeEventCategoriesMessage { } /** - *

                      Contains the results of a successful invocation of the DescribeEventCategories operation.

                      + *

                      Contains the results of a successful invocation of the DescribeEventCategories + * operation.

                      */ export interface EventCategoriesMap { /** @@ -870,7 +870,7 @@ export namespace DescribeEventsMessage { } /** - *

                      This data type is used as a response element in the DescribeEvents action.

                      + *

                      This data type is used as a response element in the DescribeEvents action.

                      */ export interface Event { /** @@ -1789,7 +1789,7 @@ export interface DescribeOrderableDBInstanceOptionsMessage { * a pagination token called a marker is included in the response so that * you can retrieve the remaining results.

                      *

                      Default: 100

                      - *

                      Constraints: Minimum 20, maximum 100.

                      + *

                      Constraints: Minimum 20, maximum 10000.

                      */ MaxRecords?: number; @@ -3766,7 +3766,35 @@ export interface ModifyDBClusterMessage { PerformanceInsightsKMSKeyId?: string; /** - *

                      The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).

                      + *

                      The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:

                      + *
                        + *
                      • + *

                        7

                        + *
                      • + *
                      • + *

                        + * month * 31, where month is a number of months from 1-23

                        + *
                      • + *
                      • + *

                        731

                        + *
                      • + *
                      + *

                      For example, the following values are valid:

                      + *
                        + *
                      • + *

                        93 (3 months * 31)

                        + *
                      • + *
                      • + *

                        341 (11 months * 31)

                        + *
                      • + *
                      • + *

                        589 (19 months * 31)

                        + *
                      • + *
                      • + *

                        731

                        + *
                      • + *
                      + *

                      If you specify a retention period such as 94, which isn't a valid value, RDS issues an error.

                      *

                      Valid for: Multi-AZ DB clusters only

                      */ PerformanceInsightsRetentionPeriod?: number; @@ -3937,7 +3965,7 @@ export interface ModifyDBClusterSnapshotAttributeMessage { * set this value to restore.

                      * *

                      To view the list of attributes available to modify, use the - * DescribeDBClusterSnapshotAttributes API action.

                      + * DescribeDBClusterSnapshotAttributes API operation.

                      *
                      */ AttributeName: string | undefined; @@ -4059,11 +4087,13 @@ export interface ModifyDBInstanceMessage { AllocatedStorage?: number; /** - *

                      The new compute and memory capacity of the DB instance, for example db.m4.large. + *

                      The new compute and memory capacity of the DB instance, for example db.m5.large. * Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. - * For the full list of DB instance classes, - * and availability for your engine, see - * DB Instance Class in the Amazon RDS User Guide.

                      + * For the full list of DB instance classes, and availability for your engine, see + * DB instance + * classes in the Amazon RDS User Guide or + * Aurora + * DB instance classes in the Amazon Aurora User Guide.

                      *

                      If you modify the DB instance class, an outage occurs during the change. * The change is applied during the next maintenance window, * unless ApplyImmediately is enabled for this request.

                      @@ -4168,7 +4198,7 @@ export interface ModifyDBInstanceMessage { *

                      *

                      Constraints: Must contain from 8 to 128 characters.

                      * - *

                      Amazon RDS API actions never return the password, + *

                      Amazon RDS API operations never return the password, * so this action provides a way to regain access to a primary instance user if the password is lost. * This includes restoring privileges that might have been accidentally revoked.

                      *
                      @@ -4207,7 +4237,7 @@ export interface ModifyDBInstanceMessage { *
                        *
                      • *

                        It must be a value from 0 to 35. It can't be set to 0 if the DB instance is a source to - * read replicas. It can't be set to 0 or 35 for an RDS Custom for Oracle DB instance.

                        + * read replicas. It can't be set to 0 for an RDS Custom for Oracle DB instance.

                        *
                      • *
                      • *

                        It can be specified for a MySQL read replica only if the source is running MySQL 5.6 or @@ -4585,7 +4615,7 @@ export interface ModifyDBInstanceMessage { /** *

                        A value that indicates whether to enable Performance Insights for the DB instance.

                        *

                        For more information, see - * Using Amazon Performance Insights in the Amazon RDS User Guide..

                        + * Using Amazon Performance Insights in the Amazon RDS User Guide.

                        *

                        This setting doesn't apply to RDS Custom.

                        */ EnablePerformanceInsights?: boolean; @@ -4601,7 +4631,35 @@ export interface ModifyDBInstanceMessage { PerformanceInsightsKMSKeyId?: string; /** - *

                        The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).

                        + *

                        The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:

                        + *
                          + *
                        • + *

                          7

                          + *
                        • + *
                        • + *

                          + * month * 31, where month is a number of months from 1-23

                          + *
                        • + *
                        • + *

                          731

                          + *
                        • + *
                        + *

                        For example, the following values are valid:

                        + *
                          + *
                        • + *

                          93 (3 months * 31)

                          + *
                        • + *
                        • + *

                          341 (11 months * 31)

                          + *
                        • + *
                        • + *

                          589 (19 months * 31)

                          + *
                        • + *
                        • + *

                          731

                          + *
                        • + *
                        + *

                        If you specify a retention period such as 94, which isn't a valid value, RDS issues an error.

                        *

                        This setting doesn't apply to RDS Custom.

                        */ PerformanceInsightsRetentionPeriod?: number; @@ -5023,12 +5081,12 @@ export namespace ConnectionPoolConfiguration { export interface ModifyDBProxyTargetGroupRequest { /** - *

                        The name of the new target group to assign to the proxy.

                        + *

                        The name of the target group to modify.

                        */ TargetGroupName: string | undefined; /** - *

                        The name of the new proxy to which to assign the target group.

                        + *

                        The name of the proxy.

                        */ DBProxyName: string | undefined; @@ -5165,7 +5223,7 @@ export interface ModifyDBSnapshotAttributeMessage { * set this value to restore.

                        * *

                        To view the list of attributes available to modify, use the - * DescribeDBSnapshotAttributes API action.

                        + * DescribeDBSnapshotAttributes API operation.

                        *
                        */ AttributeName: string | undefined; @@ -6370,8 +6428,8 @@ export interface RestoreDBClusterFromS3Message { /** *

                        The name of the database engine to be used for this DB cluster.

                        - *

                        Valid Values: aurora (for MySQL 5.6-compatible Aurora), aurora-mysql (for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora), and aurora-postgresql - *

                        + *

                        Valid Values: aurora (for MySQL 5.6-compatible Aurora) and aurora-mysql + * (for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora)

                        */ Engine: string | undefined; @@ -6385,19 +6443,11 @@ export interface RestoreDBClusterFromS3Message { *

                        * aws rds describe-db-engine-versions --engine aurora-mysql --query "DBEngineVersions[].EngineVersion" *

                        - *

                        To list all of the available engine versions for aurora-postgresql, use the following command:

                        - *

                        - * aws rds describe-db-engine-versions --engine aurora-postgresql --query "DBEngineVersions[].EngineVersion" - *

                        *

                        * Aurora MySQL *

                        - *

                        Example: 5.6.10a, 5.6.mysql_aurora.1.19.2, 5.7.12, 5.7.mysql_aurora.2.04.5, 8.0.mysql_aurora.3.01.0 - *

                        - *

                        - * Aurora PostgreSQL - *

                        - *

                        Example: 9.6.3, 10.7 + *

                        Example: 5.6.10a, 5.6.mysql_aurora.1.19.2, 5.7.mysql_aurora.2.07.1, + * 8.0.mysql_aurora.3.02.0 *

                        */ EngineVersion?: string; @@ -6908,7 +6958,7 @@ export interface RestoreDBClusterFromSnapshotMessage { * Aurora PostgreSQL *

                        *

                        Possible value is postgresql.

                        - *

                        For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide..

                        + *

                        For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.

                        *

                        For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

                        *

                        Valid for: Aurora DB clusters and Multi-AZ DB clusters

                        */ @@ -7283,7 +7333,7 @@ export interface RestoreDBClusterToPointInTimeMessage { * Aurora PostgreSQL *

                        *

                        Possible value is postgresql.

                        - *

                        For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide..

                        + *

                        For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.

                        *

                        For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

                        *

                        Valid for: Aurora DB clusters and Multi-AZ DB clusters

                        */ @@ -8249,7 +8299,7 @@ export interface RestoreDBInstanceFromS3Message { /** *

                        A value that indicates whether to enable Performance Insights for the DB instance.

                        *

                        For more information, see - * Using Amazon Performance Insights in the Amazon RDS User Guide..

                        + * Using Amazon Performance Insights in the Amazon RDS User Guide.

                        */ EnablePerformanceInsights?: boolean; @@ -8263,7 +8313,35 @@ export interface RestoreDBInstanceFromS3Message { PerformanceInsightsKMSKeyId?: string; /** - *

                        The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).

                        + *

                        The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:

                        + *
                          + *
                        • + *

                          7

                          + *
                        • + *
                        • + *

                          + * month * 31, where month is a number of months from 1-23

                          + *
                        • + *
                        • + *

                          731

                          + *
                        • + *
                        + *

                        For example, the following values are valid:

                        + *
                          + *
                        • + *

                          93 (3 months * 31)

                          + *
                        • + *
                        • + *

                          341 (11 months * 31)

                          + *
                        • + *
                        • + *

                          589 (19 months * 31)

                          + *
                        • + *
                        • + *

                          731

                          + *
                        • + *
                        + *

                        If you specify a retention period such as 94, which isn't a valid value, RDS issues an error.

                        */ PerformanceInsightsRetentionPeriod?: number; @@ -9104,10 +9182,25 @@ export interface StartDBInstanceAutomatedBackupsReplicationMessage { KmsKeyId?: string; /** - *

                        A URL that contains a Signature Version 4 signed request for the StartDBInstanceAutomatedBackupsReplication action to be - * called in the Amazon Web Services Region of the source DB instance. The presigned URL must be a valid request for the - * StartDBInstanceAutomatedBackupsReplication API action that can be executed in the Amazon Web Services Region that contains - * the source DB instance.

                        + *

                        In an Amazon Web Services GovCloud (US) Region, an URL that contains a Signature Version 4 signed request + * for the StartDBInstanceAutomatedBackupsReplication operation to call + * in the Amazon Web Services Region of the source DB instance. The presigned URL must be a valid request for the + * StartDBInstanceAutomatedBackupsReplication API operation that can run in + * the Amazon Web Services Region that contains the source DB instance.

                        + *

                        This setting applies only to Amazon Web Services GovCloud (US) Regions. It's ignored in other + * Amazon Web Services Regions.

                        + *

                        To learn how to generate a Signature Version 4 signed request, see + * + * Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and + * + * Signature Version 4 Signing Process.

                        + * + *

                        If you are using an Amazon Web Services SDK tool or the CLI, you can specify + * SourceRegion (or --source-region for the CLI) + * instead of specifying PreSignedUrl manually. Specifying + * SourceRegion autogenerates a presigned URL that is a valid request + * for the operation that can run in the source Amazon Web Services Region.

                        + *
                        */ PreSignedUrl?: string; } diff --git a/codegen/sdk-codegen/aws-models/rds.json b/codegen/sdk-codegen/aws-models/rds.json index 5c8e746132a40..c289c721cae10 100644 --- a/codegen/sdk-codegen/aws-models/rds.json +++ b/codegen/sdk-codegen/aws-models/rds.json @@ -1083,7 +1083,7 @@ } ], "traits": { - "smithy.api#documentation": "

                        Backtracks a DB cluster to a specific time, without creating a new DB cluster.

                        \n

                        For more information on backtracking, see \n \n Backtracking an Aurora DB Cluster in the \n Amazon Aurora User Guide.

                        \n \n

                        This action only applies to Aurora MySQL DB clusters.

                        \n
                        " + "smithy.api#documentation": "

                        Backtracks a DB cluster to a specific time, without creating a new DB cluster.

                        \n

                        For more information on backtracking, see \n \n Backtracking an Aurora DB Cluster in the \n Amazon Aurora User Guide.

                        \n \n

                        This action applies only to Aurora MySQL DB clusters.

                        \n
                        " } }, "com.amazonaws.rds#BacktrackDBClusterMessage": { @@ -1532,7 +1532,7 @@ } ], "traits": { - "smithy.api#documentation": "

                        Copies a snapshot of a DB cluster.

                        \n

                        To copy a DB cluster snapshot from a shared manual DB cluster snapshot, SourceDBClusterSnapshotIdentifier\n must be the Amazon Resource Name (ARN) of the shared DB cluster snapshot.

                        \n

                        You can copy an encrypted DB cluster snapshot from another Amazon Web Services Region. In that case, the Amazon Web Services Region where you call the CopyDBClusterSnapshot action \n is the destination Amazon Web Services Region for the encrypted DB cluster snapshot to be copied to. To copy an encrypted DB cluster snapshot from another Amazon Web Services Region, \n you must provide the following values:

                        \n
                          \n
                        • \n

                          \n KmsKeyId - The Amazon Web Services Key Management System (Amazon Web Services KMS) key identifier for the key to use to \n encrypt the copy of the DB cluster snapshot in the destination Amazon Web Services Region.

                          \n
                        • \n
                        • \n

                          \n PreSignedUrl - A URL that contains a Signature Version 4 signed request for the \n CopyDBClusterSnapshot action to be called in the source Amazon Web Services Region where the DB cluster snapshot is copied from. \n The pre-signed URL must be a valid request for the CopyDBClusterSnapshot API action that can be executed in the \n source Amazon Web Services Region that contains the encrypted DB cluster snapshot to be copied.

                          \n

                          The pre-signed URL request must contain the following parameter values:

                          \n
                            \n
                          • \n

                            \n KmsKeyId - The Amazon Web Services KMS key identifier for the KMS key to use to encrypt the copy of the DB \n cluster snapshot in the destination Amazon Web Services Region. This is the same identifier for both the CopyDBClusterSnapshot \n action that is called in the destination Amazon Web Services Region, and the action contained in the pre-signed URL.

                            \n
                          • \n
                          • \n

                            \n DestinationRegion - The name of the Amazon Web Services Region that the DB cluster snapshot is to be created in.

                            \n
                          • \n
                          • \n

                            \n SourceDBClusterSnapshotIdentifier - The DB cluster snapshot identifier for the encrypted DB cluster \n snapshot to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source Amazon Web Services Region. For example, \n if you are copying an encrypted DB cluster snapshot from the us-west-2 Amazon Web Services Region, then your SourceDBClusterSnapshotIdentifier\n looks like the following example: arn:aws:rds:us-west-2:123456789012:cluster-snapshot:aurora-cluster1-snapshot-20161115.

                            \n
                          • \n
                          \n

                          To learn how to generate a Signature Version 4 signed request, see \n \n Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and\n \n Signature Version 4 Signing Process.

                          \n \n

                          If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion (or --source-region for the CLI) \n instead of specifying PreSignedUrl manually. Specifying SourceRegion autogenerates a pre-signed URL that is a valid \n request for the operation that can be executed in the source Amazon Web Services Region.

                          \n
                          \n
                        • \n
                        • \n

                          \n TargetDBClusterSnapshotIdentifier - The identifier for the new copy of the DB cluster snapshot in the destination Amazon Web Services Region.

                          \n
                        • \n
                        • \n

                          \n SourceDBClusterSnapshotIdentifier - The DB cluster snapshot identifier for the encrypted DB cluster snapshot to be copied. \n This identifier must be in the ARN format for the source Amazon Web Services Region and is the same value as the SourceDBClusterSnapshotIdentifier in the pre-signed URL.

                          \n
                        • \n
                        \n

                        To cancel the copy operation once it is in progress, delete the target DB cluster snapshot identified\n by TargetDBClusterSnapshotIdentifier while that DB cluster snapshot is in \"copying\" status.

                        \n

                        For more information on copying encrypted Amazon Aurora DB cluster snapshots from one Amazon Web Services Region to another, see \n \n Copying a Snapshot in the Amazon Aurora User Guide.

                        \n

                        For more information on Amazon Aurora DB clusters, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.

                        \n

                        For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.

                        " + "smithy.api#documentation": "

                        Copies a snapshot of a DB cluster.

                        \n

                        To copy a DB cluster snapshot from a shared manual DB cluster snapshot, SourceDBClusterSnapshotIdentifier\n must be the Amazon Resource Name (ARN) of the shared DB cluster snapshot.

                        \n

                        You can copy an encrypted DB cluster snapshot from another Amazon Web Services Region. In that case,\n the Amazon Web Services Region where you call the CopyDBClusterSnapshot operation is the\n destination Amazon Web Services Region for the encrypted DB cluster snapshot to be copied to. To copy\n an encrypted DB cluster snapshot from another Amazon Web Services Region, you must provide the\n following values:

                        \n
                          \n
                        • \n

                          \n KmsKeyId - The Amazon Web Services Key Management System (Amazon Web Services KMS) key identifier for the key to use to \n encrypt the copy of the DB cluster snapshot in the destination Amazon Web Services Region.

                          \n
                        • \n
                        • \n

                          \n TargetDBClusterSnapshotIdentifier - The identifier for the new copy of the DB cluster snapshot in the destination Amazon Web Services Region.

                          \n
                        • \n
                        • \n

                          \n SourceDBClusterSnapshotIdentifier - The DB cluster snapshot\n identifier for the encrypted DB cluster snapshot to be copied. This identifier\n must be in the ARN format for the source Amazon Web Services Region and is the same value as\n the SourceDBClusterSnapshotIdentifier in the presigned URL.

                          \n
                        • \n
                        \n

                        To cancel the copy operation once it is in progress, delete the target DB cluster snapshot identified\n by TargetDBClusterSnapshotIdentifier while that DB cluster snapshot is in \"copying\" status.

                        \n

                        For more information on copying encrypted Amazon Aurora DB cluster snapshots from one Amazon Web Services Region to another, see \n \n Copying a Snapshot in the Amazon Aurora User Guide.

                        \n

                        For more information on Amazon Aurora DB clusters, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.

                        \n

                        For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.

                        " } }, "com.amazonaws.rds#CopyDBClusterSnapshotMessage": { @@ -1561,7 +1561,7 @@ "PreSignedUrl": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

                        The URL that contains a Signature Version 4 signed request for the CopyDBClusterSnapshot API action in the Amazon Web Services Region that contains the \n source DB cluster snapshot to copy. The PreSignedUrl parameter must be used when copying an encrypted DB cluster snapshot from another Amazon Web Services Region. \n Don't specify PreSignedUrl when you are copying an encrypted DB cluster snapshot in the same Amazon Web Services Region.

                        \n

                        The pre-signed URL must be a valid request for the CopyDBClusterSnapshot API action that can be\n executed in the source Amazon Web Services Region that contains the encrypted DB cluster snapshot to be copied. \n The pre-signed URL request must contain the following parameter values:

                        \n
                          \n
                        • \n

                          \n KmsKeyId - The Amazon Web Services KMS key identifier for the KMS key to use to encrypt the copy of the DB \n cluster snapshot in the destination Amazon Web Services Region. This is the same identifier for both the CopyDBClusterSnapshot \n action that is called in the destination Amazon Web Services Region, and the action contained in the pre-signed URL.

                          \n
                        • \n
                        • \n

                          \n DestinationRegion - The name of the Amazon Web Services Region that the DB cluster snapshot is to be created in.

                          \n
                        • \n
                        • \n

                          \n SourceDBClusterSnapshotIdentifier - The DB cluster snapshot identifier for the encrypted DB cluster \n snapshot to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source Amazon Web Services Region. For example, \n if you are copying an encrypted DB cluster snapshot from the us-west-2 Amazon Web Services Region, then your SourceDBClusterSnapshotIdentifier\n looks like the following example: arn:aws:rds:us-west-2:123456789012:cluster-snapshot:aurora-cluster1-snapshot-20161115.

                          \n
                        • \n
                        \n

                        To learn how to generate a Signature Version 4 signed request, see \n\n \n Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and\n \n Signature Version 4 Signing Process.

                        \n \n

                        If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion (or --source-region for the CLI) \n instead of specifying PreSignedUrl manually. Specifying SourceRegion autogenerates a pre-signed URL that is a valid \n request for the operation that can be executed in the source Amazon Web Services Region.

                        \n
                        " + "smithy.api#documentation": "

                        When you are copying a DB cluster snapshot from one Amazon Web Services GovCloud (US) Region\n to another, the URL that contains a Signature Version 4 signed request for the\n CopyDBClusterSnapshot API operation in the Amazon Web Services Region that contains\n the source DB cluster snapshot to copy. Use the PreSignedUrl parameter when\n copying an encrypted DB cluster snapshot from another Amazon Web Services Region. Don't specify\n PreSignedUrl when copying an encrypted DB cluster snapshot in the same\n Amazon Web Services Region.

                        \n

                        This setting applies only to Amazon Web Services GovCloud (US) Regions. It's ignored in other\n Amazon Web Services Regions.

                        \n

                        The presigned URL must be a valid request for the\n CopyDBClusterSnapshot API operation that can run in the source\n Amazon Web Services Region that contains the encrypted DB cluster snapshot to copy. The presigned URL request\n must contain the following parameter values:

                        \n
                          \n
                        • \n

                          \n KmsKeyId - The KMS key identifier for the KMS key\n to use to encrypt the copy of the DB cluster snapshot in the destination\n Amazon Web Services Region. This is the same identifier for both the\n CopyDBClusterSnapshot operation that is called in the\n destination Amazon Web Services Region, and the operation contained in the presigned\n URL.

                          \n
                        • \n
                        • \n

                          \n DestinationRegion - The name of the Amazon Web Services Region \n that the DB cluster snapshot is to be created in.

                          \n
                        • \n
                        • \n

                          \n SourceDBClusterSnapshotIdentifier - The DB cluster snapshot identifier for the encrypted DB cluster \n snapshot to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source Amazon Web Services Region. For example, \n if you are copying an encrypted DB cluster snapshot from the us-west-2 Amazon Web Services Region, then your SourceDBClusterSnapshotIdentifier\n looks like the following example: arn:aws:rds:us-west-2:123456789012:cluster-snapshot:aurora-cluster1-snapshot-20161115.

                          \n
                        • \n
                        \n

                        To learn how to generate a Signature Version 4 signed request, see \n \n Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and\n \n Signature Version 4 Signing Process.

                        \n \n

                        If you are using an Amazon Web Services SDK tool or the CLI, you can specify\n SourceRegion (or --source-region for the CLI)\n instead of specifying PreSignedUrl manually. Specifying\n SourceRegion autogenerates a presigned URL that is a valid request\n for the operation that can run in the source Amazon Web Services Region.

                        \n
                        " } }, "CopyTags": { @@ -1678,7 +1678,7 @@ } ], "traits": { - "smithy.api#documentation": "

                        Copies the specified DB snapshot. The source DB snapshot must be in the available state.

                        \n

                        You can copy a snapshot from one Amazon Web Services Region to another. In that case, the Amazon Web Services Region\n where you call the CopyDBSnapshot action is the destination Amazon Web Services Region for the\n DB snapshot copy.

                        \n

                        This command doesn't apply to RDS Custom.

                        \n

                        For more information about copying snapshots, see \n Copying a DB Snapshot in the Amazon RDS User Guide.

                        " + "smithy.api#documentation": "

                        Copies the specified DB snapshot. The source DB snapshot must be in the available state.

                        \n

                        You can copy a snapshot from one Amazon Web Services Region to another. In that case, the\n Amazon Web Services Region where you call the CopyDBSnapshot operation is the destination\n Amazon Web Services Region for the DB snapshot copy.

                        \n

                        This command doesn't apply to RDS Custom.

                        \n

                        For more information about copying snapshots, see \n Copying a DB Snapshot in the Amazon RDS User Guide.

                        " } }, "com.amazonaws.rds#CopyDBSnapshotMessage": { @@ -1687,7 +1687,7 @@ "SourceDBSnapshotIdentifier": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

                        The identifier for the source DB snapshot.

                        \n

                        If the source snapshot is in the same Amazon Web Services Region as the copy, specify a valid DB\n snapshot identifier. For example, you might specify\n rds:mysql-instance1-snapshot-20130805.

                        \n

                        If the source snapshot is in a different Amazon Web Services Region than the copy, specify a valid DB\n snapshot ARN. For example, you might specify\n arn:aws:rds:us-west-2:123456789012:snapshot:mysql-instance1-snapshot-20130805.

                        \n

                        If you are copying from a shared manual DB snapshot, \n this parameter must be the Amazon Resource Name (ARN) of the shared DB snapshot.

                        \n

                        If you are copying an encrypted snapshot\n this parameter must be in the ARN format for the source Amazon Web Services Region, \n and must match the SourceDBSnapshotIdentifier in the PreSignedUrl parameter.

                        \n

                        Constraints:

                        \n
                          \n
                        • \n

                          Must specify a valid system snapshot in the \"available\" state.

                          \n
                        • \n
                        \n

                        Example: rds:mydb-2012-04-02-00-01\n

                        \n

                        Example: arn:aws:rds:us-west-2:123456789012:snapshot:mysql-instance1-snapshot-20130805\n

                        ", + "smithy.api#documentation": "

                        The identifier for the source DB snapshot.

                        \n

                        If the source snapshot is in the same Amazon Web Services Region as the copy, specify a valid DB\n snapshot identifier. For example, you might specify\n rds:mysql-instance1-snapshot-20130805.

                        \n

                        If the source snapshot is in a different Amazon Web Services Region than the copy, specify a valid DB\n snapshot ARN. For example, you might specify\n arn:aws:rds:us-west-2:123456789012:snapshot:mysql-instance1-snapshot-20130805.

                        \n

                        If you are copying from a shared manual DB snapshot, \n this parameter must be the Amazon Resource Name (ARN) of the shared DB snapshot.

                        \n

                        If you are copying an encrypted snapshot this parameter must be in the ARN format for the source Amazon Web Services Region.

                        \n

                        Constraints:

                        \n
                          \n
                        • \n

                          Must specify a valid system snapshot in the \"available\" state.

                          \n
                        • \n
                        \n

                        Example: rds:mydb-2012-04-02-00-01\n

                        \n

                        Example: arn:aws:rds:us-west-2:123456789012:snapshot:mysql-instance1-snapshot-20130805\n

                        ", "smithy.api#required": {} } }, @@ -1716,7 +1716,7 @@ "PreSignedUrl": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

                        The URL that contains a Signature Version 4 signed request for the\n CopyDBSnapshot API action in the source Amazon Web Services Region that contains the\n source DB snapshot to copy.

                        \n

                        You must specify this parameter when you copy an encrypted DB snapshot from another\n Amazon Web Services Region by using the Amazon RDS API. Don't specify PreSignedUrl when you are \n copying an encrypted DB snapshot in the same Amazon Web Services Region.

                        \n

                        The presigned URL must be a valid request for the CopyDBSnapshot API action \n that can be executed in the source Amazon Web Services Region that contains the encrypted DB snapshot to be copied. \n The presigned URL request must contain the following parameter values:

                        \n
                          \n
                        • \n

                          \n DestinationRegion - The Amazon Web Services Region that the encrypted DB snapshot is copied to. \n This Amazon Web Services Region is the same one where the CopyDBSnapshot action is called that contains this presigned URL.

                          \n

                          For example, if you copy an encrypted DB snapshot from the us-west-2 Amazon Web Services Region\n to the us-east-1 Amazon Web Services Region, then you call the CopyDBSnapshot action in\n the us-east-1 Amazon Web Services Region and provide a presigned URL that contains a call to the\n CopyDBSnapshot action in the us-west-2 Amazon Web Services Region. For this\n example, the DestinationRegion in the presigned URL must be set to\n the us-east-1 Amazon Web Services Region.

                          \n
                        • \n
                        • \n

                          \n KmsKeyId - The Amazon Web Services KMS key identifier for the KMS key to use to encrypt the copy of the DB snapshot in the destination Amazon Web Services Region. \n This is the same identifier for both the CopyDBSnapshot action that is called in the destination Amazon Web Services Region, \n and the action contained in the presigned URL.

                          \n
                        • \n
                        • \n

                          \n SourceDBSnapshotIdentifier - The DB snapshot identifier for the encrypted snapshot to be copied. \n This identifier must be in the Amazon Resource Name (ARN) format for the source Amazon Web Services Region. \n For example, if you are copying an encrypted DB snapshot from the us-west-2 Amazon Web Services Region, then your SourceDBSnapshotIdentifier looks like\n the following example: arn:aws:rds:us-west-2:123456789012:snapshot:mysql-instance1-snapshot-20161115.

                          \n
                        • \n
                        \n

                        To learn how to generate a Signature Version 4 signed request, see \n Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and\n Signature Version 4 Signing Process.

                        \n \n

                        If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion (or --source-region for the CLI) \n instead of specifying PreSignedUrl manually. Specifying SourceRegion autogenerates a pre-signed URL that is a valid \n request for the operation that can be executed in the source Amazon Web Services Region.

                        \n
                        " + "smithy.api#documentation": "

                        When you are copying a snapshot from one Amazon Web Services GovCloud (US) Region to another, \n the URL that contains a Signature Version 4 signed request for the CopyDBSnapshot API \n operation in the source Amazon Web Services Region that contains the source DB snapshot to copy.

                        \n

                        This setting applies only to Amazon Web Services GovCloud (US) Regions. It's ignored in other\n Amazon Web Services Regions.

                        \n

                        You must specify this parameter when you copy an encrypted DB snapshot from another\n Amazon Web Services Region by using the Amazon RDS API. Don't specify PreSignedUrl when you are \n copying an encrypted DB snapshot in the same Amazon Web Services Region.

                        \n

                        The presigned URL must be a valid request for the\n CopyDBClusterSnapshot API operation that can run in the source\n Amazon Web Services Region that contains the encrypted DB cluster snapshot to copy. The presigned URL request\n must contain the following parameter values:

                        \n
                          \n
                        • \n

                          \n DestinationRegion - The Amazon Web Services Region that the encrypted DB\n snapshot is copied to. This Amazon Web Services Region is the same one where the\n CopyDBSnapshot operation is called that contains this presigned\n URL.

                          \n

                          For example, if you copy an encrypted DB snapshot from the us-west-2\n Amazon Web Services Region to the us-east-1 Amazon Web Services Region, then you call the\n CopyDBSnapshot operation in the us-east-1 Amazon Web Services Region and\n provide a presigned URL that contains a call to the CopyDBSnapshot\n operation in the us-west-2 Amazon Web Services Region. For this example, the\n DestinationRegion in the presigned URL must be set to the\n us-east-1 Amazon Web Services Region.

                          \n
                        • \n
                        • \n

                          \n KmsKeyId - The KMS key identifier for the KMS key to use to\n encrypt the copy of the DB snapshot in the destination Amazon Web Services Region. This is the\n same identifier for both the CopyDBSnapshot operation that is\n called in the destination Amazon Web Services Region, and the operation contained in the\n presigned URL.

                          \n
                        • \n
                        • \n

                          \n SourceDBSnapshotIdentifier - The DB snapshot identifier for the encrypted snapshot to be copied. \n This identifier must be in the Amazon Resource Name (ARN) format for the source Amazon Web Services Region. \n For example, if you are copying an encrypted DB snapshot from the us-west-2 Amazon Web Services Region, then your SourceDBSnapshotIdentifier looks like\n the following example: arn:aws:rds:us-west-2:123456789012:snapshot:mysql-instance1-snapshot-20161115.

                          \n
                        • \n
                        \n

                        To learn how to generate a Signature Version 4 signed request, see \n Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and\n Signature Version 4 Signing Process.

                        \n \n

                        If you are using an Amazon Web Services SDK tool or the CLI, you can specify\n SourceRegion (or --source-region for the CLI)\n instead of specifying PreSignedUrl manually. Specifying\n SourceRegion autogenerates a presigned URL that is a valid request\n for the operation that can run in the source Amazon Web Services Region.

                        \n
                        " } }, "OptionGroupName": { @@ -1950,7 +1950,7 @@ } ], "traits": { - "smithy.api#documentation": "

                        Creates a new Amazon Aurora DB cluster or Multi-AZ DB cluster.

                        \n

                        You can use the ReplicationSourceIdentifier parameter to create an Amazon\n Aurora DB cluster as a read replica of another DB cluster or Amazon RDS MySQL or\n PostgreSQL DB instance. For cross-Region replication where the DB cluster identified by\n ReplicationSourceIdentifier is encrypted, also specify the\n PreSignedUrl parameter.

                        \n

                        For more information on Amazon Aurora, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.

                        \n

                        For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.

                        " + "smithy.api#documentation": "

                        Creates a new Amazon Aurora DB cluster or Multi-AZ DB cluster.

                        \n

                        You can use the ReplicationSourceIdentifier parameter to create an Amazon\n Aurora DB cluster as a read replica of another DB cluster or Amazon RDS MySQL or\n PostgreSQL DB instance.

                        \n

                        For more information on Amazon Aurora, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.

                        \n

                        For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.

                        " } }, "com.amazonaws.rds#CreateDBClusterEndpoint": { @@ -1982,7 +1982,7 @@ } ], "traits": { - "smithy.api#documentation": "

                        Creates a new custom endpoint and associates it with an Amazon Aurora DB cluster.

                        \n \n

                        This action only applies to Aurora DB clusters.

                        \n
                        " + "smithy.api#documentation": "

                        Creates a new custom endpoint and associates it with an Amazon Aurora DB cluster.

                        \n \n

                        This action applies only to Aurora DB clusters.

                        \n
                        " } }, "com.amazonaws.rds#CreateDBClusterEndpointMessage": { @@ -2157,13 +2157,13 @@ "PreSignedUrl": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

                        A URL that contains a Signature Version 4 signed request for \n the CreateDBCluster action to be called in the source Amazon Web Services Region where the DB cluster is replicated from. \n Specify PreSignedUrl only when you are performing cross-Region replication from an encrypted DB cluster.

                        \n

                        The pre-signed URL must be a valid request for the CreateDBCluster API action \n that can be executed in the source Amazon Web Services Region that contains the encrypted DB cluster to be copied.

                        \n

                        The pre-signed URL request must contain the following parameter values:

                        \n
                          \n
                        • \n

                          \n KmsKeyId - The Amazon Web Services KMS key identifier for the KMS key to use to encrypt the copy of \n the DB cluster in the destination Amazon Web Services Region. This should refer to the same KMS key for both the CreateDBCluster \n action that is called in the destination Amazon Web Services Region, and the action contained in the pre-signed URL.

                          \n
                        • \n
                        • \n

                          \n DestinationRegion - The name of the Amazon Web Services Region that Aurora read replica will\n be created in.

                          \n
                        • \n
                        • \n

                          \n ReplicationSourceIdentifier - The DB cluster identifier for the encrypted DB cluster to be copied. \n This identifier must be in the Amazon Resource Name (ARN) format for the source Amazon Web Services Region. For example, if you are copying an \n encrypted DB cluster from the us-west-2 Amazon Web Services Region, then your ReplicationSourceIdentifier would look like\n Example: arn:aws:rds:us-west-2:123456789012:cluster:aurora-cluster1.

                          \n
                        • \n
                        \n

                        To learn how to generate a Signature Version 4 signed request, see \n \n Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and\n \n Signature Version 4 Signing Process.

                        \n \n

                        If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion (or --source-region for the CLI) \n instead of specifying PreSignedUrl manually. Specifying SourceRegion autogenerates a pre-signed URL that is a valid \n request for the operation that can be executed in the source Amazon Web Services Region.

                        \n
                        \n

                        Valid for: Aurora DB clusters only

                        " + "smithy.api#documentation": "

                        When you are replicating a DB cluster from one Amazon Web Services GovCloud (US) Region to another,\n an URL that contains a Signature Version 4 signed request for the\n CreateDBCluster operation to be called in the source Amazon Web Services Region where\n the DB cluster is replicated from. Specify PreSignedUrl only when you are\n performing cross-Region replication from an encrypted DB cluster.

                        \n \n

                        The presigned URL must be a valid request for the CreateDBCluster API\n operation that can run in the source Amazon Web Services Region that contains the encrypted DB\n cluster to copy.

                        \n

                        The presigned URL request must contain the following parameter values:

                        \n
                          \n
                        • \n

                          \n KmsKeyId - The KMS key identifier for the KMS key to use to\n encrypt the copy of the DB cluster in the destination Amazon Web Services Region. This should\n refer to the same KMS key for both the CreateDBCluster operation\n that is called in the destination Amazon Web Services Region, and the operation contained in\n the presigned URL.

                          \n
                        • \n
                        • \n

                          \n DestinationRegion - The name of the Amazon Web Services Region that Aurora read replica will\n be created in.

                          \n
                        • \n
                        • \n

                          \n ReplicationSourceIdentifier - The DB cluster identifier for the encrypted DB cluster to be copied. \n This identifier must be in the Amazon Resource Name (ARN) format for the source Amazon Web Services Region. For example, if you are copying an \n encrypted DB cluster from the us-west-2 Amazon Web Services Region, then your ReplicationSourceIdentifier would look like\n Example: arn:aws:rds:us-west-2:123456789012:cluster:aurora-cluster1.

                          \n
                        • \n
                        \n

                        To learn how to generate a Signature Version 4 signed request, see \n \n Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and\n \n Signature Version 4 Signing Process.

                        \n \n

                        If you are using an Amazon Web Services SDK tool or the CLI, you can specify\n SourceRegion (or --source-region for the CLI)\n instead of specifying PreSignedUrl manually. Specifying\n SourceRegion autogenerates a presigned URL that is a valid request\n for the operation that can run in the source Amazon Web Services Region.

                        \n
                        \n

                        Valid for: Aurora DB clusters only

                        " } }, "EnableIAMDatabaseAuthentication": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

                        A value that indicates whether to enable mapping of Amazon Web Services Identity and Access\n Management (IAM) accounts to database accounts. By default, mapping isn't\n enabled.

                        \n

                        For more information, see \n \n IAM Database Authentication in the Amazon Aurora User Guide..

                        \n

                        Valid for: Aurora DB clusters only

                        " + "smithy.api#documentation": "

                        A value that indicates whether to enable mapping of Amazon Web Services Identity and Access\n Management (IAM) accounts to database accounts. By default, mapping isn't\n enabled.

                        \n

                        For more information, see \n \n IAM Database Authentication in the Amazon Aurora User Guide.

                        \n

                        Valid for: Aurora DB clusters only

                        " } }, "BacktrackWindow": { @@ -2181,7 +2181,7 @@ "EngineMode": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

                        The DB engine mode of the DB cluster, either provisioned, serverless, \n parallelquery, global, or multimaster.

                        \n

                        The parallelquery engine mode isn't required for Aurora MySQL version 1.23 and higher 1.x versions, \n and version 2.09 and higher 2.x versions.

                        \n

                        The global engine mode isn't required for Aurora MySQL version 1.22 and higher 1.x versions, \n and global engine mode isn't required for any 2.x versions.

                        \n

                        The multimaster engine mode only applies for DB clusters created with Aurora MySQL version 5.6.10a.

                        \n

                        For Aurora PostgreSQL, the global engine mode isn't required, and both the parallelquery \n and the multimaster engine modes currently aren't supported.

                        \n

                        Limitations and requirements apply to some DB engine modes. For more information, see the \n following sections in the Amazon Aurora User Guide:

                        \n \n

                        Valid for: Aurora DB clusters only

                        " + "smithy.api#documentation": "

                        The DB engine mode of the DB cluster, either provisioned, serverless, \n parallelquery, global, or multimaster.

                        \n

                        The parallelquery engine mode isn't required for Aurora MySQL version 1.23 and higher 1.x versions, \n and version 2.09 and higher 2.x versions.

                        \n

                        The global engine mode isn't required for Aurora MySQL version 1.22 and higher 1.x versions, \n and global engine mode isn't required for any 2.x versions.

                        \n

                        The multimaster engine mode only applies for DB clusters created with Aurora MySQL version 5.6.10a.

                        \n

                        The serverless engine mode only applies for Aurora Serverless v1 DB clusters.

                        \n

                        For Aurora PostgreSQL, the global engine mode isn't required, and both the parallelquery \n and the multimaster engine modes currently aren't supported.

                        \n

                        Limitations and requirements apply to some DB engine modes. For more information, see the \n following sections in the Amazon Aurora User Guide:

                        \n \n

                        Valid for: Aurora DB clusters only

                        " } }, "ScalingConfiguration": { @@ -2295,7 +2295,7 @@ "PerformanceInsightsRetentionPeriod": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "

                        The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).

                        \n

                        Valid for: Multi-AZ DB clusters only

                        " + "smithy.api#documentation": "

                        The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:

                        \n
                          \n
                        • \n

                          7

                          \n
                        • \n
                        • \n

                          \n month * 31, where month is a number of months from 1-23

                          \n
                        • \n
                        • \n

                          731

                          \n
                        • \n
                        \n

                        For example, the following values are valid:

                        \n
                          \n
                        • \n

                          93 (3 months * 31)

                          \n
                        • \n
                        • \n

                          341 (11 months * 31)

                          \n
                        • \n
                        • \n

                          589 (19 months * 31)

                          \n
                        • \n
                        • \n

                          731

                          \n
                        • \n
                        \n

                        If you specify a retention period such as 94, which isn't a valid value, RDS issues an error.

                        \n

                        Valid for: Multi-AZ DB clusters only

                        " } }, "ServerlessV2ScalingConfiguration": { @@ -2323,7 +2323,7 @@ } ], "traits": { - "smithy.api#documentation": "

                        Creates a new DB cluster parameter group.

                        \n

                        Parameters in a DB cluster parameter group apply to all of the instances in a DB cluster.

                        \n

                        A DB cluster parameter group is initially created with the default parameters for the\n database engine used by instances in the DB cluster. To provide custom values for any of the\n parameters, you must modify the group after creating it using\n ModifyDBClusterParameterGroup. Once you've created a DB cluster parameter group, you need to\n associate it with your DB cluster using ModifyDBCluster.

                        \n

                        When you associate a new DB cluster parameter group with a running Aurora DB cluster, reboot the DB\n instances in the DB cluster without failover for the new DB cluster parameter group and \n associated settings to take effect.

                        \n

                        When you associate a new DB cluster parameter group with a running Multi-AZ DB cluster, reboot the DB\n cluster without failover for the new DB cluster parameter group and associated settings to take effect.

                        \n \n

                        After you create a DB cluster parameter group, you should wait at least 5 minutes\n before creating your first DB cluster\n that uses that DB cluster parameter group as the default parameter \n group. This allows Amazon RDS to fully complete the create action before the DB cluster parameter \n group is used as the default for a new DB cluster. This is especially important for parameters \n that are critical when creating the default database for a DB cluster, such as the character set \n for the default database defined by the character_set_database parameter. You can use the \n Parameter Groups option of the Amazon RDS console or the \n DescribeDBClusterParameters action to verify \n that your DB cluster parameter group has been created or modified.

                        \n
                        \n

                        For more information on Amazon Aurora, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.

                        \n

                        For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.

                        " + "smithy.api#documentation": "

                        Creates a new DB cluster parameter group.

                        \n

                        Parameters in a DB cluster parameter group apply to all of the instances in a DB cluster.

                        \n

                        A DB cluster parameter group is initially created with the default parameters for the\n database engine used by instances in the DB cluster. To provide custom values for any of the\n parameters, you must modify the group after creating it using\n ModifyDBClusterParameterGroup. Once you've created a DB cluster parameter group, you need to\n associate it with your DB cluster using ModifyDBCluster.

                        \n

                        When you associate a new DB cluster parameter group with a running Aurora DB cluster, reboot the DB\n instances in the DB cluster without failover for the new DB cluster parameter group and \n associated settings to take effect.

                        \n

                        When you associate a new DB cluster parameter group with a running Multi-AZ DB cluster, reboot the DB\n cluster without failover for the new DB cluster parameter group and associated settings to take effect.

                        \n \n

                        After you create a DB cluster parameter group, you should wait at least 5 minutes\n before creating your first DB cluster that uses that DB cluster parameter group as\n the default parameter group. This allows Amazon RDS to fully complete the create\n action before the DB cluster parameter group is used as the default for a new DB\n cluster. This is especially important for parameters that are critical when creating\n the default database for a DB cluster, such as the character set for the default\n database defined by the character_set_database parameter. You can use\n the Parameter Groups option of the Amazon RDS console or the\n DescribeDBClusterParameters operation to verify that your DB\n cluster parameter group has been created or modified.

                        \n
                        \n

                        For more information on Amazon Aurora, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.

                        \n

                        For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.

                        " } }, "com.amazonaws.rds#CreateDBClusterParameterGroupMessage": { @@ -2513,7 +2513,7 @@ } ], "traits": { - "smithy.api#documentation": "

                        Creates a new DB instance.

                        " + "smithy.api#documentation": "

                        Creates a new DB instance.

                        \n

                        The new DB instance can be an RDS DB instance, or it can be a DB instance in an Aurora DB cluster. \n For an Aurora DB cluster, you can call this operation multiple times to add more than one DB instance \n to the cluster.

                        \n

                        For more information about creating an RDS DB instance, see \n Creating an Amazon RDS DB instance in the Amazon RDS User Guide.

                        \n

                        For more information about creating a DB instance in an Aurora DB cluster, see \n \n Creating an Amazon Aurora DB cluster in the Amazon Aurora User Guide.

                        " } }, "com.amazonaws.rds#CreateDBInstanceMessage": { @@ -2541,7 +2541,7 @@ "DBInstanceClass": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

                        The compute and memory capacity of the DB instance, for example db.m4.large.\n Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines.\n For the full list of DB instance classes,\n and availability for your engine, see\n DB Instance Class in the Amazon RDS User Guide.

                        ", + "smithy.api#documentation": "

                        The compute and memory capacity of the DB instance, for example db.m5.large.\n Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines.\n For the full list of DB instance classes, and availability for your engine, see\n DB instance \n classes in the Amazon RDS User Guide or \n Aurora \n DB instance classes in the Amazon Aurora User Guide.

                        ", "smithy.api#required": {} } }, @@ -2567,7 +2567,7 @@ "DBSecurityGroups": { "target": "com.amazonaws.rds#DBSecurityGroupNameList", "traits": { - "smithy.api#documentation": "

                        A list of DB security groups to associate with this DB instance.

                        \n

                        Default: The default DB security group for the database engine.

                        " + "smithy.api#documentation": "

                        A list of DB security groups to associate with this DB instance.

                        \n

                        This setting applies to the legacy EC2-Classic platform, which is no longer used to create \n new DB instances. Use the VpcSecurityGroupIds setting instead.

                        " } }, "VpcSecurityGroupIds": { @@ -2603,7 +2603,7 @@ "BackupRetentionPeriod": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "

                        The number of days for which automated backups are retained. Setting this parameter to a positive number enables \n backups. Setting this parameter to 0 disables automated backups.

                        \n

                        \n Amazon Aurora\n

                        \n

                        Not applicable. The retention period for automated backups is managed by the DB cluster.

                        \n

                        Default: 1

                        \n

                        Constraints:

                        \n
                          \n
                        • \n

                          Must be a value from 0 to 35

                          \n
                        • \n
                        • \n

                          Can't be set to 0 if the DB instance is a source to read replicas

                          \n
                        • \n
                        • \n

                          Can't be set to 0 or 35 for an RDS Custom for Oracle DB instance

                          \n
                        • \n
                        " + "smithy.api#documentation": "

                        The number of days for which automated backups are retained. Setting this parameter to a positive number enables \n backups. Setting this parameter to 0 disables automated backups.

                        \n

                        \n Amazon Aurora\n

                        \n

                        Not applicable. The retention period for automated backups is managed by the DB cluster.

                        \n

                        Default: 1

                        \n

                        Constraints:

                        \n
                          \n
                        • \n

                          Must be a value from 0 to 35

                          \n
                        • \n
                        • \n

                          Can't be set to 0 if the DB instance is a source to read replicas

                          \n
                        • \n
                        • \n

                          Can't be set to 0 for an RDS Custom for Oracle DB instance

                          \n
                        • \n
                        " } }, "PreferredBackupWindow": { @@ -2621,13 +2621,13 @@ "MultiAZ": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

                        A value that indicates whether the DB instance is a Multi-AZ deployment. You can't set \n the AvailabilityZone parameter if the DB instance is a Multi-AZ deployment.

                        \n

                        This setting doesn't apply to RDS Custom.

                        " + "smithy.api#documentation": "

                        A value that indicates whether the DB instance is a Multi-AZ deployment. You can't set \n the AvailabilityZone parameter if the DB instance is a Multi-AZ deployment.

                        \n

                        This setting doesn't apply to RDS Custom.

                        \n

                        \n Amazon Aurora\n

                        \n

                        Not applicable. DB instance Availability Zones (AZs) are managed by the DB cluster.

                        " } }, "EngineVersion": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

                        The version number of the database engine to use.

                        \n

                        For a list of valid engine versions, use the DescribeDBEngineVersions action.

                        \n

                        The following are the database engines and links to information about the major and minor versions that are available with \n Amazon RDS. Not every database engine is available for every Amazon Web Services Region.

                        \n

                        \n Amazon Aurora\n

                        \n

                        Not applicable. The version number of the database engine to be used by the DB\n instance is managed by the DB cluster.

                        \n

                        \n Amazon RDS Custom for Oracle\n

                        \n

                        A custom engine version (CEV) that you have previously created. This setting is required for RDS Custom for Oracle. The CEV \n name has the following format: 19.customized_string\n . An example identifier is \n 19.my_cev1. For more information, see \n Creating an RDS Custom for Oracle DB instance in the Amazon RDS User Guide.

                        \n

                        \n Amazon RDS Custom for SQL Server\n

                        \n

                        See RDS Custom for SQL Server general requirements \n in the Amazon RDS User Guide.

                        \n

                        \n MariaDB\n

                        \n

                        For information, see MariaDB on Amazon RDS Versions in the \n Amazon RDS User Guide.

                        \n

                        \n Microsoft SQL Server\n

                        \n

                        For information, see Microsoft SQL Server Versions on Amazon RDS in the \n Amazon RDS User Guide.

                        \n

                        \n MySQL\n

                        \n

                        For information, see MySQL on Amazon RDS Versions in the \n Amazon RDS User Guide.

                        \n

                        \n Oracle\n

                        \n

                        For information, see Oracle Database Engine Release Notes in the \n Amazon RDS User Guide.

                        \n

                        \n PostgreSQL\n

                        \n

                        For information, see Amazon RDS for PostgreSQL versions and extensions in the \n Amazon RDS User Guide.

                        " + "smithy.api#documentation": "

                        The version number of the database engine to use.

                        \n

                        For a list of valid engine versions, use the DescribeDBEngineVersions\n operation.

                        \n

                        The following are the database engines and links to information about the major and minor versions that are available with \n Amazon RDS. Not every database engine is available for every Amazon Web Services Region.

                        \n

                        \n Amazon Aurora\n

                        \n

                        Not applicable. The version number of the database engine to be used by the DB\n instance is managed by the DB cluster.

                        \n

                        \n Amazon RDS Custom for Oracle\n

                        \n

                        A custom engine version (CEV) that you have previously created. This setting is required for RDS Custom for Oracle. The CEV \n name has the following format: 19.customized_string\n . An example identifier is \n 19.my_cev1. For more information, see \n Creating an RDS Custom for Oracle DB instance in the Amazon RDS User Guide.

                        \n

                        \n Amazon RDS Custom for SQL Server\n

                        \n

                        See RDS Custom for SQL Server general requirements \n in the Amazon RDS User Guide.

                        \n

                        \n MariaDB\n

                        \n

                        For information, see MariaDB on Amazon RDS Versions in the \n Amazon RDS User Guide.

                        \n

                        \n Microsoft SQL Server\n

                        \n

                        For information, see Microsoft SQL Server Versions on Amazon RDS in the \n Amazon RDS User Guide.

                        \n

                        \n MySQL\n

                        \n

                        For information, see MySQL on Amazon RDS Versions in the \n Amazon RDS User Guide.

                        \n

                        \n Oracle\n

                        \n

                        For information, see Oracle Database Engine Release Notes in the \n Amazon RDS User Guide.

                        \n

                        \n PostgreSQL\n

                        \n

                        For information, see Amazon RDS for PostgreSQL versions and extensions in the \n Amazon RDS User Guide.

                        " } }, "AutoMinorVersionUpgrade": { @@ -2639,19 +2639,19 @@ "LicenseModel": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

                        License model information for this DB instance.

                        \n

                        Valid values: license-included | bring-your-own-license | general-public-license\n

                        \n

                        This setting doesn't apply to RDS Custom.

                        " + "smithy.api#documentation": "

                        License model information for this DB instance.

                        \n

                        Valid values: license-included | bring-your-own-license | general-public-license\n

                        \n

                        This setting doesn't apply to RDS Custom.

                        \n

                        \n Amazon Aurora\n

                        \n

                        Not applicable.

                        " } }, "Iops": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "

                        The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance.\n For information about valid Iops values, see Amazon RDS Provisioned IOPS storage to improve performance in the Amazon RDS User Guide.

                        \n

                        Constraints: For MariaDB, MySQL, Oracle, and PostgreSQL DB instances, must be a multiple between .5 and 50 \n of the storage amount for the DB instance. For SQL Server DB instances, must be a multiple between 1 and 50 \n of the storage amount for the DB instance.

                        " + "smithy.api#documentation": "

                        The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance.\n For information about valid Iops values, see Amazon RDS Provisioned IOPS storage to improve performance in the Amazon RDS User Guide.

                        \n

                        Constraints: For MariaDB, MySQL, Oracle, and PostgreSQL DB instances, must be a multiple between .5 and 50 \n of the storage amount for the DB instance. For SQL Server DB instances, must be a multiple between 1 and 50 \n of the storage amount for the DB instance.

                        \n

                        \n Amazon Aurora\n

                        \n

                        Not applicable. Storage is managed by the DB cluster.

                        " } }, "OptionGroupName": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

                        A value that indicates that the DB instance should be associated with the specified option group.

                        \n

                        Permanent options, such as the TDE option for Oracle Advanced Security TDE, can't be removed \n from an option group. Also, that option group can't be removed from a DB instance after it is \n associated with a DB instance.

                        \n

                        This setting doesn't apply to RDS Custom.

                        " + "smithy.api#documentation": "

                        A value that indicates that the DB instance should be associated with the specified option group.

                        \n

                        Permanent options, such as the TDE option for Oracle Advanced Security TDE, can't be removed \n from an option group. Also, that option group can't be removed from a DB instance after it is \n associated with a DB instance.

                        \n

                        This setting doesn't apply to RDS Custom.

                        \n

                        \n Amazon Aurora\n

                        \n

                        Not applicable.

                        " } }, "CharacterSetName": { @@ -2687,13 +2687,13 @@ "StorageType": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

                        Specifies the storage type to be associated with the DB instance.

                        \n

                        Valid values: standard | gp2 | io1\n

                        \n

                        If you specify io1, you must also include a value for the\n Iops parameter.

                        \n

                        Default: io1 if the Iops parameter\n is specified, otherwise gp2\n

                        " + "smithy.api#documentation": "

                        Specifies the storage type to be associated with the DB instance.

                        \n

                        Valid values: standard | gp2 | io1\n

                        \n

                        If you specify io1, you must also include a value for the\n Iops parameter.

                        \n

                        Default: io1 if the Iops parameter\n is specified, otherwise gp2\n

                        \n

                        \n Amazon Aurora\n

                        \n

                        Not applicable. Storage is managed by the DB cluster.

                        " } }, "TdeCredentialArn": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

                        The ARN from the key store with which to associate the instance for TDE encryption.

                        \n

                        This setting doesn't apply to RDS Custom.

                        " + "smithy.api#documentation": "

                        The ARN from the key store with which to associate the instance for TDE encryption.

                        \n

                        This setting doesn't apply to RDS Custom.

                        \n

                        \n Amazon Aurora\n

                        \n

                        Not applicable.

                        " } }, "TdeCredentialPassword": { @@ -2717,7 +2717,7 @@ "Domain": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

                        The Active Directory directory ID to create the DB instance in. Currently, only MySQL, Microsoft SQL \n Server, Oracle, and PostgreSQL DB instances can be created in an Active Directory Domain.

                        \n

                        For more information, see \n Kerberos Authentication in the Amazon RDS User Guide.

                        \n

                        This setting doesn't apply to RDS Custom.

                        " + "smithy.api#documentation": "

                        The Active Directory directory ID to create the DB instance in. Currently, only MySQL, Microsoft SQL \n Server, Oracle, and PostgreSQL DB instances can be created in an Active Directory Domain.

                        \n

                        For more information, see \n Kerberos Authentication in the Amazon RDS User Guide.

                        \n

                        This setting doesn't apply to RDS Custom.

                        \n

                        \n Amazon Aurora\n

                        \n

                        Not applicable. The domain is managed by the DB cluster.

                        " } }, "CopyTagsToSnapshot": { @@ -2741,7 +2741,7 @@ "DomainIAMRoleName": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

                        Specify the name of the IAM role to be used when making API calls to the Directory Service.

                        \n

                        This setting doesn't apply to RDS Custom.

                        " + "smithy.api#documentation": "

                        Specify the name of the IAM role to be used when making API calls to the Directory Service.

                        \n

                        This setting doesn't apply to RDS Custom.

                        \n

                        \n Amazon Aurora\n

                        \n

                        Not applicable. The domain is managed by the DB cluster.

                        " } }, "PromotionTier": { @@ -2759,7 +2759,7 @@ "EnableIAMDatabaseAuthentication": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

                        A value that indicates whether to enable mapping of Amazon Web Services Identity and Access Management\n (IAM) accounts to database accounts. By default, mapping isn't enabled.

                        \n

                        This setting doesn't apply to RDS Custom or Amazon Aurora. In Aurora, mapping Amazon Web Services IAM accounts \n to database accounts is managed by the DB cluster.

                        \n

                        For more information, see \n \n IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.

                        " + "smithy.api#documentation": "

                        A value that indicates whether to enable mapping of Amazon Web Services Identity and Access Management\n (IAM) accounts to database accounts. By default, mapping isn't enabled.

                        \n

                        For more information, see \n \n IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.

                        \n

                        This setting doesn't apply to RDS Custom.

                        \n

                        \n Amazon Aurora\n

                        \n

                        Not applicable. Mapping Amazon Web Services IAM accounts to database accounts is managed by the DB cluster.

                        " } }, "EnablePerformanceInsights": { @@ -2777,7 +2777,7 @@ "PerformanceInsightsRetentionPeriod": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "

                        The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).

                        \n

                        This setting doesn't apply to RDS Custom.

                        " + "smithy.api#documentation": "

                        The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:

                        \n
                          \n
                        • \n

                          7

                          \n
                        • \n
                        • \n

                          \n month * 31, where month is a number of months from 1-23

                          \n
                        • \n
                        • \n

                          731

                          \n
                        • \n
                        \n

                        For example, the following values are valid:

                        \n
                          \n
                        • \n

                          93 (3 months * 31)

                          \n
                        • \n
                        • \n

                          341 (11 months * 31)

                          \n
                        • \n
                        • \n

                          589 (19 months * 31)

                          \n
                        • \n
                        • \n

                          731

                          \n
                        • \n
                        \n

                        If you specify a retention period such as 94, which isn't a valid value, RDS issues an error.

                        \n

                        This setting doesn't apply to RDS Custom.

                        " } }, "EnableCloudwatchLogsExports": { @@ -2789,7 +2789,7 @@ "ProcessorFeatures": { "target": "com.amazonaws.rds#ProcessorFeatureList", "traits": { - "smithy.api#documentation": "

                        The number of CPU cores and the number of threads per core for the DB instance class of the DB instance.

                        \n

                        This setting doesn't apply to RDS Custom.

                        " + "smithy.api#documentation": "

                        The number of CPU cores and the number of threads per core for the DB instance class of the DB instance.

                        \n

                        This setting doesn't apply to RDS Custom.

                        \n

                        \n Amazon Aurora\n

                        \n

                        Not applicable.

                        " } }, "DeletionProtection": { @@ -2801,7 +2801,7 @@ "MaxAllocatedStorage": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "

                        The upper limit in gibibytes (GiB) to which Amazon RDS can automatically scale the storage of the DB instance.

                        \n

                        For more information about this setting, including limitations that apply to it, see \n \n Managing capacity automatically with Amazon RDS storage autoscaling \n in the Amazon RDS User Guide.

                        \n

                        This setting doesn't apply to RDS Custom.

                        " + "smithy.api#documentation": "

                        The upper limit in gibibytes (GiB) to which Amazon RDS can automatically scale the storage of the DB instance.

                        \n

                        For more information about this setting, including limitations that apply to it, see \n \n Managing capacity automatically with Amazon RDS storage autoscaling \n in the Amazon RDS User Guide.

                        \n

                        This setting doesn't apply to RDS Custom.

                        \n

                        \n Amazon Aurora\n

                        \n

                        Not applicable. Storage is managed by the DB cluster.

                        " } }, "EnableCustomerOwnedIp": { @@ -2904,7 +2904,7 @@ } ], "traits": { - "smithy.api#documentation": "

                        Creates a new DB instance that acts as a read replica for an existing source DB\n instance. You can create a read replica for a DB instance running MySQL, MariaDB,\n Oracle, PostgreSQL, or SQL Server. For more information, see Working with Read\n Replicas in the Amazon RDS User Guide.

                        \n

                        Amazon Aurora doesn't support this action. Call the CreateDBInstance\n action to create a DB instance for an Aurora DB cluster.

                        \n

                        All read replica DB instances are created with backups disabled. All other DB\n instance attributes (including DB security groups and DB parameter groups) are inherited\n from the source DB instance, except as specified.

                        \n \n

                        Your source DB instance must have backup retention enabled.

                        \n
                        " + "smithy.api#documentation": "

                        Creates a new DB instance that acts as a read replica for an existing source DB\n instance. You can create a read replica for a DB instance running MySQL, MariaDB,\n Oracle, PostgreSQL, or SQL Server. For more information, see Working with Read\n Replicas in the Amazon RDS User Guide.

                        \n

                        Amazon Aurora doesn't support this operation. Call the CreateDBInstance\n operation to create a DB instance for an Aurora DB cluster.

                        \n

                        All read replica DB instances are created with backups disabled. All other DB\n instance attributes (including DB security groups and DB parameter groups) are inherited\n from the source DB instance, except as specified.

                        \n \n

                        Your source DB instance must have backup retention enabled.

                        \n
                        " } }, "com.amazonaws.rds#CreateDBInstanceReadReplicaMessage": { @@ -2969,7 +2969,7 @@ "DBParameterGroupName": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

                        The name of the DB parameter group to associate with this DB instance.

                        \n

                        If you do not specify a value for DBParameterGroupName, then Amazon RDS\n uses the DBParameterGroup of source DB instance for a same Region read\n replica, or the default DBParameterGroup for the specified DB engine for a\n cross-Region read replica.

                        \n

                        Specifying a parameter group for this operation is only supported for Oracle DB instances. It \n isn't supported for RDS Custom.

                        \n

                        Constraints:

                        \n
                          \n
                        • \n

                          Must be 1 to 255 letters, numbers, or hyphens.

                          \n
                        • \n
                        • \n

                          First character must be a letter

                          \n
                        • \n
                        • \n

                          Can't end with a hyphen or contain two consecutive hyphens

                          \n
                        • \n
                        " + "smithy.api#documentation": "

                        The name of the DB parameter group to associate with this DB instance.

                        \n

                        If you do not specify a value for DBParameterGroupName, then Amazon RDS\n uses the DBParameterGroup of source DB instance for a same Region read\n replica, or the default DBParameterGroup for the specified DB engine for a\n cross-Region read replica.

                        \n

                        Specifying a parameter group for this operation is only supported for MySQL and Oracle DB instances. \n It isn't supported for RDS Custom.

                        \n

                        Constraints:

                        \n
                          \n
                        • \n

                          Must be 1 to 255 letters, numbers, or hyphens.

                          \n
                        • \n
                        • \n

                          First character must be a letter

                          \n
                        • \n
                        • \n

                          Can't end with a hyphen or contain two consecutive hyphens

                          \n
                        • \n
                        " } }, "PubliclyAccessible": { @@ -3026,7 +3026,7 @@ "PreSignedUrl": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

                        The URL that contains a Signature Version 4 signed request for the CreateDBInstanceReadReplica API action \n in the source Amazon Web Services Region that contains the source DB instance.

                        \n

                        You must specify this parameter when you create an encrypted read replica from\n another Amazon Web Services Region by using the Amazon RDS API. Don't specify\n PreSignedUrl when you are creating an encrypted read replica in the\n same Amazon Web Services Region.

                        \n

                        The presigned URL must be a valid request for the CreateDBInstanceReadReplica API action \n that can be executed in the source Amazon Web Services Region that contains the encrypted source DB instance. \n The presigned URL request must contain the following parameter values:

                        \n
                          \n
                        • \n

                          \n DestinationRegion - The Amazon Web Services Region that the encrypted read\n replica is created in. This Amazon Web Services Region is the same one where the\n CreateDBInstanceReadReplica action is called that contains this presigned URL.

                          \n

                          For example, if you create an encrypted DB instance in the us-west-1 Amazon Web Services Region,\n from a source DB instance in the us-east-2 Amazon Web Services Region, \n then you call the CreateDBInstanceReadReplica action in\n the us-east-1 Amazon Web Services Region and provide a presigned URL that contains a call to the\n CreateDBInstanceReadReplica action in the us-west-2 Amazon Web Services Region. For this\n example, the DestinationRegion in the presigned URL must be set to\n the us-east-1 Amazon Web Services Region.

                          \n
                        • \n
                        • \n

                          \n KmsKeyId - The Amazon Web Services KMS key identifier for the key to use to\n encrypt the read replica in the destination Amazon Web Services Region. This is the same\n identifier for both the CreateDBInstanceReadReplica action that is\n called in the destination Amazon Web Services Region, and the action contained in the presigned\n URL.

                          \n
                        • \n
                        • \n

                          \n SourceDBInstanceIdentifier - The DB instance identifier for\n the encrypted DB instance to be replicated. This identifier must be in the\n Amazon Resource Name (ARN) format for the source Amazon Web Services Region. For example, if you\n are creating an encrypted read replica from a DB instance in the us-west-2 Amazon Web Services\n Region, then your SourceDBInstanceIdentifier looks like the\n following example:\n arn:aws:rds:us-west-2:123456789012:instance:mysql-instance1-20161115.

                          \n
                        • \n
                        \n

                        To learn how to generate a Signature Version 4 signed request, see \n Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and\n Signature Version 4 Signing Process.

                        \n \n

                        If you are using an Amazon Web Services SDK tool or the CLI, you can specify\n SourceRegion (or --source-region for the CLI)\n instead of specifying PreSignedUrl manually. Specifying\n SourceRegion autogenerates a presigned URL that is a valid request\n for the operation that can be executed in the source Amazon Web Services Region.

                        \n

                        \n SourceRegion isn't supported for SQL Server, because SQL Server on Amazon RDS\n doesn't support cross-Region read replicas.

                        \n
                        \n

                        This setting doesn't apply to RDS Custom.

                        " + "smithy.api#documentation": "

                        When you are creating a read replica from one Amazon Web Services GovCloud (US) Region to another or\n from one China Amazon Web Services Region to another, the URL that contains a Signature Version 4\n signed request for the CreateDBInstanceReadReplica API operation in the\n source Amazon Web Services Region that contains the source DB instance.

                        \n

                        This setting applies only to Amazon Web Services GovCloud (US) Regions and \n China Amazon Web Services Regions. It's ignored in other Amazon Web Services Regions.

                        \n

                        You must specify this parameter when you create an encrypted read replica from\n another Amazon Web Services Region by using the Amazon RDS API. Don't specify\n PreSignedUrl when you are creating an encrypted read replica in the\n same Amazon Web Services Region.

                        \n

                        The presigned URL must be a valid request for the\n CreateDBInstanceReadReplica API operation that can run in the\n source Amazon Web Services Region that contains the encrypted source DB instance. The presigned URL\n request must contain the following parameter values:

                        \n
                          \n
                        • \n

                          \n DestinationRegion - The Amazon Web Services Region that the encrypted read\n replica is created in. This Amazon Web Services Region is the same one where the\n CreateDBInstanceReadReplica operation is called that contains\n this presigned URL.

                          \n

                          For example, if you create an encrypted DB instance in the us-west-1\n Amazon Web Services Region, from a source DB instance in the us-east-2 Amazon Web Services Region, then you\n call the CreateDBInstanceReadReplica operation in the us-west-1\n Amazon Web Services Region and provide a presigned URL that contains a call to the\n CreateDBInstanceReadReplica operation in the us-east-2\n Amazon Web Services Region. For this example, the DestinationRegion in the\n presigned URL must be set to the us-west-1 Amazon Web Services Region.

                          \n
                        • \n
                        • \n

                          \n KmsKeyId - The KMS key identifier for the key to use to\n encrypt the read replica in the destination Amazon Web Services Region. This is the same\n identifier for both the CreateDBInstanceReadReplica operation that\n is called in the destination Amazon Web Services Region, and the operation contained in the\n presigned URL.

                          \n
                        • \n
                        • \n

                          \n SourceDBInstanceIdentifier - The DB instance identifier for\n the encrypted DB instance to be replicated. This identifier must be in the\n Amazon Resource Name (ARN) format for the source Amazon Web Services Region. For example, if you\n are creating an encrypted read replica from a DB instance in the us-west-2 Amazon Web Services\n Region, then your SourceDBInstanceIdentifier looks like the\n following example:\n arn:aws:rds:us-west-2:123456789012:instance:mysql-instance1-20161115.

                          \n
                        • \n
                        \n

                        To learn how to generate a Signature Version 4 signed request, see \n Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and\n Signature Version 4 Signing Process.

                        \n \n

                        If you are using an Amazon Web Services SDK tool or the CLI, you can specify\n SourceRegion (or --source-region for the CLI)\n instead of specifying PreSignedUrl manually. Specifying\n SourceRegion autogenerates a presigned URL that is a valid request\n for the operation that can run in the source Amazon Web Services Region.

                        \n

                        \n SourceRegion isn't supported for SQL Server, because Amazon RDS for SQL Server \n doesn't support cross-Region read replicas.

                        \n
                        \n

                        This setting doesn't apply to RDS Custom.

                        " } }, "EnableIAMDatabaseAuthentication": { @@ -3050,7 +3050,7 @@ "PerformanceInsightsRetentionPeriod": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "

                        The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).

                        \n

                        This setting doesn't apply to RDS Custom.

                        " + "smithy.api#documentation": "

                        The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:

                        \n
                          \n
                        • \n

                          7

                          \n
                        • \n
                        • \n

                          \n month * 31, where month is a number of months from 1-23

                          \n
                        • \n
                        • \n

                          731

                          \n
                        • \n
                        \n

                        For example, the following values are valid:

                        \n
                          \n
                        • \n

                          93 (3 months * 31)

                          \n
                        • \n
                        • \n

                          341 (11 months * 31)

                          \n
                        • \n
                        • \n

                          589 (19 months * 31)

                          \n
                        • \n
                        • \n

                          731

                          \n
                        • \n
                        \n

                        If you specify a retention period such as 94, which isn't a valid value, RDS issues an error.

                        \n \n

                        This setting doesn't apply to RDS Custom.

                        " } }, "EnableCloudwatchLogsExports": { @@ -3311,7 +3311,7 @@ "EngineFamily": { "target": "com.amazonaws.rds#EngineFamily", "traits": { - "smithy.api#documentation": "

                        The kinds of databases that the proxy can connect to. \n This value determines which database network protocol the proxy recognizes when it interprets\n network traffic to and from the database. \n The engine family applies to MySQL and PostgreSQL for both RDS and Aurora.

                        ", + "smithy.api#documentation": "

                        The kinds of databases that the proxy can connect to. \n This value determines which database network protocol the proxy recognizes when it interprets\n network traffic to and from the database. For Aurora MySQL, RDS for MariaDB, and RDS for MySQL databases, specify MYSQL. \n For Aurora PostgreSQL and RDS for PostgreSQL databases, specify POSTGRESQL.

                        ", "smithy.api#required": {} } }, @@ -3601,7 +3601,7 @@ } ], "traits": { - "smithy.api#documentation": "

                        Creates an RDS event notification subscription. This action requires a topic Amazon\n Resource Name (ARN) created by either the RDS console, the SNS console, or the SNS API.\n To obtain an ARN with SNS, you must create a topic in Amazon SNS and subscribe to the\n topic. The ARN is displayed in the SNS console.

                        \n

                        You can specify the type of source (SourceType) that you want to be\n notified of and provide a list of RDS sources (SourceIds) that triggers the\n events. You can also provide a list of event categories (EventCategories)\n for events that you want to be notified of. For example, you can specify\n SourceType = db-instance, SourceIds =\n mydbinstance1, mydbinstance2 and\n EventCategories = Availability,\n Backup.

                        \n

                        If you specify both the SourceType and SourceIds, such as SourceType = db-instance\n and SourceIds = myDBInstance1, you are notified of all the db-instance events for\n the specified source. If you specify a SourceType but do not specify SourceIds,\n you receive notice of the events for that source type for all your RDS sources. If you\n don't specify either the SourceType or the SourceIds, you are notified of events\n generated from all RDS sources belonging to your customer account.

                        \n \n

                        RDS event notification is only available for unencrypted SNS topics. If you specify an \n encrypted SNS topic, event notifications aren't sent for the topic.

                        \n
                        " + "smithy.api#documentation": "

                        Creates an RDS event notification subscription. This operation requires a topic Amazon\n Resource Name (ARN) created by either the RDS console, the SNS console, or the SNS API.\n To obtain an ARN with SNS, you must create a topic in Amazon SNS and subscribe to the\n topic. The ARN is displayed in the SNS console.

                        \n

                        You can specify the type of source (SourceType) that you want to be\n notified of and provide a list of RDS sources (SourceIds) that triggers the\n events. You can also provide a list of event categories (EventCategories)\n for events that you want to be notified of. For example, you can specify\n SourceType = db-instance, SourceIds =\n mydbinstance1, mydbinstance2 and\n EventCategories = Availability,\n Backup.

                        \n

                        If you specify both the SourceType and SourceIds, such as SourceType = db-instance\n and SourceIds = myDBInstance1, you are notified of all the db-instance events for\n the specified source. If you specify a SourceType but do not specify SourceIds,\n you receive notice of the events for that source type for all your RDS sources. If you\n don't specify either the SourceType or the SourceIds, you are notified of events\n generated from all RDS sources belonging to your customer account.

                        \n \n

                        RDS event notification is only available for unencrypted SNS topics. If you specify an \n encrypted SNS topic, event notifications aren't sent for the topic.

                        \n
                        " } }, "com.amazonaws.rds#CreateEventSubscriptionMessage": { @@ -3684,7 +3684,7 @@ } ], "traits": { - "smithy.api#documentation": "

                        Creates an Aurora global database\n spread across multiple Amazon Web Services Regions. The global database\n contains a single primary cluster with read-write capability,\n and a read-only secondary cluster that receives\n data from the primary cluster through high-speed replication\n performed by the Aurora storage subsystem.

                        \n

                        You can create a global database that is initially empty, and then\n add a primary cluster and a secondary cluster to it.\n Or you can specify an existing Aurora cluster during the create operation,\n and this cluster becomes the primary cluster of the global database.

                        \n \n

                        This action only applies to Aurora DB clusters.

                        \n
                        " + "smithy.api#documentation": "

                        Creates an Aurora global database\n spread across multiple Amazon Web Services Regions. The global database\n contains a single primary cluster with read-write capability,\n and a read-only secondary cluster that receives\n data from the primary cluster through high-speed replication\n performed by the Aurora storage subsystem.

                        \n

                        You can create a global database that is initially empty, and then\n add a primary cluster and a secondary cluster to it.\n Or you can specify an existing Aurora cluster during the create operation,\n and this cluster becomes the primary cluster of the global database.

                        \n \n

                        This action applies only to Aurora DB clusters.

                        \n
                        " } }, "com.amazonaws.rds#CreateGlobalClusterMessage": { @@ -3723,7 +3723,7 @@ "DatabaseName": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

                        The name for your database of up to 64 alpha-numeric characters. If you do not provide a name, Amazon\n Aurora will not create a database in the global database cluster you are creating.

                        " + "smithy.api#documentation": "

                        The name for your database of up to 64 alphanumeric characters. If you do not provide\n a name, Amazon Aurora will not create a database in the global database cluster you are\n creating.

                        " } }, "StorageEncrypted": { @@ -4325,7 +4325,7 @@ "PerformanceInsightsRetentionPeriod": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "

                        The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).

                        \n

                        This setting is only for non-Aurora Multi-AZ DB clusters.

                        " + "smithy.api#documentation": "

                        The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:

                        \n
                          \n
                        • \n

                          7

                          \n
                        • \n
                        • \n

                          \n month * 31, where month is a number of months from 1-23

                          \n
                        • \n
                        • \n

                          731

                          \n
                        • \n
                        \n

                        For example, the following values are valid:

                        \n
                          \n
                        • \n

                          93 (3 months * 31)

                          \n
                        • \n
                        • \n

                          341 (11 months * 31)

                          \n
                        • \n
                        • \n

                          589 (19 months * 31)

                          \n
                        • \n
                        • \n

                          731

                          \n
                        • \n
                        \n

                        This setting is only for non-Aurora Multi-AZ DB clusters.

                        " } }, "ServerlessV2ScalingConfiguration": { @@ -5733,7 +5733,7 @@ "PerformanceInsightsRetentionPeriod": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "

                        The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).

                        " + "smithy.api#documentation": "

                        The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:

                        \n
                          \n
                        • \n

                          7

                          \n
                        • \n
                        • \n

                          \n month * 31, where month is a number of months from 1-23

                          \n
                        • \n
                        • \n

                          731

                          \n
                        • \n
                        \n

                        For example, the following values are valid:

                        \n
                          \n
                        • \n

                          93 (3 months * 31)

                          \n
                        • \n
                        • \n

                          341 (11 months * 31)

                          \n
                        • \n
                        • \n

                          589 (19 months * 31)

                          \n
                        • \n
                        • \n

                          731

                          \n
                        • \n
                        " } }, "EnabledCloudwatchLogsExports": { @@ -6520,7 +6520,7 @@ "EngineFamily": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

                        The engine family applies to MySQL and PostgreSQL for both RDS and Aurora.

                        " + "smithy.api#documentation": "

                        The kinds of databases that the proxy can connect to. This value determines which database network protocol \n the proxy recognizes when it interprets network traffic to and from the database. MYSQL supports Aurora MySQL, \n RDS for MariaDB, and RDS for MySQL databases. POSTGRESQL supports Aurora PostgreSQL and RDS for PostgreSQL databases.

                        " } }, "VpcId": { @@ -10386,13 +10386,13 @@ "DBInstanceIdentifier": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

                        The ID of the DB instance to retrieve the list of DB snapshots for. \n This parameter can't be used in conjunction with DBSnapshotIdentifier.\n This parameter isn't case-sensitive.

                        \n

                        Constraints:

                        \n
                          \n
                        • \n

                          If supplied, must match the identifier of an existing DBInstance.

                          \n
                        • \n
                        " + "smithy.api#documentation": "

                        The ID of the DB instance to retrieve the list of DB snapshots for. \n This parameter isn't case-sensitive.

                        \n

                        Constraints:

                        \n
                          \n
                        • \n

                          If supplied, must match the identifier of an existing DBInstance.

                          \n
                        • \n
                        " } }, "DBSnapshotIdentifier": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

                        A specific DB snapshot identifier to describe. This parameter can't be used in conjunction with DBInstanceIdentifier. \n This value is stored as a lowercase string.

                        \n

                        Constraints:

                        \n
                          \n
                        • \n

                          If supplied, must match the identifier of an existing DBSnapshot.

                          \n
                        • \n
                        • \n

                          If this identifier is for an automated snapshot, the SnapshotType parameter must also be specified.

                          \n
                        • \n
                        " + "smithy.api#documentation": "

                        A specific DB snapshot identifier to describe.\n This value is stored as a lowercase string.

                        \n

                        Constraints:

                        \n
                          \n
                        • \n

                          If supplied, must match the identifier of an existing DBSnapshot.

                          \n
                        • \n
                        • \n

                          If this identifier is for an automated snapshot, the SnapshotType parameter must also be specified.

                          \n
                        • \n
                        " } }, "SnapshotType": { @@ -10705,7 +10705,7 @@ "target": "com.amazonaws.rds#EventsMessage" }, "traits": { - "smithy.api#documentation": "

                        Returns events related to DB instances, DB clusters, DB parameter groups, DB security groups, DB snapshots, DB cluster snapshots, and RDS Proxies for the past 14 days. \n Events specific to a particular DB instance, DB cluster, DB parameter group, DB security group, DB snapshot, DB cluster snapshot group, or RDS Proxy can be \n obtained by providing the name as a parameter.

                        \n \n

                        By default, RDS returns events that were generated in the past hour.

                        \n
                        ", + "smithy.api#documentation": "

                        Returns events related to DB instances, DB clusters, DB parameter groups, DB security groups, DB snapshots, DB cluster snapshots, and RDS Proxies for the past 14 days. \n Events specific to a particular DB instance, DB cluster, DB parameter group, DB security group, DB snapshot, DB cluster snapshot group, or RDS Proxy can be \n obtained by providing the name as a parameter.

                        \n

                        For more information on working with events, see Monitoring Amazon RDS events in the Amazon RDS User Guide and Monitoring Amazon Aurora\n events in the Amazon Aurora User Guide.

                        \n \n

                        By default, RDS returns events that were generated in the past hour.

                        \n
                        ", "smithy.api#paginated": { "inputToken": "Marker", "outputToken": "Marker", @@ -11077,7 +11077,7 @@ "MaxRecords": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "

                        The maximum number of records to include in the response.\n If more records exist than the specified MaxRecords value,\n a pagination token called a marker is included in the response so that\n you can retrieve the remaining results.

                        \n

                        Default: 100

                        \n

                        Constraints: Minimum 20, maximum 100.

                        " + "smithy.api#documentation": "

                        The maximum number of records to include in the response.\n If more records exist than the specified MaxRecords value,\n a pagination token called a marker is included in the response so that\n you can retrieve the remaining results.

                        \n

                        Default: 100

                        \n

                        Constraints: Minimum 20, maximum 10000.

                        " } }, "Marker": { @@ -11767,7 +11767,7 @@ } }, "traits": { - "smithy.api#documentation": "

                        This data type is used as a response element in the DescribeEvents action.

                        " + "smithy.api#documentation": "

                        This data type is used as a response element in the DescribeEvents action.

                        " } }, "com.amazonaws.rds#EventCategoriesList": { @@ -11796,7 +11796,7 @@ } }, "traits": { - "smithy.api#documentation": "

                        Contains the results of a successful invocation of the DescribeEventCategories operation.

                        " + "smithy.api#documentation": "

                        Contains the results of a successful invocation of the DescribeEventCategories\n operation.

                        " } }, "com.amazonaws.rds#EventCategoriesMapList": { @@ -12146,7 +12146,7 @@ } ], "traits": { - "smithy.api#documentation": "

                        Forces a failover for a DB cluster.

                        \n

                        For an Aurora DB cluster, failover for a DB cluster promotes one of the Aurora Replicas (read-only instances)\n in the DB cluster to be the primary DB instance (the cluster writer).

                        \n

                        For a Multi-AZ DB cluster, failover for a DB cluster promotes one of the readable standby DB instances (read-only instances)\n in the DB cluster to be the primary DB instance (the cluster writer).

                        \n

                        An Amazon Aurora DB cluster automatically fails over to an Aurora Replica, if one exists,\n when the primary DB instance fails. A Multi-AZ DB cluster automatically fails over to a readbable standby \n DB instance when the primary DB instance fails.

                        \n

                        To simulate a failure of a primary instance for testing, you can force a failover. \n Because each instance in a DB cluster has its own endpoint address, make sure to clean up and re-establish any existing \n connections that use those endpoint addresses when the failover is complete.

                        \n

                        For more information on Amazon Aurora DB clusters, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.

                        \n

                        For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.

                        " + "smithy.api#documentation": "

                        Forces a failover for a DB cluster.

                        \n

                        For an Aurora DB cluster, failover for a DB cluster promotes one of the Aurora Replicas (read-only instances)\n in the DB cluster to be the primary DB instance (the cluster writer).

                        \n

                        For a Multi-AZ DB cluster, failover for a DB cluster promotes one of the readable standby DB instances (read-only instances)\n in the DB cluster to be the primary DB instance (the cluster writer).

                        \n

                        An Amazon Aurora DB cluster automatically fails over to an Aurora Replica, if one exists,\n when the primary DB instance fails. A Multi-AZ DB cluster automatically fails over to a readable standby \n DB instance when the primary DB instance fails.

                        \n

                        To simulate a failure of a primary instance for testing, you can force a failover. \n Because each instance in a DB cluster has its own endpoint address, make sure to clean up and re-establish any existing \n connections that use those endpoint addresses when the failover is complete.

                        \n

                        For more information on Amazon Aurora DB clusters, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.

                        \n

                        For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.

                        " } }, "com.amazonaws.rds#FailoverDBClusterMessage": { @@ -13718,7 +13718,7 @@ "PerformanceInsightsRetentionPeriod": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "

                        The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).

                        \n

                        Valid for: Multi-AZ DB clusters only

                        " + "smithy.api#documentation": "

                        The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:

                        \n
                          \n
                        • \n

                          7

                          \n
                        • \n
                        • \n

                          \n month * 31, where month is a number of months from 1-23

                          \n
                        • \n
                        • \n

                          731

                          \n
                        • \n
                        \n

                        For example, the following values are valid:

                        \n
                          \n
                        • \n

                          93 (3 months * 31)

                          \n
                        • \n
                        • \n

                          341 (11 months * 31)

                          \n
                        • \n
                        • \n

                          589 (19 months * 31)

                          \n
                        • \n
                        • \n

                          731

                          \n
                        • \n
                        \n

                        If you specify a retention period such as 94, which isn't a valid value, RDS issues an error.

                        \n

                        Valid for: Multi-AZ DB clusters only

                        " } }, "ServerlessV2ScalingConfiguration": { @@ -13746,7 +13746,7 @@ } ], "traits": { - "smithy.api#documentation": "

                        Modifies the parameters of a DB cluster parameter group. To modify more than one parameter,\n submit a list of the following: ParameterName, ParameterValue, \n and ApplyMethod. A maximum of 20\n parameters can be modified in a single request.

                        \n \n

                        After you create a DB cluster parameter group, you should wait at least 5 minutes\n before creating your first DB cluster that uses that DB cluster parameter group as the default parameter \n group. This allows Amazon RDS to fully complete the create action before the parameter \n group is used as the default for a new DB cluster. This is especially important for parameters \n that are critical when creating the default database for a DB cluster, such as the character set \n for the default database defined by the character_set_database parameter. You can use the \n Parameter Groups option of the Amazon RDS console or the \n DescribeDBClusterParameters action to verify \n that your DB cluster parameter group has been created or modified.

                        \n

                        If the modified DB cluster parameter group is used by an Aurora Serverless v1 cluster, Aurora\n applies the update immediately. The cluster restart might interrupt your workload. In that case,\n your application must reopen any connections and retry any transactions that were active\n when the parameter changes took effect.

                        \n
                        \n

                        For more information on Amazon Aurora DB clusters, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.

                        \n

                        For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.\n

                        " + "smithy.api#documentation": "

                        Modifies the parameters of a DB cluster parameter group. To modify more than one parameter,\n submit a list of the following: ParameterName, ParameterValue, \n and ApplyMethod. A maximum of 20\n parameters can be modified in a single request.

                        \n \n

                        After you create a DB cluster parameter group, you should wait at least 5 minutes\n before creating your first DB cluster that uses that DB cluster parameter group as the default parameter \n group. This allows Amazon RDS to fully complete the create action before the parameter \n group is used as the default for a new DB cluster. This is especially important for parameters \n that are critical when creating the default database for a DB cluster, such as the character set \n for the default database defined by the character_set_database parameter. You can use the \n Parameter Groups option of the Amazon RDS console or the \n DescribeDBClusterParameters operation to verify \n that your DB cluster parameter group has been created or modified.

                        \n

                        If the modified DB cluster parameter group is used by an Aurora Serverless v1 cluster, Aurora\n applies the update immediately. The cluster restart might interrupt your workload. In that case,\n your application must reopen any connections and retry any transactions that were active\n when the parameter changes took effect.

                        \n
                        \n

                        For more information on Amazon Aurora DB clusters, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.

                        \n

                        For more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.\n

                        " } }, "com.amazonaws.rds#ModifyDBClusterParameterGroupMessage": { @@ -13799,7 +13799,7 @@ } ], "traits": { - "smithy.api#documentation": "

                        Adds an attribute and values to, or removes an attribute and values from, a manual DB cluster snapshot.

                        \n

                        To share a manual DB cluster snapshot with other Amazon Web Services accounts, specify\n restore as the AttributeName and use the\n ValuesToAdd parameter to add a list of IDs of the Amazon Web Services accounts that are\n authorized to restore the manual DB cluster snapshot. Use the value all to\n make the manual DB cluster snapshot public, which means that it can be copied or\n restored by all Amazon Web Services accounts.

                        \n \n

                        Don't add the all value for any manual DB cluster snapshots\n that contain private information that you don't want available to all Amazon Web Services\n accounts.

                        \n
                        \n

                        If a manual DB cluster snapshot is encrypted, it can be shared, but only by\n specifying a list of authorized Amazon Web Services account IDs for the ValuesToAdd\n parameter. You can't use all as a value for that parameter in this\n case.

                        \n

                        To view which Amazon Web Services accounts have access to copy or restore a manual DB cluster\n snapshot, or whether a manual DB cluster snapshot is public or private, use the DescribeDBClusterSnapshotAttributes API action. The accounts are\n returned as values for the restore attribute.

                        " + "smithy.api#documentation": "

                        Adds an attribute and values to, or removes an attribute and values from, a manual DB cluster snapshot.

                        \n

                        To share a manual DB cluster snapshot with other Amazon Web Services accounts, specify\n restore as the AttributeName and use the\n ValuesToAdd parameter to add a list of IDs of the Amazon Web Services accounts that are\n authorized to restore the manual DB cluster snapshot. Use the value all to\n make the manual DB cluster snapshot public, which means that it can be copied or\n restored by all Amazon Web Services accounts.

                        \n \n

                        Don't add the all value for any manual DB cluster snapshots\n that contain private information that you don't want available to all Amazon Web Services\n accounts.

                        \n
                        \n

                        If a manual DB cluster snapshot is encrypted, it can be shared, but only by\n specifying a list of authorized Amazon Web Services account IDs for the ValuesToAdd\n parameter. You can't use all as a value for that parameter in this\n case.

                        \n

                        To view which Amazon Web Services accounts have access to copy or restore a manual DB cluster\n snapshot, or whether a manual DB cluster snapshot is public or private, use the DescribeDBClusterSnapshotAttributes API operation. The accounts are\n returned as values for the restore attribute.

                        " } }, "com.amazonaws.rds#ModifyDBClusterSnapshotAttributeMessage": { @@ -13815,7 +13815,7 @@ "AttributeName": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

                        The name of the DB cluster snapshot attribute to modify.

                        \n

                        To manage authorization for other Amazon Web Services accounts to copy or restore a manual DB cluster snapshot, \n set this value to restore.

                        \n \n

                        To view the list of attributes available to modify, use the\n DescribeDBClusterSnapshotAttributes API action.

                        \n
                        ", + "smithy.api#documentation": "

                        The name of the DB cluster snapshot attribute to modify.

                        \n

                        To manage authorization for other Amazon Web Services accounts to copy or restore a manual DB cluster snapshot, \n set this value to restore.

                        \n \n

                        To view the list of attributes available to modify, use the\n DescribeDBClusterSnapshotAttributes API operation.

                        \n
                        ", "smithy.api#required": {} } }, @@ -13937,7 +13937,7 @@ "DBInstanceClass": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

                        The new compute and memory capacity of the DB instance, for example db.m4.large.\n Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines.\n For the full list of DB instance classes,\n and availability for your engine, see\n DB Instance Class in the Amazon RDS User Guide.

                        \n

                        If you modify the DB instance class, an outage occurs during the change.\n The change is applied during the next maintenance window,\n unless ApplyImmediately is enabled for this request.

                        \n

                        This setting doesn't apply to RDS Custom for Oracle.

                        \n

                        Default: Uses existing setting

                        " + "smithy.api#documentation": "

                        The new compute and memory capacity of the DB instance, for example db.m5.large.\n Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines.\n For the full list of DB instance classes, and availability for your engine, see\n DB instance \n classes in the Amazon RDS User Guide or \n Aurora \n DB instance classes in the Amazon Aurora User Guide.

                        \n

                        If you modify the DB instance class, an outage occurs during the change.\n The change is applied during the next maintenance window,\n unless ApplyImmediately is enabled for this request.

                        \n

                        This setting doesn't apply to RDS Custom for Oracle.

                        \n

                        Default: Uses existing setting

                        " } }, "DBSubnetGroupName": { @@ -13967,7 +13967,7 @@ "MasterUserPassword": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

                        The new password for the master user. The password can include any printable ASCII \n character except \"/\", \"\"\", or \"@\".

                        \n

                        Changing this parameter doesn't result in an outage and the change is asynchronously applied as soon as possible. \n Between the time of the request and the completion of the request,\n the MasterUserPassword element exists in the\n PendingModifiedValues element of the operation response.

                        \n

                        This setting doesn't apply to RDS Custom.

                        \n

                        \n Amazon Aurora\n

                        \n

                        Not applicable. The password for the master user is managed by the DB cluster. For\n more information, see ModifyDBCluster.

                        \n

                        Default: Uses existing setting

                        \n

                        \n MariaDB\n

                        \n

                        Constraints: Must contain from 8 to 41 characters.

                        \n

                        \n Microsoft SQL Server\n

                        \n

                        Constraints: Must contain from 8 to 128 characters.

                        \n

                        \n MySQL\n

                        \n

                        Constraints: Must contain from 8 to 41 characters.

                        \n

                        \n Oracle\n

                        \n

                        Constraints: Must contain from 8 to 30 characters.

                        \n

                        \n PostgreSQL\n

                        \n

                        Constraints: Must contain from 8 to 128 characters.

                        \n \n

                        Amazon RDS API actions never return the password, \n so this action provides a way to regain access to a primary instance user if the password is lost. \n This includes restoring privileges that might have been accidentally revoked.

                        \n
                        " + "smithy.api#documentation": "

                        The new password for the master user. The password can include any printable ASCII \n character except \"/\", \"\"\", or \"@\".

                        \n

                        Changing this parameter doesn't result in an outage and the change is asynchronously applied as soon as possible. \n Between the time of the request and the completion of the request,\n the MasterUserPassword element exists in the\n PendingModifiedValues element of the operation response.

                        \n

                        This setting doesn't apply to RDS Custom.

                        \n

                        \n Amazon Aurora\n

                        \n

                        Not applicable. The password for the master user is managed by the DB cluster. For\n more information, see ModifyDBCluster.

                        \n

                        Default: Uses existing setting

                        \n

                        \n MariaDB\n

                        \n

                        Constraints: Must contain from 8 to 41 characters.

                        \n

                        \n Microsoft SQL Server\n

                        \n

                        Constraints: Must contain from 8 to 128 characters.

                        \n

                        \n MySQL\n

                        \n

                        Constraints: Must contain from 8 to 41 characters.

                        \n

                        \n Oracle\n

                        \n

                        Constraints: Must contain from 8 to 30 characters.

                        \n

                        \n PostgreSQL\n

                        \n

                        Constraints: Must contain from 8 to 128 characters.

                        \n \n

                        Amazon RDS API operations never return the password, \n so this action provides a way to regain access to a primary instance user if the password is lost. \n This includes restoring privileges that might have been accidentally revoked.

                        \n
                        " } }, "DBParameterGroupName": { @@ -13979,7 +13979,7 @@ "BackupRetentionPeriod": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "

                        The number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

                        \n \n

                        Enabling and disabling backups can result in a brief I/O suspension that lasts from a few seconds to a few minutes, depending on the size and class of your DB instance.

                        \n
                        \n

                        These changes are applied during the next maintenance window unless the ApplyImmediately parameter is enabled\n for this request. If you change the parameter from one non-zero value to another non-zero value, the change is asynchronously\n applied as soon as possible.

                        \n

                        \n Amazon Aurora\n

                        \n

                        Not applicable. The retention period for automated backups is managed by the DB\n cluster. For more information, see ModifyDBCluster.

                        \n

                        Default: Uses existing setting

                        \n

                        Constraints:

                        \n
                          \n
                        • \n

                          It must be a value from 0 to 35. It can't be set to 0 if the DB instance is a source to \n read replicas. It can't be set to 0 or 35 for an RDS Custom for Oracle DB instance.

                          \n
                        • \n
                        • \n

                          It can be specified for a MySQL read replica only if the source is running MySQL 5.6 or\n later.

                          \n
                        • \n
                        • \n

                          It can be specified for a PostgreSQL read replica only if the source is running PostgreSQL\n 9.3.5.

                          \n
                        • \n
                        " + "smithy.api#documentation": "

                        The number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

                        \n \n

                        Enabling and disabling backups can result in a brief I/O suspension that lasts from a few seconds to a few minutes, depending on the size and class of your DB instance.

                        \n
                        \n

                        These changes are applied during the next maintenance window unless the ApplyImmediately parameter is enabled\n for this request. If you change the parameter from one non-zero value to another non-zero value, the change is asynchronously\n applied as soon as possible.

                        \n

                        \n Amazon Aurora\n

                        \n

                        Not applicable. The retention period for automated backups is managed by the DB\n cluster. For more information, see ModifyDBCluster.

                        \n

                        Default: Uses existing setting

                        \n

                        Constraints:

                        \n
                          \n
                        • \n

                          It must be a value from 0 to 35. It can't be set to 0 if the DB instance is a source to \n read replicas. It can't be set to 0 for an RDS Custom for Oracle DB instance.

                          \n
                        • \n
                        • \n

                          It can be specified for a MySQL read replica only if the source is running MySQL 5.6 or\n later.

                          \n
                        • \n
                        • \n

                          It can be specified for a PostgreSQL read replica only if the source is running PostgreSQL\n 9.3.5.

                          \n
                        • \n
                        " } }, "PreferredBackupWindow": { @@ -14123,7 +14123,7 @@ "EnablePerformanceInsights": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

                        A value that indicates whether to enable Performance Insights for the DB instance.

                        \n

                        For more information, see \n Using Amazon Performance Insights in the Amazon RDS User Guide..

                        \n

                        This setting doesn't apply to RDS Custom.

                        " + "smithy.api#documentation": "

                        A value that indicates whether to enable Performance Insights for the DB instance.

                        \n

                        For more information, see \n Using Amazon Performance Insights in the Amazon RDS User Guide.

                        \n

                        This setting doesn't apply to RDS Custom.

                        " } }, "PerformanceInsightsKMSKeyId": { @@ -14135,7 +14135,7 @@ "PerformanceInsightsRetentionPeriod": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "

                        The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).

                        \n

                        This setting doesn't apply to RDS Custom.

                        " + "smithy.api#documentation": "

                        The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:

                        \n
                          \n
                        • \n

                          7

                          \n
                        • \n
                        • \n

                          \n month * 31, where month is a number of months from 1-23

                          \n
                        • \n
                        • \n

                          731

                          \n
                        • \n
                        \n

                        For example, the following values are valid:

                        \n
                          \n
                        • \n

                          93 (3 months * 31)

                          \n
                        • \n
                        • \n

                          341 (11 months * 31)

                          \n
                        • \n
                        • \n

                          589 (19 months * 31)

                          \n
                        • \n
                        • \n

                          731

                          \n
                        • \n
                        \n

                        If you specify a retention period such as 94, which isn't a valid value, RDS issues an error.

                        \n

                        This setting doesn't apply to RDS Custom.

                        " } }, "CloudwatchLogsExportConfiguration": { @@ -14443,14 +14443,14 @@ "TargetGroupName": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

                        The name of the new target group to assign to the proxy.

                        ", + "smithy.api#documentation": "

                        The name of the target group to modify.

                        ", "smithy.api#required": {} } }, "DBProxyName": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

                        The name of the new proxy to which to assign the target group.

                        ", + "smithy.api#documentation": "

                        The name of the proxy.

                        ", "smithy.api#required": {} } }, @@ -14516,7 +14516,7 @@ } ], "traits": { - "smithy.api#documentation": "

                        Adds an attribute and values to, or removes an attribute and values from, a manual DB snapshot.

                        \n

                        To share a manual DB snapshot with other Amazon Web Services accounts, specify restore\n as the AttributeName and use the ValuesToAdd parameter to add\n a list of IDs of the Amazon Web Services accounts that are authorized to restore the manual DB snapshot.\n Uses the value all to make the manual DB snapshot public, which means it\n can be copied or restored by all Amazon Web Services accounts.

                        \n \n

                        Don't add the all value for any manual DB snapshots that\n contain private information that you don't want available to all Amazon Web Services\n accounts.

                        \n
                        \n

                        If the manual DB snapshot is encrypted, it can be shared, but only by specifying a\n list of authorized Amazon Web Services account IDs for the ValuesToAdd parameter. You\n can't use all as a value for that parameter in this case.

                        \n

                        To view which Amazon Web Services accounts have access to copy or restore a manual DB snapshot, or\n whether a manual DB snapshot public or private, use the DescribeDBSnapshotAttributes API action. The accounts are returned as\n values for the restore attribute.

                        " + "smithy.api#documentation": "

                        Adds an attribute and values to, or removes an attribute and values from, a manual DB snapshot.

                        \n

                        To share a manual DB snapshot with other Amazon Web Services accounts, specify restore\n as the AttributeName and use the ValuesToAdd parameter to add\n a list of IDs of the Amazon Web Services accounts that are authorized to restore the manual DB snapshot.\n Uses the value all to make the manual DB snapshot public, which means it\n can be copied or restored by all Amazon Web Services accounts.

                        \n \n

                        Don't add the all value for any manual DB snapshots that\n contain private information that you don't want available to all Amazon Web Services\n accounts.

                        \n
                        \n

                        If the manual DB snapshot is encrypted, it can be shared, but only by specifying a\n list of authorized Amazon Web Services account IDs for the ValuesToAdd parameter. You\n can't use all as a value for that parameter in this case.

                        \n

                        To view which Amazon Web Services accounts have access to copy or restore a manual DB snapshot, or\n whether a manual DB snapshot public or private, use the DescribeDBSnapshotAttributes API operation. The accounts are returned as\n values for the restore attribute.

                        " } }, "com.amazonaws.rds#ModifyDBSnapshotAttributeMessage": { @@ -14532,7 +14532,7 @@ "AttributeName": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

                        The name of the DB snapshot attribute to modify.

                        \n

                        To manage authorization for other Amazon Web Services accounts to copy or restore a manual DB snapshot, \n set this value to restore.

                        \n \n

                        To view the list of attributes available to modify, use the\n DescribeDBSnapshotAttributes API action.

                        \n
                        ", + "smithy.api#documentation": "

                        The name of the DB snapshot attribute to modify.

                        \n

                        To manage authorization for other Amazon Web Services accounts to copy or restore a manual DB snapshot, \n set this value to restore.

                        \n \n

                        To view the list of attributes available to modify, use the\n DescribeDBSnapshotAttributes API operation.

                        \n
                        ", "smithy.api#required": {} } }, @@ -16384,7 +16384,7 @@ } ], "traits": { - "smithy.api#documentation": "

                        You might need to reboot your DB instance, usually for maintenance reasons. \n For example, if you make certain modifications, \n or if you change the DB parameter group associated with the DB instance, \n you must reboot the instance for the changes to take effect.

                        \n

                        Rebooting a DB instance restarts the database engine service. \n Rebooting a DB instance results in a momentary outage, during which the DB instance status is set to rebooting.

                        \n

                        For more information about rebooting, see Rebooting a DB Instance in the Amazon RDS User Guide.\n

                        \n

                        This command doesn't apply to RDS Custom.

                        " + "smithy.api#documentation": "

                        You might need to reboot your DB instance, usually for maintenance reasons. \n For example, if you make certain modifications, \n or if you change the DB parameter group associated with the DB instance, \n you must reboot the instance for the changes to take effect.

                        \n

                        Rebooting a DB instance restarts the database engine service. \n Rebooting a DB instance results in a momentary outage, during which the DB instance status is set to rebooting.

                        \n

                        For more information about rebooting, see Rebooting a DB Instance in the Amazon RDS User Guide.\n

                        \n

                        This command doesn't apply to RDS Custom.

                        \n

                        If your DB instance is part of a Multi-AZ DB cluster, you can reboot the DB cluster with the RebootDBCluster operation.

                        " } }, "com.amazonaws.rds#RebootDBInstanceMessage": { @@ -17328,14 +17328,14 @@ "Engine": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

                        The name of the database engine to be used for this DB cluster.

                        \n

                        Valid Values: aurora (for MySQL 5.6-compatible Aurora), aurora-mysql (for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora), and aurora-postgresql\n

                        ", + "smithy.api#documentation": "

                        The name of the database engine to be used for this DB cluster.

                        \n

                        Valid Values: aurora (for MySQL 5.6-compatible Aurora) and aurora-mysql \n (for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora)

                        ", "smithy.api#required": {} } }, "EngineVersion": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

                        The version number of the database engine to use.

                        \n

                        To list all of the available engine versions for aurora (for MySQL 5.6-compatible Aurora), use the following command:

                        \n

                        \n aws rds describe-db-engine-versions --engine aurora --query \"DBEngineVersions[].EngineVersion\"\n

                        \n

                        To list all of the available engine versions for aurora-mysql (for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora), use the following command:

                        \n

                        \n aws rds describe-db-engine-versions --engine aurora-mysql --query \"DBEngineVersions[].EngineVersion\"\n

                        \n

                        To list all of the available engine versions for aurora-postgresql, use the following command:

                        \n

                        \n aws rds describe-db-engine-versions --engine aurora-postgresql --query \"DBEngineVersions[].EngineVersion\"\n

                        \n

                        \n Aurora MySQL\n

                        \n

                        Example: 5.6.10a, 5.6.mysql_aurora.1.19.2, 5.7.12, 5.7.mysql_aurora.2.04.5, 8.0.mysql_aurora.3.01.0\n

                        \n

                        \n Aurora PostgreSQL\n

                        \n

                        Example: 9.6.3, 10.7\n

                        " + "smithy.api#documentation": "

                        The version number of the database engine to use.

                        \n

                        To list all of the available engine versions for aurora (for MySQL 5.6-compatible Aurora), use the following command:

                        \n

                        \n aws rds describe-db-engine-versions --engine aurora --query \"DBEngineVersions[].EngineVersion\"\n

                        \n

                        To list all of the available engine versions for aurora-mysql (for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora), use the following command:

                        \n

                        \n aws rds describe-db-engine-versions --engine aurora-mysql --query \"DBEngineVersions[].EngineVersion\"\n

                        \n

                        \n Aurora MySQL\n

                        \n

                        Example: 5.6.10a, 5.6.mysql_aurora.1.19.2, 5.7.mysql_aurora.2.07.1,\n 8.0.mysql_aurora.3.02.0\n

                        " } }, "Port": { @@ -17638,7 +17638,7 @@ "EnableCloudwatchLogsExports": { "target": "com.amazonaws.rds#LogTypeList", "traits": { - "smithy.api#documentation": "

                        The list of logs that the restored DB cluster is to export to Amazon CloudWatch Logs.\n The values in the list depend on the DB engine being used.

                        \n

                        \n RDS for MySQL\n

                        \n

                        Possible values are error, general, and slowquery.

                        \n

                        \n RDS for PostgreSQL\n

                        \n

                        Possible values are postgresql and upgrade.

                        \n

                        \n Aurora MySQL\n

                        \n

                        Possible values are audit, error, general, and slowquery.

                        \n

                        \n Aurora PostgreSQL\n

                        \n

                        Possible value is postgresql.

                        \n

                        For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide..

                        \n

                        For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

                        \n

                        Valid for: Aurora DB clusters and Multi-AZ DB clusters

                        " + "smithy.api#documentation": "

                        The list of logs that the restored DB cluster is to export to Amazon CloudWatch Logs.\n The values in the list depend on the DB engine being used.

                        \n

                        \n RDS for MySQL\n

                        \n

                        Possible values are error, general, and slowquery.

                        \n

                        \n RDS for PostgreSQL\n

                        \n

                        Possible values are postgresql and upgrade.

                        \n

                        \n Aurora MySQL\n

                        \n

                        Possible values are audit, error, general, and slowquery.

                        \n

                        \n Aurora PostgreSQL\n

                        \n

                        Possible value is postgresql.

                        \n

                        For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.

                        \n

                        For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

                        \n

                        Valid for: Aurora DB clusters and Multi-AZ DB clusters

                        " } }, "EngineMode": { @@ -17874,7 +17874,7 @@ "EnableCloudwatchLogsExports": { "target": "com.amazonaws.rds#LogTypeList", "traits": { - "smithy.api#documentation": "

                        The list of logs that the restored DB cluster is to export to CloudWatch Logs. The values\n in the list depend on the DB engine being used.

                        \n

                        \n RDS for MySQL\n

                        \n

                        Possible values are error, general, and slowquery.

                        \n

                        \n RDS for PostgreSQL\n

                        \n

                        Possible values are postgresql and upgrade.

                        \n

                        \n Aurora MySQL\n

                        \n

                        Possible values are audit, error, general, and slowquery.

                        \n

                        \n Aurora PostgreSQL\n

                        \n

                        Possible value is postgresql.

                        \n

                        For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide..

                        \n

                        For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

                        \n

                        Valid for: Aurora DB clusters and Multi-AZ DB clusters

                        " + "smithy.api#documentation": "

                        The list of logs that the restored DB cluster is to export to CloudWatch Logs. The values\n in the list depend on the DB engine being used.

                        \n

                        \n RDS for MySQL\n

                        \n

                        Possible values are error, general, and slowquery.

                        \n

                        \n RDS for PostgreSQL\n

                        \n

                        Possible values are postgresql and upgrade.

                        \n

                        \n Aurora MySQL\n

                        \n

                        Possible values are audit, error, general, and slowquery.

                        \n

                        \n Aurora PostgreSQL\n

                        \n

                        Possible value is postgresql.

                        \n

                        For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.

                        \n

                        For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

                        \n

                        Valid for: Aurora DB clusters and Multi-AZ DB clusters

                        " } }, "DBClusterParameterGroupName": { @@ -18540,7 +18540,7 @@ "EnablePerformanceInsights": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

                        A value that indicates whether to enable Performance Insights for the DB instance.

                        \n

                        For more information, see \n Using Amazon Performance Insights in the Amazon RDS User Guide..

                        " + "smithy.api#documentation": "

                        A value that indicates whether to enable Performance Insights for the DB instance.

                        \n

                        For more information, see \n Using Amazon Performance Insights in the Amazon RDS User Guide.

                        " } }, "PerformanceInsightsKMSKeyId": { @@ -18552,7 +18552,7 @@ "PerformanceInsightsRetentionPeriod": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "

                        The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).

                        " + "smithy.api#documentation": "

                        The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:

                        \n
                          \n
                        • \n

                          7

                          \n
                        • \n
                        • \n

                          \n month * 31, where month is a number of months from 1-23

                          \n
                        • \n
                        • \n

                          731

                          \n
                        • \n
                        \n

                        For example, the following values are valid:

                        \n
                          \n
                        • \n

                          93 (3 months * 31)

                          \n
                        • \n
                        • \n

                          341 (11 months * 31)

                          \n
                        • \n
                        • \n

                          589 (19 months * 31)

                          \n
                        • \n
                        • \n

                          731

                          \n
                        • \n
                        \n

                        If you specify a retention period such as 94, which isn't a valid value, RDS issues an error.

                        " } }, "EnableCloudwatchLogsExports": { @@ -19111,7 +19111,7 @@ "MinCapacity": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "

                        The maximum capacity for the Aurora DB cluster in serverless DB engine\n mode.

                        " + "smithy.api#documentation": "

                        The minimum capacity for an Aurora DB cluster in serverless DB engine mode.

                        " } }, "MaxCapacity": { @@ -19604,7 +19604,7 @@ "PreSignedUrl": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

                        A URL that contains a Signature Version 4 signed request for the StartDBInstanceAutomatedBackupsReplication action to be \n called in the Amazon Web Services Region of the source DB instance. The presigned URL must be a valid request for the\n StartDBInstanceAutomatedBackupsReplication API action that can be executed in the Amazon Web Services Region that contains\n the source DB instance.

                        " + "smithy.api#documentation": "

                        In an Amazon Web Services GovCloud (US) Region, an URL that contains a Signature Version 4 signed request \n for the StartDBInstanceAutomatedBackupsReplication operation to call \n in the Amazon Web Services Region of the source DB instance. The presigned URL must be a valid request for the\n StartDBInstanceAutomatedBackupsReplication API operation that can run in \n the Amazon Web Services Region that contains the source DB instance.

                        \n

                        This setting applies only to Amazon Web Services GovCloud (US) Regions. It's ignored in other\n Amazon Web Services Regions.

                        \n

                        To learn how to generate a Signature Version 4 signed request, see \n \n Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and\n \n Signature Version 4 Signing Process.

                        \n \n

                        If you are using an Amazon Web Services SDK tool or the CLI, you can specify\n SourceRegion (or --source-region for the CLI)\n instead of specifying PreSignedUrl manually. Specifying\n SourceRegion autogenerates a presigned URL that is a valid request\n for the operation that can run in the source Amazon Web Services Region.

                        \n
                        " } } } @@ -19889,7 +19889,7 @@ } ], "traits": { - "smithy.api#documentation": "

                        Stops automated backup replication for a DB instance.

                        \n

                        This command doesn't apply to RDS Custom.

                        \n

                        For more information, see \n Replicating Automated Backups to Another Amazon Web Services Region in the Amazon RDS User Guide.\n

                        " + "smithy.api#documentation": "

                        Stops automated backup replication for a DB instance.

                        \n

                        This command doesn't apply to RDS Custom, Aurora MySQL, and Aurora PostgreSQL.

                        \n

                        For more information, see \n Replicating Automated Backups to Another Amazon Web Services Region in the Amazon RDS User Guide.\n

                        " } }, "com.amazonaws.rds#StopDBInstanceAutomatedBackupsReplicationMessage": { @@ -20556,7 +20556,7 @@ "Status": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

                        The status of the VPC security group.

                        " + "smithy.api#documentation": "

                        The membership status of the VPC security group.

                        \n

                        Currently, the only valid status is active.

                        " } } }, From 7fb7490a4c093be17407d16f77151f3fa7c3e03a Mon Sep 17 00:00:00 2001 From: awstools Date: Thu, 21 Jul 2022 18:23:03 +0000 Subject: [PATCH 28/31] feat(client-network-firewall): Network Firewall now supports referencing dynamic IP sets from stateful rule groups, for IP sets stored in Amazon VPC prefix lists. --- .../src/models/models_0.ts | 120 +++++++++++++++ .../src/protocols/Aws_json1_0.ts | 108 +++++++++++++ .../aws-models/network-firewall.json | 143 ++++++++++++++++++ 3 files changed, 371 insertions(+) diff --git a/clients/client-network-firewall/src/models/models_0.ts b/clients/client-network-firewall/src/models/models_0.ts index 2c885f3d56958..a6142a524f281 100644 --- a/clients/client-network-firewall/src/models/models_0.ts +++ b/clients/client-network-firewall/src/models/models_0.ts @@ -470,7 +470,75 @@ export namespace Attachment { }); } +/** + *

                        General information about the IP set.

                        + */ +export interface IPSetMetadata { + /** + *

                        Describes the total number of CIDR blocks currently in use by the IP set references in a firewall. To determine how many CIDR blocks are available for you to use in a firewall, you can call AvailableCIDRCount.

                        + */ + ResolvedCIDRCount?: number; +} + +export namespace IPSetMetadata { + /** + * @internal + */ + export const filterSensitiveLog = (obj: IPSetMetadata): any => ({ + ...obj, + }); +} + +/** + *

                        Summarizes the CIDR blocks used by the IP set references in a firewall. Network Firewall calculates the number of CIDRs by taking an aggregated count of all CIDRs used by the IP sets you are referencing.

                        + */ +export interface CIDRSummary { + /** + *

                        The number of CIDR blocks available for use by the IP set references in a firewall.

                        + */ + AvailableCIDRCount?: number; + + /** + *

                        The number of CIDR blocks used by the IP set references in a firewall.

                        + */ + UtilizedCIDRCount?: number; + + /** + *

                        The list of the IP set references used by a firewall.

                        + */ + IPSetReferences?: Record; +} + +export namespace CIDRSummary { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CIDRSummary): any => ({ + ...obj, + }); +} + +/** + *

                        The capacity usage summary of the resources used by the ReferenceSets in a firewall.

                        + */ +export interface CapacityUsageSummary { + /** + *

                        Describes the capacity usage of the CIDR blocks used by the IP set references in a firewall.

                        + */ + CIDRs?: CIDRSummary; +} + +export namespace CapacityUsageSummary { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CapacityUsageSummary): any => ({ + ...obj, + }); +} + export enum ConfigurationSyncState { + CAPACITY_CONSTRAINED = "CAPACITY_CONSTRAINED", IN_SYNC = "IN_SYNC", PENDING = "PENDING", } @@ -699,6 +767,7 @@ export enum FirewallStatusValue { } export enum PerObjectSyncStatus { + CAPACITY_CONSTRAINED = "CAPACITY_CONSTRAINED", IN_SYNC = "IN_SYNC", PENDING = "PENDING", } @@ -810,6 +879,11 @@ export interface FirewallStatus { * and configuration object.

                        */ SyncStates?: Record; + + /** + *

                        Describes the capacity usage of the resources contained in a firewall's reference sets. Network Firewall calculates the capacity usage by taking an aggregated count of all of the resources used by all of the reference sets in a firewall.

                        + */ + CapacityUsageSummary?: CapacityUsageSummary; } export namespace FirewallStatus { @@ -1268,6 +1342,47 @@ export namespace CreateFirewallPolicyResponse { }); } +/** + *

                        Configures one or more IP set references for a Suricata-compatible rule group. This is used in CreateRuleGroup or UpdateRuleGroup. An IP set reference is a rule variable that references a resource that you create and manage in another Amazon Web Services service, such as an Amazon VPC prefix list. Network Firewall IP set references enable you to dynamically update the contents of your rules. When you create, update, or delete the IP set you are referencing in your rule, Network Firewall automatically updates the rule's content with the changes. For more information about IP set references in Network Firewall, see Using IP set references in the Network Firewall Developer Guide.

                        + *

                        + * Network Firewall currently supports only Amazon VPC prefix lists as IP set references. + *

                        + */ +export interface IPSetReference { + /** + *

                        The Amazon Resource Name (ARN) of the resource that you are referencing in your rule group.

                        + */ + ReferenceArn?: string; +} + +export namespace IPSetReference { + /** + * @internal + */ + export const filterSensitiveLog = (obj: IPSetReference): any => ({ + ...obj, + }); +} + +/** + *

                        Contains a set of IP set references.

                        + */ +export interface ReferenceSets { + /** + *

                        The list of IP set references.

                        + */ + IPSetReferences?: Record; +} + +export namespace ReferenceSets { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ReferenceSets): any => ({ + ...obj, + }); +} + export enum GeneratedRulesType { ALLOWLIST = "ALLOWLIST", DENYLIST = "DENYLIST", @@ -1914,6 +2029,11 @@ export interface RuleGroup { */ RuleVariables?: RuleVariables; + /** + *

                        The list of a rule group's reference sets.

                        + */ + ReferenceSets?: ReferenceSets; + /** *

                        The stateful rules or stateless rules for the rule group.

                        */ diff --git a/clients/client-network-firewall/src/protocols/Aws_json1_0.ts b/clients/client-network-firewall/src/protocols/Aws_json1_0.ts index f3bb6ff4df143..60c83e340a4d1 100644 --- a/clients/client-network-firewall/src/protocols/Aws_json1_0.ts +++ b/clients/client-network-firewall/src/protocols/Aws_json1_0.ts @@ -109,6 +109,8 @@ import { AssociateSubnetsRequest, AssociateSubnetsResponse, Attachment, + CapacityUsageSummary, + CIDRSummary, CreateFirewallPolicyRequest, CreateFirewallPolicyResponse, CreateFirewallRequest, @@ -154,6 +156,8 @@ import { InvalidResourcePolicyException, InvalidTokenException, IPSet, + IPSetMetadata, + IPSetReference, LimitExceededException, ListFirewallPoliciesRequest, ListFirewallPoliciesResponse, @@ -173,6 +177,7 @@ import { PublishMetricAction, PutResourcePolicyRequest, PutResourcePolicyResponse, + ReferenceSets, ResourceNotFoundException, ResourceOwnerCheckException, RuleDefinition, @@ -2842,6 +2847,24 @@ const serializeAws_json1_0IPSet = (input: IPSet, context: __SerdeContext): any = }; }; +const serializeAws_json1_0IPSetReference = (input: IPSetReference, context: __SerdeContext): any => { + return { + ...(input.ReferenceArn != null && { ReferenceArn: input.ReferenceArn }), + }; +}; + +const serializeAws_json1_0IPSetReferenceMap = (input: Record, context: __SerdeContext): any => { + return Object.entries(input).reduce((acc: Record, [key, value]: [string, any]) => { + if (value === null) { + return acc; + } + return { + ...acc, + [key]: serializeAws_json1_0IPSetReference(value, context), + }; + }, {}); +}; + const serializeAws_json1_0IPSets = (input: Record, context: __SerdeContext): any => { return Object.entries(input).reduce((acc: Record, [key, value]: [string, any]) => { if (value === null) { @@ -3012,6 +3035,14 @@ const serializeAws_json1_0PutResourcePolicyRequest = ( }; }; +const serializeAws_json1_0ReferenceSets = (input: ReferenceSets, context: __SerdeContext): any => { + return { + 
...(input.IPSetReferences != null && { + IPSetReferences: serializeAws_json1_0IPSetReferenceMap(input.IPSetReferences, context), + }), + }; +}; + const serializeAws_json1_0RuleDefinition = (input: RuleDefinition, context: __SerdeContext): any => { return { ...(input.Actions != null && { Actions: serializeAws_json1_0StatelessActions(input.Actions, context) }), @@ -3023,6 +3054,9 @@ const serializeAws_json1_0RuleDefinition = (input: RuleDefinition, context: __Se const serializeAws_json1_0RuleGroup = (input: RuleGroup, context: __SerdeContext): any => { return { + ...(input.ReferenceSets != null && { + ReferenceSets: serializeAws_json1_0ReferenceSets(input.ReferenceSets, context), + }), ...(input.RuleVariables != null && { RuleVariables: serializeAws_json1_0RuleVariables(input.RuleVariables, context), }), @@ -3544,6 +3578,23 @@ const deserializeAws_json1_0Attachment = (output: any, context: __SerdeContext): } as any; }; +const deserializeAws_json1_0CapacityUsageSummary = (output: any, context: __SerdeContext): CapacityUsageSummary => { + return { + CIDRs: output.CIDRs != null ? deserializeAws_json1_0CIDRSummary(output.CIDRs, context) : undefined, + } as any; +}; + +const deserializeAws_json1_0CIDRSummary = (output: any, context: __SerdeContext): CIDRSummary => { + return { + AvailableCIDRCount: __expectInt32(output.AvailableCIDRCount), + IPSetReferences: + output.IPSetReferences != null + ? deserializeAws_json1_0IPSetMetadataMap(output.IPSetReferences, context) + : undefined, + UtilizedCIDRCount: __expectInt32(output.UtilizedCIDRCount), + } as any; +}; + const deserializeAws_json1_0CreateFirewallPolicyResponse = ( output: any, context: __SerdeContext @@ -3880,6 +3931,10 @@ const deserializeAws_json1_0Firewalls = (output: any, context: __SerdeContext): const deserializeAws_json1_0FirewallStatus = (output: any, context: __SerdeContext): FirewallStatus => { return { + CapacityUsageSummary: + output.CapacityUsageSummary != null + ? 
deserializeAws_json1_0CapacityUsageSummary(output.CapacityUsageSummary, context) + : undefined, ConfigurationSyncStateSummary: __expectString(output.ConfigurationSyncStateSummary), Status: __expectString(output.Status), SyncStates: output.SyncStates != null ? deserializeAws_json1_0SyncStates(output.SyncStates, context) : undefined, @@ -3964,6 +4019,48 @@ const deserializeAws_json1_0IPSet = (output: any, context: __SerdeContext): IPSe } as any; }; +const deserializeAws_json1_0IPSetMetadata = (output: any, context: __SerdeContext): IPSetMetadata => { + return { + ResolvedCIDRCount: __expectInt32(output.ResolvedCIDRCount), + } as any; +}; + +const deserializeAws_json1_0IPSetMetadataMap = ( + output: any, + context: __SerdeContext +): Record => { + return Object.entries(output).reduce((acc: Record, [key, value]: [string, any]) => { + if (value === null) { + return acc; + } + return { + ...acc, + [key]: deserializeAws_json1_0IPSetMetadata(value, context), + }; + }, {}); +}; + +const deserializeAws_json1_0IPSetReference = (output: any, context: __SerdeContext): IPSetReference => { + return { + ReferenceArn: __expectString(output.ReferenceArn), + } as any; +}; + +const deserializeAws_json1_0IPSetReferenceMap = ( + output: any, + context: __SerdeContext +): Record => { + return Object.entries(output).reduce((acc: Record, [key, value]: [string, any]) => { + if (value === null) { + return acc; + } + return { + ...acc, + [key]: deserializeAws_json1_0IPSetReference(value, context), + }; + }, {}); +}; + const deserializeAws_json1_0IPSets = (output: any, context: __SerdeContext): Record => { return Object.entries(output).reduce((acc: Record, [key, value]: [string, any]) => { if (value === null) { @@ -4155,6 +4252,15 @@ const deserializeAws_json1_0PutResourcePolicyResponse = ( return {} as any; }; +const deserializeAws_json1_0ReferenceSets = (output: any, context: __SerdeContext): ReferenceSets => { + return { + IPSetReferences: + output.IPSetReferences != null + ? 
deserializeAws_json1_0IPSetReferenceMap(output.IPSetReferences, context) + : undefined, + } as any; +}; + const deserializeAws_json1_0ResourceNotFoundException = ( output: any, context: __SerdeContext @@ -4185,6 +4291,8 @@ const deserializeAws_json1_0RuleDefinition = (output: any, context: __SerdeConte const deserializeAws_json1_0RuleGroup = (output: any, context: __SerdeContext): RuleGroup => { return { + ReferenceSets: + output.ReferenceSets != null ? deserializeAws_json1_0ReferenceSets(output.ReferenceSets, context) : undefined, RuleVariables: output.RuleVariables != null ? deserializeAws_json1_0RuleVariables(output.RuleVariables, context) : undefined, RulesSource: diff --git a/codegen/sdk-codegen/aws-models/network-firewall.json b/codegen/sdk-codegen/aws-models/network-firewall.json index 7fc1d8c7f3726..7c9bb8ac18a07 100644 --- a/codegen/sdk-codegen/aws-models/network-firewall.json +++ b/codegen/sdk-codegen/aws-models/network-firewall.json @@ -340,6 +340,56 @@ "com.amazonaws.networkfirewall#Boolean": { "type": "boolean" }, + "com.amazonaws.networkfirewall#CIDRCount": { + "type": "integer", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": 0, + "max": 1000000 + } + } + }, + "com.amazonaws.networkfirewall#CIDRSummary": { + "type": "structure", + "members": { + "AvailableCIDRCount": { + "target": "com.amazonaws.networkfirewall#CIDRCount", + "traits": { + "smithy.api#documentation": "

                        The number of CIDR blocks available for use by the IP set references in a firewall.

                        " + } + }, + "UtilizedCIDRCount": { + "target": "com.amazonaws.networkfirewall#CIDRCount", + "traits": { + "smithy.api#documentation": "

                        The number of CIDR blocks used by the IP set references in a firewall.

                        " + } + }, + "IPSetReferences": { + "target": "com.amazonaws.networkfirewall#IPSetMetadataMap", + "traits": { + "smithy.api#documentation": "

                        The list of the IP set references used by a firewall.

                        " + } + } + }, + "traits": { + "smithy.api#documentation": "

                        Summarizes the CIDR blocks used by the IP set references in a firewall. Network Firewall calculates the number of CIDRs by taking an aggregated count of all CIDRs used by the IP sets you are referencing.

                        " + } + }, + "com.amazonaws.networkfirewall#CapacityUsageSummary": { + "type": "structure", + "members": { + "CIDRs": { + "target": "com.amazonaws.networkfirewall#CIDRSummary", + "traits": { + "smithy.api#documentation": "

                        Describes the capacity usage of the CIDR blocks used by the IP set references in a firewall.

                        " + } + } + }, + "traits": { + "smithy.api#documentation": "

                        The capacity usage summary of the resources used by the ReferenceSets in a firewall.

                        " + } + }, "com.amazonaws.networkfirewall#CollectionMember_String": { "type": "string" }, @@ -354,6 +404,10 @@ { "value": "IN_SYNC", "name": "IN_SYNC" + }, + { + "value": "CAPACITY_CONSTRAINED", + "name": "CAPACITY_CONSTRAINED" } ] } @@ -1824,6 +1878,12 @@ "traits": { "smithy.api#documentation": "

                        The subnets that you've configured for use by the Network Firewall firewall. This contains\n one array element per Availability Zone where you've configured a subnet. These objects\n provide details of the information that is summarized in the\n ConfigurationSyncStateSummary and Status, broken down by zone\n and configuration object.

                        " } + }, + "CapacityUsageSummary": { + "target": "com.amazonaws.networkfirewall#CapacityUsageSummary", + "traits": { + "smithy.api#documentation": "

                        Describes the capacity usage of the resources contained in a firewall's reference sets. Network Firewall calculates the capacity usage by taking an aggregated count of all of the resources used by all of the reference sets in a firewall.

                        " + } } }, "traits": { @@ -1961,6 +2021,65 @@ "smithy.api#documentation": "

                        A list of IP addresses and address ranges, in CIDR notation. This is part of a RuleVariables.

                        " } }, + "com.amazonaws.networkfirewall#IPSetArn": { + "type": "string" + }, + "com.amazonaws.networkfirewall#IPSetMetadata": { + "type": "structure", + "members": { + "ResolvedCIDRCount": { + "target": "com.amazonaws.networkfirewall#CIDRCount", + "traits": { + "smithy.api#documentation": "

                        Describes the total number of CIDR blocks currently in use by the IP set references in a firewall. To determine how many CIDR blocks are available for you to use in a firewall, you can call AvailableCIDRCount.

                        " + } + } + }, + "traits": { + "smithy.api#documentation": "

                        General information about the IP set.

                        " + } + }, + "com.amazonaws.networkfirewall#IPSetMetadataMap": { + "type": "map", + "key": { + "target": "com.amazonaws.networkfirewall#IPSetArn" + }, + "value": { + "target": "com.amazonaws.networkfirewall#IPSetMetadata" + } + }, + "com.amazonaws.networkfirewall#IPSetReference": { + "type": "structure", + "members": { + "ReferenceArn": { + "target": "com.amazonaws.networkfirewall#ResourceArn", + "traits": { + "smithy.api#documentation": "

                        The Amazon Resource Name (ARN) of the resource that you are referencing in your rule group.

                        " + } + } + }, + "traits": { + "smithy.api#documentation": "

                        Configures one or more IP set references for a Suricata-compatible rule group. This is used in CreateRuleGroup or UpdateRuleGroup. An IP set reference is a rule variable that references a resource that you create and manage in another Amazon Web Services service, such as an Amazon VPC prefix list. Network Firewall IP set references enable you to dynamically update the contents of your rules. When you create, update, or delete the IP set you are referencing in your rule, Network Firewall automatically updates the rule's content with the changes. For more information about IP set references in Network Firewall, see Using IP set references in the Network Firewall Developer Guide.

                        \n

                        \n Network Firewall currently supports only Amazon VPC prefix lists as IP set references.\n

                        " + } + }, + "com.amazonaws.networkfirewall#IPSetReferenceMap": { + "type": "map", + "key": { + "target": "com.amazonaws.networkfirewall#IPSetReferenceName" + }, + "value": { + "target": "com.amazonaws.networkfirewall#IPSetReference" + } + }, + "com.amazonaws.networkfirewall#IPSetReferenceName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 32 + }, + "smithy.api#pattern": "^[A-Za-z][A-Za-z0-9_]*$" + } + }, "com.amazonaws.networkfirewall#IPSets": { "type": "map", "key": { @@ -2699,6 +2818,10 @@ { "value": "IN_SYNC", "name": "IN_SYNC" + }, + { + "value": "CAPACITY_CONSTRAINED", + "name": "CAPACITY_CONSTRAINED" } ] } @@ -2874,6 +2997,20 @@ "type": "structure", "members": {} }, + "com.amazonaws.networkfirewall#ReferenceSets": { + "type": "structure", + "members": { + "IPSetReferences": { + "target": "com.amazonaws.networkfirewall#IPSetReferenceMap", + "traits": { + "smithy.api#documentation": "

                        The list of IP set references.

                        " + } + } + }, + "traits": { + "smithy.api#documentation": "

                        Contains a set of IP set references.

                        " + } + }, "com.amazonaws.networkfirewall#ResourceArn": { "type": "string", "traits": { @@ -3010,6 +3147,12 @@ "smithy.api#documentation": "

                        Settings that are available for use in the rules in the rule group. You can only use\n these for stateful rule groups.

                        " } }, + "ReferenceSets": { + "target": "com.amazonaws.networkfirewall#ReferenceSets", + "traits": { + "smithy.api#documentation": "

                        The list of a rule group's reference sets.

                        " + } + }, "RulesSource": { "target": "com.amazonaws.networkfirewall#RulesSource", "traits": { From b8837a8fe3ee514eed76a87d54e9165433ea0215 Mon Sep 17 00:00:00 2001 From: awstools Date: Thu, 21 Jul 2022 18:23:03 +0000 Subject: [PATCH 29/31] feat(client-frauddetector): The release introduces Account Takeover Insights (ATI) model. The ATI model detects fraud relating to account takeover. This release also adds support for new variable types: ARE_CREDENTIALS_VALID and SESSION_ID and adds new structures to Model Version APIs. --- .../src/models/models_0.ts | 510 +++++++++++++++++- .../src/protocols/Aws_json1_1.ts | 257 ++++++++- .../sdk-codegen/aws-models/frauddetector.json | 374 ++++++++++++- 3 files changed, 1125 insertions(+), 16 deletions(-) diff --git a/clients/client-frauddetector/src/models/models_0.ts b/clients/client-frauddetector/src/models/models_0.ts index 955a7ebe5e2e6..63f36c3c85ba6 100644 --- a/clients/client-frauddetector/src/models/models_0.ts +++ b/clients/client-frauddetector/src/models/models_0.ts @@ -22,6 +22,110 @@ export class AccessDeniedException extends __BaseException { } } +/** + *

                        The log odds metric details.

                        + * + *

                        Account Takeover Insights (ATI) model uses event variables from the login data you + * provide to continuously calculate a set of variables (aggregated variables) based on historical events. For example, your ATI model might calculate the number of times an user has logged in using the same IP address. + * In this case, event variables used to derive the aggregated variables are IP address and user.

                        + */ +export interface AggregatedLogOddsMetric { + /** + *

                        + * The names of all the variables. + *

                        + */ + variableNames: string[] | undefined; + + /** + *

                        + * The relative importance of the variables in the list to the other event variable. + *

                        + */ + aggregatedVariablesImportance: number | undefined; +} + +export namespace AggregatedLogOddsMetric { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AggregatedLogOddsMetric): any => ({ + ...obj, + }); +} + +/** + *

                        + * The details of the impact of aggregated variables on the prediction score.

                        + * + *

                        Account Takeover Insights (ATI) model uses the login data you + * provide to continuously calculate a set of variables (aggregated variables) based on historical events. For example, the model might calculate the number of times an user has logged in using the same IP address. + * In this case, event variables used to derive the aggregated variables are IP address and user.

                        + */ +export interface AggregatedVariablesImpactExplanation { + /** + *

                        + * The names of all the event variables that were used to derive the aggregated variables. + *

                        + */ + eventVariableNames?: string[]; + + /** + *

                        + * The relative impact of the aggregated variables in terms of magnitude on the prediction scores. + *

                        + */ + relativeImpact?: string; + + /** + *

                        + * The raw, uninterpreted value represented as log-odds of the fraud. These values are usually between -10 to +10, but range from -infinity to +infinity.

                        + *
                          + *
                        • + *

                          A positive value indicates that the variables drove the risk score up.

                          + *
                        • + *
                        • + *

                          A negative value indicates that the variables drove the risk score down.

                          + *
                        • + *
                        + */ + logOddsImpact?: number; +} + +export namespace AggregatedVariablesImpactExplanation { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AggregatedVariablesImpactExplanation): any => ({ + ...obj, + }); +} + +/** + *

                        The details of the relative importance of the aggregated variables.

                        + * + *

                        Account Takeover Insights (ATI) model uses event variables from the login data you + * provide to continuously calculate a set of variables (aggregated variables) based on historical events. For example, your ATI model might calculate the number of times an user has logged in using the same IP address. + * In this case, event variables used to derive the aggregated variables are IP address and user.

                        + */ +export interface AggregatedVariablesImportanceMetrics { + /** + *

                        + * List of variables' metrics. + *

                        + */ + logOddsMetrics?: AggregatedLogOddsMetric[]; +} + +export namespace AggregatedVariablesImportanceMetrics { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AggregatedVariablesImportanceMetrics): any => ({ + ...obj, + }); +} + export enum AsyncJobStatus { CANCELED = "CANCELED", CANCEL_IN_PROGRESS = "CANCEL_IN_PROGRESS", @@ -31,6 +135,110 @@ export enum AsyncJobStatus { IN_PROGRESS_INITIALIZING = "IN_PROGRESS_INITIALIZING", } +/** + *

                        + * The Account Takeover Insights (ATI) model performance metrics data points. + *

                        + */ +export interface ATIMetricDataPoint { + /** + *

                        + * The challenge rate. This indicates the percentage of login events that the model recommends to challenge such as + * one-time password, multi-factor authentication, and investigations. + *

                        + */ + cr?: number; + + /** + *

                        + * The anomaly discovery rate. This metric quantifies the percentage of anomalies that can be detected by the model at the selected score threshold. + * A lower score threshold increases the percentage of anomalies captured by the model, but would also require challenging a larger percentage of + * login events, leading to a higher customer friction. + *

                        + */ + adr?: number; + + /** + *

                        + * The model's threshold that specifies an acceptable fraud capture rate. For example, a threshold of 500 means any model score 500 or above is + * labeled as fraud. + *

                        + */ + threshold?: number; + + /** + *

                        + * The account takeover discovery rate. This metric quantifies the percentage of account compromise events that can be detected by the model at the selected score threshold. + * This metric is only available if 50 or more entities with at-least one labeled account takeover event is present in the ingested dataset. + *

                        + */ + atodr?: number; +} + +export namespace ATIMetricDataPoint { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ATIMetricDataPoint): any => ({ + ...obj, + }); +} + +/** + *

                        + * The Account Takeover Insights (ATI) model performance score. + *

                        + */ +export interface ATIModelPerformance { + /** + *

                        + * The anomaly separation index (ASI) score. This metric summarizes the overall ability of the model to separate anomalous activities from the normal behavior. Depending on the business, a + * large fraction of these anomalous activities can be malicious and correspond to the account takeover attacks. A model with no separability power will have the lowest possible + * ASI score of 0.5, whereas the a model with a high separability power will have the highest possible ASI score of 1.0 + *

                        + */ + asi?: number; +} + +export namespace ATIModelPerformance { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ATIModelPerformance): any => ({ + ...obj, + }); +} + +/** + *

                        + * The Account Takeover Insights (ATI) model training metric details. + *

                        + */ +export interface ATITrainingMetricsValue { + /** + *

                        + * The model's performance metrics data points. + *

                        + */ + metricDataPoints?: ATIMetricDataPoint[]; + + /** + *

                        + * The model's overall performance scores. + *

                        + */ + modelPerformance?: ATIModelPerformance; +} + +export namespace ATITrainingMetricsValue { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ATITrainingMetricsValue): any => ({ + ...obj, + }); +} + /** *

                        A key and value pair.

                        */ @@ -554,6 +762,7 @@ export namespace CreateBatchPredictionJobResult { } export enum ModelTypeEnum { + ACCOUNT_TAKEOVER_INSIGHTS = "ACCOUNT_TAKEOVER_INSIGHTS", ONLINE_FRAUD_INSIGHTS = "ONLINE_FRAUD_INSIGHTS", TRANSACTION_FRAUD_INSIGHTS = "TRANSACTION_FRAUD_INSIGHTS", } @@ -836,7 +1045,7 @@ export interface LabelSchema { *

                        The label mapper maps the Amazon Fraud Detector supported model classification labels (FRAUD, LEGIT) to the appropriate event type labels. For example, if "FRAUD" and "LEGIT" are Amazon Fraud Detector supported labels, this mapper could be: {"FRAUD" => ["0"], "LEGIT" => ["1"]} or {"FRAUD" => ["false"], "LEGIT" => ["true"]} or {"FRAUD" => ["fraud", "abuse"], "LEGIT" => ["legit", "safe"]}. The value part of the mapper is a list, because you may have multiple label variants from your event type for a single Amazon Fraud Detector label. *

                        */ - labelMapper: Record | undefined; + labelMapper?: Record; /** *

                        The action to take for unlabeled events.

                        @@ -865,7 +1074,7 @@ export interface TrainingDataSchema { /** *

                        The label schema.

                        */ - labelSchema: LabelSchema | undefined; + labelSchema?: LabelSchema; } export namespace TrainingDataSchema { @@ -1746,11 +1955,11 @@ export namespace FileValidationMessage { } /** - *

                        The model training validation messages.

                        + *

                        The model training data validation metrics.

                        */ export interface DataValidationMetrics { /** - *

                        The file-specific model training validation messages.

                        + *

                        The file-specific model training data validation messages.

                        */ fileLevelMessages?: FileValidationMessage[]; @@ -1904,6 +2113,281 @@ export namespace TrainingResult { }); } +/** + *

                        + * The Online Fraud Insights (OFI) model performance metrics data points. + *

                        + */ +export interface OFIMetricDataPoint { + /** + *

                        + * The false positive rate. This is the percentage of total legitimate events that are incorrectly predicted as fraud. + *

                        + */ + fpr?: number; + + /** + *

                        + * The percentage of fraud events correctly predicted as fraudulent as compared to all events predicted as fraudulent. + *

                        + */ + precision?: number; + + /** + *

                        + * The true positive rate. This is the percentage of total fraud the model detects. Also known as capture rate. + *

                        + */ + tpr?: number; + + /** + *

                        + * The model threshold that specifies an acceptable fraud capture rate. For example, a threshold of 500 means any model score 500 or above is labeled as fraud. + *

                        + */ + threshold?: number; +} + +export namespace OFIMetricDataPoint { + /** + * @internal + */ + export const filterSensitiveLog = (obj: OFIMetricDataPoint): any => ({ + ...obj, + }); +} + +/** + *

                        + * The Online Fraud Insights (OFI) model performance score. + *

                        + */ +export interface OFIModelPerformance { + /** + *

                        + * The area under the curve (auc). This summarizes the total positive rate (tpr) and false positive rate (FPR) across all possible model score thresholds. + *

                        + */ + auc?: number; +} + +export namespace OFIModelPerformance { + /** + * @internal + */ + export const filterSensitiveLog = (obj: OFIModelPerformance): any => ({ + ...obj, + }); +} + +/** + *

                        + * The Online Fraud Insights (OFI) model training metric details. + *

                        + */ +export interface OFITrainingMetricsValue { + /** + *

                        + * The model's performance metrics data points. + *

                        + */ + metricDataPoints?: OFIMetricDataPoint[]; + + /** + *

                        + * The model's overall performance score. + *

                        + */ + modelPerformance?: OFIModelPerformance; +} + +export namespace OFITrainingMetricsValue { + /** + * @internal + */ + export const filterSensitiveLog = (obj: OFITrainingMetricsValue): any => ({ + ...obj, + }); +} + +/** + *

                        + * The performance metrics data points for Transaction Fraud Insights (TFI) model. + *

                        + */ +export interface TFIMetricDataPoint { + /** + *

                        + * The false positive rate. This is the percentage of total legitimate events that are incorrectly predicted as fraud. + *

                        + */ + fpr?: number; + + /** + *

                        + * The percentage of fraud events correctly predicted as fraudulent as compared to all events predicted as fraudulent. + *

                        + */ + precision?: number; + + /** + *

                        + * The true positive rate. This is the percentage of total fraud the model detects. Also known as capture rate. + *

                        + */ + tpr?: number; + + /** + *

                        + * The model threshold that specifies an acceptable fraud capture rate. For example, a threshold of 500 means any + * model score 500 or above is labeled as fraud. + *

                        + */ + threshold?: number; +} + +export namespace TFIMetricDataPoint { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TFIMetricDataPoint): any => ({ + ...obj, + }); +} + +/** + *

                        + * The Transaction Fraud Insights (TFI) model performance score. + *

                        + */ +export interface TFIModelPerformance { + /** + *

                        + * The area under the curve (auc). This summarizes the total positive rate (tpr) and false positive rate (FPR) across all possible model score thresholds. + *

                        + */ + auc?: number; +} + +export namespace TFIModelPerformance { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TFIModelPerformance): any => ({ + ...obj, + }); +} + +/** + *

                        + * The Transaction Fraud Insights (TFI) model training metric details. + *

                        + */ +export interface TFITrainingMetricsValue { + /** + *

                        + * The model's performance metrics data points. + *

                        + */ + metricDataPoints?: TFIMetricDataPoint[]; + + /** + *

                        + * The model performance score. + *

                        + */ + modelPerformance?: TFIModelPerformance; +} + +export namespace TFITrainingMetricsValue { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TFITrainingMetricsValue): any => ({ + ...obj, + }); +} + +/** + *

                        + * The training metrics details. + *

                        + */ +export interface TrainingMetricsV2 { + /** + *

                        + * The Online Fraud Insights (OFI) model training metric details. + *

                        + */ + ofi?: OFITrainingMetricsValue; + + /** + *

                        + * The Transaction Fraud Insights (TFI) model training metric details. + *

                        + */ + tfi?: TFITrainingMetricsValue; + + /** + *

                        + * The Account Takeover Insights (ATI) model training metric details. + *

                        + */ + ati?: ATITrainingMetricsValue; +} + +export namespace TrainingMetricsV2 { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TrainingMetricsV2): any => ({ + ...obj, + }); +} + +/** + *

                        + * The training result details. + *

                        + */ +export interface TrainingResultV2 { + /** + *

                        The model training data validation metrics.

                        + */ + dataValidationMetrics?: DataValidationMetrics; + + /** + *

                        + * The training metric details. + *

                        + */ + trainingMetricsV2?: TrainingMetricsV2; + + /** + *

                        The variable importance metrics details.

                        + */ + variableImportanceMetrics?: VariableImportanceMetrics; + + /** + *

                        + * The variable importance metrics of the aggregated variables. + *

                        + *

                        Account Takeover Insights (ATI) model uses event variables from the login data you + * provide to continuously calculate a set of variables (aggregated variables) based on historical events. For example, your ATI model might calculate the number of times a user has logged in using the same IP address. + * In this case, event variables used to derive the aggregated variables are IP address and user.

                        + */ + aggregatedVariablesImportanceMetrics?: AggregatedVariablesImportanceMetrics; +} + +export namespace TrainingResultV2 { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TrainingResultV2): any => ({ + ...obj, + }); +} + /** *

                        The details of the model version.

                        */ @@ -1967,6 +2451,13 @@ export interface ModelVersionDetail { *

                        The model version ARN.

                        */ arn?: string; + + /** + *

                        + * The training result details. The details include the relative importance of the variables. + *

                        + */ + trainingResultV2?: TrainingResultV2; } export namespace ModelVersionDetail { @@ -3071,6 +3562,17 @@ export interface PredictionExplanations { *

                        */ variableImpactExplanations?: VariableImpactExplanation[]; + + /** + *

                        + * The details of the aggregated variables impact on the prediction score. + *

                        + * + *

                        Account Takeover Insights (ATI) model uses event variables from the login data you + * provide to continuously calculate a set of variables (aggregated variables) based on historical events. For example, your ATI model might calculate the number of times a user has logged in using the same IP address. + * In this case, event variables used to derive the aggregated variables are IP address and user.

                        + */ + aggregatedVariablesImpactExplanations?: AggregatedVariablesImpactExplanation[]; } export namespace PredictionExplanations { diff --git a/clients/client-frauddetector/src/protocols/Aws_json1_1.ts b/clients/client-frauddetector/src/protocols/Aws_json1_1.ts index a024183951327..58ccbc268290e 100644 --- a/clients/client-frauddetector/src/protocols/Aws_json1_1.ts +++ b/clients/client-frauddetector/src/protocols/Aws_json1_1.ts @@ -155,6 +155,12 @@ import { UpdateVariableCommandInput, UpdateVariableCommandOutput } from "../comm import { FraudDetectorServiceException as __BaseException } from "../models/FraudDetectorServiceException"; import { AccessDeniedException, + AggregatedLogOddsMetric, + AggregatedVariablesImpactExplanation, + AggregatedVariablesImportanceMetrics, + ATIMetricDataPoint, + ATIModelPerformance, + ATITrainingMetricsValue, BatchCreateVariableError, BatchCreateVariableRequest, BatchCreateVariableResult, @@ -291,6 +297,9 @@ import { ModelVersion, ModelVersionDetail, ModelVersionEvaluation, + OFIMetricDataPoint, + OFIModelPerformance, + OFITrainingMetricsValue, Outcome, PredictionExplanations, PredictionTimeRange, @@ -318,10 +327,15 @@ import { Tag, TagResourceRequest, TagResourceResult, + TFIMetricDataPoint, + TFIModelPerformance, + TFITrainingMetricsValue, ThrottlingException, TrainingDataSchema, TrainingMetrics, + TrainingMetricsV2, TrainingResult, + TrainingResultV2, UntagResourceRequest, UntagResourceResult, UpdateDetectorVersionMetadataRequest, @@ -5677,7 +5691,7 @@ const serializeAws_json1_1labelMapper = (input: Record, contex } return { ...acc, - [key]: serializeAws_json1_1NonEmptyListOfStrings(value, context), + [key]: serializeAws_json1_1ListOfStrings(value, context), }; }, {}); }; @@ -6130,6 +6144,86 @@ const deserializeAws_json1_1AccessDeniedException = (output: any, context: __Ser } as any; }; +const deserializeAws_json1_1AggregatedLogOddsMetric = ( + output: any, + context: __SerdeContext +): 
AggregatedLogOddsMetric => { + return { + aggregatedVariablesImportance: __limitedParseFloat32(output.aggregatedVariablesImportance), + variableNames: + output.variableNames != null ? deserializeAws_json1_1ListOfStrings(output.variableNames, context) : undefined, + } as any; +}; + +const deserializeAws_json1_1AggregatedVariablesImpactExplanation = ( + output: any, + context: __SerdeContext +): AggregatedVariablesImpactExplanation => { + return { + eventVariableNames: + output.eventVariableNames != null + ? deserializeAws_json1_1ListOfStrings(output.eventVariableNames, context) + : undefined, + logOddsImpact: __limitedParseFloat32(output.logOddsImpact), + relativeImpact: __expectString(output.relativeImpact), + } as any; +}; + +const deserializeAws_json1_1AggregatedVariablesImportanceMetrics = ( + output: any, + context: __SerdeContext +): AggregatedVariablesImportanceMetrics => { + return { + logOddsMetrics: + output.logOddsMetrics != null + ? deserializeAws_json1_1ListOfAggregatedLogOddsMetrics(output.logOddsMetrics, context) + : undefined, + } as any; +}; + +const deserializeAws_json1_1ATIMetricDataPoint = (output: any, context: __SerdeContext): ATIMetricDataPoint => { + return { + adr: __limitedParseFloat32(output.adr), + atodr: __limitedParseFloat32(output.atodr), + cr: __limitedParseFloat32(output.cr), + threshold: __limitedParseFloat32(output.threshold), + } as any; +}; + +const deserializeAws_json1_1ATIMetricDataPointsList = (output: any, context: __SerdeContext): ATIMetricDataPoint[] => { + const retVal = (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_1ATIMetricDataPoint(entry, context); + }); + return retVal; +}; + +const deserializeAws_json1_1ATIModelPerformance = (output: any, context: __SerdeContext): ATIModelPerformance => { + return { + asi: __limitedParseFloat32(output.asi), + } as any; +}; + +const deserializeAws_json1_1ATITrainingMetricsValue = 
( + output: any, + context: __SerdeContext +): ATITrainingMetricsValue => { + return { + metricDataPoints: + output.metricDataPoints != null + ? deserializeAws_json1_1ATIMetricDataPointsList(output.metricDataPoints, context) + : undefined, + modelPerformance: + output.modelPerformance != null + ? deserializeAws_json1_1ATIModelPerformance(output.modelPerformance, context) + : undefined, + } as any; +}; + const deserializeAws_json1_1BatchCreateVariableError = ( output: any, context: __SerdeContext @@ -7085,7 +7179,7 @@ const deserializeAws_json1_1labelMapper = (output: any, context: __SerdeContext) } return { ...acc, - [key]: deserializeAws_json1_1NonEmptyListOfStrings(value, context), + [key]: deserializeAws_json1_1ListOfStrings(value, context), }; }, {}); }; @@ -7111,6 +7205,36 @@ const deserializeAws_json1_1ListEventPredictionsResult = ( } as any; }; +const deserializeAws_json1_1ListOfAggregatedLogOddsMetrics = ( + output: any, + context: __SerdeContext +): AggregatedLogOddsMetric[] => { + const retVal = (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_1AggregatedLogOddsMetric(entry, context); + }); + return retVal; +}; + +const deserializeAws_json1_1ListOfAggregatedVariablesImpactExplanations = ( + output: any, + context: __SerdeContext +): AggregatedVariablesImpactExplanation[] => { + const retVal = (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_1AggregatedVariablesImpactExplanation(entry, context); + }); + return retVal; +}; + const deserializeAws_json1_1listOfEntities = (output: any, context: __SerdeContext): Entity[] => { const retVal = (output || []) .filter((e: any) => e != null) @@ -7446,6 +7570,10 @@ const deserializeAws_json1_1ModelVersionDetail = (output: any, context: __SerdeC trainingDataSource: __expectString(output.trainingDataSource), 
trainingResult: output.trainingResult != null ? deserializeAws_json1_1TrainingResult(output.trainingResult, context) : undefined, + trainingResultV2: + output.trainingResultV2 != null + ? deserializeAws_json1_1TrainingResultV2(output.trainingResultV2, context) + : undefined, } as any; }; @@ -7484,6 +7612,49 @@ const deserializeAws_json1_1NonEmptyListOfStrings = (output: any, context: __Ser return retVal; }; +const deserializeAws_json1_1OFIMetricDataPoint = (output: any, context: __SerdeContext): OFIMetricDataPoint => { + return { + fpr: __limitedParseFloat32(output.fpr), + precision: __limitedParseFloat32(output.precision), + threshold: __limitedParseFloat32(output.threshold), + tpr: __limitedParseFloat32(output.tpr), + } as any; +}; + +const deserializeAws_json1_1OFIMetricDataPointsList = (output: any, context: __SerdeContext): OFIMetricDataPoint[] => { + const retVal = (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_1OFIMetricDataPoint(entry, context); + }); + return retVal; +}; + +const deserializeAws_json1_1OFIModelPerformance = (output: any, context: __SerdeContext): OFIModelPerformance => { + return { + auc: __limitedParseFloat32(output.auc), + } as any; +}; + +const deserializeAws_json1_1OFITrainingMetricsValue = ( + output: any, + context: __SerdeContext +): OFITrainingMetricsValue => { + return { + metricDataPoints: + output.metricDataPoints != null + ? deserializeAws_json1_1OFIMetricDataPointsList(output.metricDataPoints, context) + : undefined, + modelPerformance: + output.modelPerformance != null + ? 
deserializeAws_json1_1OFIModelPerformance(output.modelPerformance, context) + : undefined, + } as any; +}; + const deserializeAws_json1_1Outcome = (output: any, context: __SerdeContext): Outcome => { return { arn: __expectString(output.arn), @@ -7508,6 +7679,13 @@ const deserializeAws_json1_1OutcomeList = (output: any, context: __SerdeContext) const deserializeAws_json1_1PredictionExplanations = (output: any, context: __SerdeContext): PredictionExplanations => { return { + aggregatedVariablesImpactExplanations: + output.aggregatedVariablesImpactExplanations != null + ? deserializeAws_json1_1ListOfAggregatedVariablesImpactExplanations( + output.aggregatedVariablesImpactExplanations, + context + ) + : undefined, variableImpactExplanations: output.variableImpactExplanations != null ? deserializeAws_json1_1listOfVariableImpactExplanations(output.variableImpactExplanations, context) @@ -7646,6 +7824,49 @@ const deserializeAws_json1_1TagResourceResult = (output: any, context: __SerdeCo return {} as any; }; +const deserializeAws_json1_1TFIMetricDataPoint = (output: any, context: __SerdeContext): TFIMetricDataPoint => { + return { + fpr: __limitedParseFloat32(output.fpr), + precision: __limitedParseFloat32(output.precision), + threshold: __limitedParseFloat32(output.threshold), + tpr: __limitedParseFloat32(output.tpr), + } as any; +}; + +const deserializeAws_json1_1TFIMetricDataPointsList = (output: any, context: __SerdeContext): TFIMetricDataPoint[] => { + const retVal = (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_1TFIMetricDataPoint(entry, context); + }); + return retVal; +}; + +const deserializeAws_json1_1TFIModelPerformance = (output: any, context: __SerdeContext): TFIModelPerformance => { + return { + auc: __limitedParseFloat32(output.auc), + } as any; +}; + +const deserializeAws_json1_1TFITrainingMetricsValue = ( + output: any, + context: __SerdeContext +): 
TFITrainingMetricsValue => { + return { + metricDataPoints: + output.metricDataPoints != null + ? deserializeAws_json1_1TFIMetricDataPointsList(output.metricDataPoints, context) + : undefined, + modelPerformance: + output.modelPerformance != null + ? deserializeAws_json1_1TFIModelPerformance(output.modelPerformance, context) + : undefined, + } as any; +}; + const deserializeAws_json1_1ThrottlingException = (output: any, context: __SerdeContext): ThrottlingException => { return { message: __expectString(output.message), @@ -7671,6 +7892,14 @@ const deserializeAws_json1_1TrainingMetrics = (output: any, context: __SerdeCont } as any; }; +const deserializeAws_json1_1TrainingMetricsV2 = (output: any, context: __SerdeContext): TrainingMetricsV2 => { + return { + ati: output.ati != null ? deserializeAws_json1_1ATITrainingMetricsValue(output.ati, context) : undefined, + ofi: output.ofi != null ? deserializeAws_json1_1OFITrainingMetricsValue(output.ofi, context) : undefined, + tfi: output.tfi != null ? deserializeAws_json1_1TFITrainingMetricsValue(output.tfi, context) : undefined, + } as any; +}; + const deserializeAws_json1_1TrainingResult = (output: any, context: __SerdeContext): TrainingResult => { return { dataValidationMetrics: @@ -7688,6 +7917,30 @@ const deserializeAws_json1_1TrainingResult = (output: any, context: __SerdeConte } as any; }; +const deserializeAws_json1_1TrainingResultV2 = (output: any, context: __SerdeContext): TrainingResultV2 => { + return { + aggregatedVariablesImportanceMetrics: + output.aggregatedVariablesImportanceMetrics != null + ? deserializeAws_json1_1AggregatedVariablesImportanceMetrics( + output.aggregatedVariablesImportanceMetrics, + context + ) + : undefined, + dataValidationMetrics: + output.dataValidationMetrics != null + ? deserializeAws_json1_1DataValidationMetrics(output.dataValidationMetrics, context) + : undefined, + trainingMetricsV2: + output.trainingMetricsV2 != null + ? 
deserializeAws_json1_1TrainingMetricsV2(output.trainingMetricsV2, context) + : undefined, + variableImportanceMetrics: + output.variableImportanceMetrics != null + ? deserializeAws_json1_1VariableImportanceMetrics(output.variableImportanceMetrics, context) + : undefined, + } as any; +}; + const deserializeAws_json1_1UntagResourceResult = (output: any, context: __SerdeContext): UntagResourceResult => { return {} as any; }; diff --git a/codegen/sdk-codegen/aws-models/frauddetector.json b/codegen/sdk-codegen/aws-models/frauddetector.json index 032194fe9d5d0..f236df58e9786 100644 --- a/codegen/sdk-codegen/aws-models/frauddetector.json +++ b/codegen/sdk-codegen/aws-models/frauddetector.json @@ -29,6 +29,78 @@ ] }, "shapes": { + "com.amazonaws.frauddetector#ATIMetricDataPoint": { + "type": "structure", + "members": { + "cr": { + "target": "com.amazonaws.frauddetector#float", + "traits": { + "smithy.api#documentation": "

                        \n The challenge rate. This indicates the percentage of login events that the model recommends to challenge such as \n one-time password, multi-factor authentication, and investigations.\n

                        " + } + }, + "adr": { + "target": "com.amazonaws.frauddetector#float", + "traits": { + "smithy.api#documentation": "

                        \n The anomaly discovery rate. This metric quantifies the percentage of anomalies that can be detected by the model at the selected score threshold. \n A lower score threshold increases the percentage of anomalies captured by the model, but would also require challenging a larger percentage of \n login events, leading to a higher customer friction.\n

                        " + } + }, + "threshold": { + "target": "com.amazonaws.frauddetector#float", + "traits": { + "smithy.api#documentation": "

                        \n The model's threshold that specifies an acceptable fraud capture rate. For example, a threshold of 500 means any model score 500 or above is \n labeled as fraud.\n

                        " + } + }, + "atodr": { + "target": "com.amazonaws.frauddetector#float", + "traits": { + "smithy.api#documentation": "

                        \n The account takeover discovery rate. This metric quantifies the percentage of account compromise events that can be detected by the model at the selected score threshold.\n This metric is only available if 50 or more entities with at-least one labeled account takeover event is present in the ingested dataset.\n

                        " + } + } + }, + "traits": { + "smithy.api#documentation": "

                        \n The Account Takeover Insights (ATI) model performance metrics data points.\n

                        " + } + }, + "com.amazonaws.frauddetector#ATIMetricDataPointsList": { + "type": "list", + "member": { + "target": "com.amazonaws.frauddetector#ATIMetricDataPoint" + } + }, + "com.amazonaws.frauddetector#ATIModelPerformance": { + "type": "structure", + "members": { + "asi": { + "target": "com.amazonaws.frauddetector#float", + "traits": { + "smithy.api#documentation": "

                        \n The anomaly separation index (ASI) score. This metric summarizes the overall ability of the model to separate anomalous activities from the normal behavior. Depending on the business, a \n large fraction of these anomalous activities can be malicious and correspond to the account takeover attacks. A model with no separability power will have the lowest possible \n ASI score of 0.5, whereas a model with a high separability power will have the highest possible ASI score of 1.0\n

                        " + } + } + }, + "traits": { + "smithy.api#documentation": "

                        \n The Account Takeover Insights (ATI) model performance score.\n

                        " + } + }, + "com.amazonaws.frauddetector#ATITrainingMetricsValue": { + "type": "structure", + "members": { + "metricDataPoints": { + "target": "com.amazonaws.frauddetector#ATIMetricDataPointsList", + "traits": { + "smithy.api#documentation": "

                        \n The model's performance metrics data points.\n

                        " + } + }, + "modelPerformance": { + "target": "com.amazonaws.frauddetector#ATIModelPerformance", + "traits": { + "smithy.api#documentation": "

                        \n The model's overall performance scores.\n

                        " + } + } + }, + "traits": { + "smithy.api#documentation": "

                        \n The Account Takeover Insights (ATI) model training metric details. \n

                        " + } + }, "com.amazonaws.frauddetector#AWSHawksNestServiceFacade": { "type": "service", "traits": { @@ -273,6 +345,68 @@ "smithy.api#httpError": 403 } }, + "com.amazonaws.frauddetector#AggregatedLogOddsMetric": { + "type": "structure", + "members": { + "variableNames": { + "target": "com.amazonaws.frauddetector#ListOfStrings", + "traits": { + "smithy.api#documentation": "

                        \n The names of all the variables. \n

                        ", + "smithy.api#required": {} + } + }, + "aggregatedVariablesImportance": { + "target": "com.amazonaws.frauddetector#float", + "traits": { + "smithy.api#documentation": "

                        \n The relative importance of the variables in the list to the other event variable. \n

                        ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                        The log odds metric details.

                        \n \n

                        Account Takeover Insights (ATI) model uses event variables from the login data you \n provide to continuously calculate a set of variables (aggregated variables) based on historical events. For example, your ATI model might calculate the number of times a user has logged in using the same IP address. \n In this case, event variables used to derive the aggregated variables are IP address and user.

                        " + } + }, + "com.amazonaws.frauddetector#AggregatedVariablesImpactExplanation": { + "type": "structure", + "members": { + "eventVariableNames": { + "target": "com.amazonaws.frauddetector#ListOfStrings", + "traits": { + "smithy.api#documentation": "

                        \n The names of all the event variables that were used to derive the aggregated variables. \n

                        " + } + }, + "relativeImpact": { + "target": "com.amazonaws.frauddetector#string", + "traits": { + "smithy.api#documentation": "

                        \n The relative impact of the aggregated variables in terms of magnitude on the prediction scores. \n

                        " + } + }, + "logOddsImpact": { + "target": "com.amazonaws.frauddetector#float", + "traits": { + "smithy.api#documentation": "

                        \n The raw, uninterpreted value represented as log-odds of the fraud. These values are usually between -10 to +10, but range from -infinity to +infinity.

                        \n
                          \n
                        • \n

                          A positive value indicates that the variables drove the risk score up.

                          \n
                        • \n
                        • \n

                          A negative value indicates that the variables drove the risk score down.

                          \n
                        • \n
                        " + } + } + }, + "traits": { + "smithy.api#documentation": "

                        \n The details of the impact of aggregated variables on the prediction score.

                        \n \n

                        Account Takeover Insights (ATI) model uses the login data you \n provide to continuously calculate a set of variables (aggregated variables) based on historical events. For example, the model might calculate the number of times a user has logged in using the same IP address. \n In this case, event variables used to derive the aggregated variables are IP address and user.

                        " + } + }, + "com.amazonaws.frauddetector#AggregatedVariablesImportanceMetrics": { + "type": "structure", + "members": { + "logOddsMetrics": { + "target": "com.amazonaws.frauddetector#ListOfAggregatedLogOddsMetrics", + "traits": { + "smithy.api#documentation": "

                        \n List of variables' metrics.\n

                        " + } + } + }, + "traits": { + "smithy.api#documentation": "

                        The details of the relative importance of the aggregated variables.

                        \n \n

                        Account Takeover Insights (ATI) model uses event variables from the login data you \n provide to continuously calculate a set of variables (aggregated variables) based on historical events. For example, your ATI model might calculate the number of times a user has logged in using the same IP address. \n In this case, event variables used to derive the aggregated variables are IP address and user.

                        " + } + }, "com.amazonaws.frauddetector#AsyncJobStatus": { "type": "string", "traits": { @@ -1462,7 +1596,7 @@ "fileLevelMessages": { "target": "com.amazonaws.frauddetector#fileValidationMessageList", "traits": { - "smithy.api#documentation": "

                        The file-specific model training validation messages.

                        " + "smithy.api#documentation": "

                        The file-specific model training data validation messages.

                        " } }, "fieldLevelMessages": { @@ -1473,7 +1607,7 @@ } }, "traits": { - "smithy.api#documentation": "

                        The model training validation messages.

                        " + "smithy.api#documentation": "

                        The model training data validation metrics.

                        " } }, "com.amazonaws.frauddetector#DeleteAuditHistory": { @@ -4834,8 +4968,7 @@ "labelMapper": { "target": "com.amazonaws.frauddetector#labelMapper", "traits": { - "smithy.api#documentation": "

                        The label mapper maps the Amazon Fraud Detector supported model classification labels (FRAUD, LEGIT) to the appropriate event type labels. For example, if \"FRAUD\" and \"LEGIT\" are Amazon Fraud Detector supported labels, this mapper could be: {\"FRAUD\" => [\"0\"], \"LEGIT\" => [\"1\"]} or {\"FRAUD\" => [\"false\"], \"LEGIT\" => [\"true\"]} or {\"FRAUD\" => [\"fraud\", \"abuse\"], \"LEGIT\" => [\"legit\", \"safe\"]}. The value part of the mapper is a list, because you may have multiple label variants from your event type for a single Amazon Fraud Detector label.\n

                        ", - "smithy.api#required": {} + "smithy.api#documentation": "

                        The label mapper maps the Amazon Fraud Detector supported model classification labels (FRAUD, LEGIT) to the appropriate event type labels. For example, if \"FRAUD\" and \"LEGIT\" are Amazon Fraud Detector supported labels, this mapper could be: {\"FRAUD\" => [\"0\"], \"LEGIT\" => [\"1\"]} or {\"FRAUD\" => [\"false\"], \"LEGIT\" => [\"true\"]} or {\"FRAUD\" => [\"fraud\", \"abuse\"], \"LEGIT\" => [\"legit\", \"safe\"]}. The value part of the mapper is a list, because you may have multiple label variants from your event type for a single Amazon Fraud Detector label.\n

                        " } }, "unlabeledEventsTreatment": { @@ -4955,6 +5088,18 @@ } } }, + "com.amazonaws.frauddetector#ListOfAggregatedLogOddsMetrics": { + "type": "list", + "member": { + "target": "com.amazonaws.frauddetector#AggregatedLogOddsMetric" + } + }, + "com.amazonaws.frauddetector#ListOfAggregatedVariablesImpactExplanations": { + "type": "list", + "member": { + "target": "com.amazonaws.frauddetector#AggregatedVariablesImpactExplanation" + } + }, "com.amazonaws.frauddetector#ListOfEvaluatedExternalModels": { "type": "list", "member": { @@ -5404,6 +5549,10 @@ { "value": "TRANSACTION_FRAUD_INSIGHTS", "name": "TRANSACTION_FRAUD_INSIGHTS" + }, + { + "value": "ACCOUNT_TAKEOVER_INSIGHTS", + "name": "ACCOUNT_TAKEOVER_INSIGHTS" } ] } @@ -5517,6 +5666,12 @@ "traits": { "smithy.api#documentation": "

                        The model version ARN.

                        " } + }, + "trainingResultV2": { + "target": "com.amazonaws.frauddetector#TrainingResultV2", + "traits": { + "smithy.api#documentation": "

                        \n The training result details. The details include the relative importance of the variables.\n

                        " + } } }, "traits": { @@ -5591,6 +5746,78 @@ } } }, + "com.amazonaws.frauddetector#OFIMetricDataPoint": { + "type": "structure", + "members": { + "fpr": { + "target": "com.amazonaws.frauddetector#float", + "traits": { + "smithy.api#documentation": "

                        \n The false positive rate. This is the percentage of total legitimate events that are incorrectly predicted as fraud.\n

                        " + } + }, + "precision": { + "target": "com.amazonaws.frauddetector#float", + "traits": { + "smithy.api#documentation": "

                        \n The percentage of fraud events correctly predicted as fraudulent as compared to all events predicted as fraudulent.\n

                        " + } + }, + "tpr": { + "target": "com.amazonaws.frauddetector#float", + "traits": { + "smithy.api#documentation": "

                        \n The true positive rate. This is the percentage of total fraud the model detects. Also known as capture rate.\n

                        " + } + }, + "threshold": { + "target": "com.amazonaws.frauddetector#float", + "traits": { + "smithy.api#documentation": "

                        \n The model threshold that specifies an acceptable fraud capture rate. For example, a threshold of 500 means any model score 500 or above is labeled as fraud.\n

                        " + } + } + }, + "traits": { + "smithy.api#documentation": "

                        \n The Online Fraud Insights (OFI) model performance metrics data points. \n

                        " + } + }, + "com.amazonaws.frauddetector#OFIMetricDataPointsList": { + "type": "list", + "member": { + "target": "com.amazonaws.frauddetector#OFIMetricDataPoint" + } + }, + "com.amazonaws.frauddetector#OFIModelPerformance": { + "type": "structure", + "members": { + "auc": { + "target": "com.amazonaws.frauddetector#float", + "traits": { + "smithy.api#documentation": "

                        \n The area under the curve (auc). This summarizes the total positive rate (tpr) and false positive rate (FPR) across all possible model score thresholds.\n

                        " + } + } + }, + "traits": { + "smithy.api#documentation": "

                        \n The Online Fraud Insights (OFI) model performance score.\n

                        " + } + }, + "com.amazonaws.frauddetector#OFITrainingMetricsValue": { + "type": "structure", + "members": { + "metricDataPoints": { + "target": "com.amazonaws.frauddetector#OFIMetricDataPointsList", + "traits": { + "smithy.api#documentation": "

                        \n The model's performance metrics data points.\n

                        " + } + }, + "modelPerformance": { + "target": "com.amazonaws.frauddetector#OFIModelPerformance", + "traits": { + "smithy.api#documentation": "

                        \n The model's overall performance score.\n

                        " + } + } + }, + "traits": { + "smithy.api#documentation": "

                        \n The Online Fraud Insights (OFI) model training metric details. \n

                        " + } + }, "com.amazonaws.frauddetector#Outcome": { "type": "structure", "members": { @@ -5653,6 +5880,12 @@ "traits": { "smithy.api#documentation": "

                        \nThe details of the event variable's impact on the prediction score.\n

                        " } + }, + "aggregatedVariablesImpactExplanations": { + "target": "com.amazonaws.frauddetector#ListOfAggregatedVariablesImpactExplanations", + "traits": { + "smithy.api#documentation": "

                        \n The details of the aggregated variables impact on the prediction score. \n

                        \n \n

                        Account Takeover Insights (ATI) model uses event variables from the login data you \n provide to continuously calculate a set of variables (aggregated variables) based on historical events. For example, your ATI model might calculate the number of times an user has logged in using the same IP address. \n In this case, event variables used to derive the aggregated variables are IP address and user.

                        " + } } }, "traits": { @@ -6404,6 +6637,78 @@ "type": "structure", "members": {} }, + "com.amazonaws.frauddetector#TFIMetricDataPoint": { + "type": "structure", + "members": { + "fpr": { + "target": "com.amazonaws.frauddetector#float", + "traits": { + "smithy.api#documentation": "

                        \n The false positive rate. This is the percentage of total legitimate events that are incorrectly predicted as fraud.\n

                        " + } + }, + "precision": { + "target": "com.amazonaws.frauddetector#float", + "traits": { + "smithy.api#documentation": "

                        \n The percentage of fraud events correctly predicted as fraudulent as compared to all events predicted as fraudulent.\n

                        " + } + }, + "tpr": { + "target": "com.amazonaws.frauddetector#float", + "traits": { + "smithy.api#documentation": "

                        \n The true positive rate. This is the percentage of total fraud the model detects. Also known as capture rate.\n

                        " + } + }, + "threshold": { + "target": "com.amazonaws.frauddetector#float", + "traits": { + "smithy.api#documentation": "

                        \n The model threshold that specifies an acceptable fraud capture rate. For example, a threshold of 500 means any \n model score 500 or above is labeled as fraud.\n

                        " + } + } + }, + "traits": { + "smithy.api#documentation": "

                        \n The performance metrics data points for Transaction Fraud Insights (TFI) model. \n

                        " + } + }, + "com.amazonaws.frauddetector#TFIMetricDataPointsList": { + "type": "list", + "member": { + "target": "com.amazonaws.frauddetector#TFIMetricDataPoint" + } + }, + "com.amazonaws.frauddetector#TFIModelPerformance": { + "type": "structure", + "members": { + "auc": { + "target": "com.amazonaws.frauddetector#float", + "traits": { + "smithy.api#documentation": "

                        \n The area under the curve (auc). This summarizes the total positive rate (tpr) and false positive rate (FPR) across all possible model score thresholds.\n

                        " + } + } + }, + "traits": { + "smithy.api#documentation": "

                        \n The Transaction Fraud Insights (TFI) model performance score.\n

                        " + } + }, + "com.amazonaws.frauddetector#TFITrainingMetricsValue": { + "type": "structure", + "members": { + "metricDataPoints": { + "target": "com.amazonaws.frauddetector#TFIMetricDataPointsList", + "traits": { + "smithy.api#documentation": "

                        \n The model's performance metrics data points.\n

                        " + } + }, + "modelPerformance": { + "target": "com.amazonaws.frauddetector#TFIModelPerformance", + "traits": { + "smithy.api#documentation": "

                        \n The model performance score.\n

                        " + } + } + }, + "traits": { + "smithy.api#documentation": "

                        \n The Transaction Fraud Insights (TFI) model training metric details. \n

                        " + } + }, "com.amazonaws.frauddetector#Tag": { "type": "structure", "members": { @@ -6512,10 +6817,7 @@ } }, "labelSchema": { - "target": "com.amazonaws.frauddetector#LabelSchema", - "traits": { - "smithy.api#required": {} - } + "target": "com.amazonaws.frauddetector#LabelSchema" } }, "traits": { @@ -6557,6 +6859,32 @@ "smithy.api#documentation": "

                        The training metric details.

                        " } }, + "com.amazonaws.frauddetector#TrainingMetricsV2": { + "type": "structure", + "members": { + "ofi": { + "target": "com.amazonaws.frauddetector#OFITrainingMetricsValue", + "traits": { + "smithy.api#documentation": "

                        \n The Online Fraud Insights (OFI) model training metric details. \n

                        " + } + }, + "tfi": { + "target": "com.amazonaws.frauddetector#TFITrainingMetricsValue", + "traits": { + "smithy.api#documentation": "

                        \n The Transaction Fraud Insights (TFI) model training metric details. \n

                        " + } + }, + "ati": { + "target": "com.amazonaws.frauddetector#ATITrainingMetricsValue", + "traits": { + "smithy.api#documentation": "

                        \n The Account Takeover Insights (ATI) model training metric details. \n

                        " + } + } + }, + "traits": { + "smithy.api#documentation": "

                        \n The training metrics details.\n

                        " + } + }, "com.amazonaws.frauddetector#TrainingResult": { "type": "structure", "members": { @@ -6583,6 +6911,32 @@ "smithy.api#documentation": "

                        The training result details.

                        " } }, + "com.amazonaws.frauddetector#TrainingResultV2": { + "type": "structure", + "members": { + "dataValidationMetrics": { + "target": "com.amazonaws.frauddetector#DataValidationMetrics" + }, + "trainingMetricsV2": { + "target": "com.amazonaws.frauddetector#TrainingMetricsV2", + "traits": { + "smithy.api#documentation": "

                        \n The training metric details.\n

                        " + } + }, + "variableImportanceMetrics": { + "target": "com.amazonaws.frauddetector#VariableImportanceMetrics" + }, + "aggregatedVariablesImportanceMetrics": { + "target": "com.amazonaws.frauddetector#AggregatedVariablesImportanceMetrics", + "traits": { + "smithy.api#documentation": "

                        \n The variable importance metrics of the aggregated variables.\n

                        \n

                        Account Takeover Insights (ATI) model uses event variables from the login data you \n provide to continuously calculate a set of variables (aggregated variables) based on historical events. For example, your ATI model might calculate the number of times an user has logged in using the same IP address. \n In this case, event variables used to derive the aggregated variables are IP address and user.

                        " + } + } + }, + "traits": { + "smithy.api#documentation": "

                        \n The training result details.\n

                        " + } + }, "com.amazonaws.frauddetector#UnlabeledEventsTreatment": { "type": "string", "traits": { @@ -7750,7 +8104,7 @@ "target": "com.amazonaws.frauddetector#string" }, "value": { - "target": "com.amazonaws.frauddetector#NonEmptyListOfStrings" + "target": "com.amazonaws.frauddetector#ListOfStrings" } }, "com.amazonaws.frauddetector#labelsMaxResults": { @@ -7936,7 +8290,7 @@ "traits": { "smithy.api#length": { "min": 1, - "max": 1024 + "max": 8192 }, "smithy.api#sensitive": {} } From 91730562c1fe693c0270b088e5bd0c02d50372d4 Mon Sep 17 00:00:00 2001 From: awstools Date: Thu, 21 Jul 2022 18:23:03 +0000 Subject: [PATCH 30/31] feat(client-docdb): Enable copy-on-write restore type --- clients/client-docdb/src/models/models_0.ts | 40 ++++++++++++++++++ .../client-docdb/src/protocols/Aws_query.ts | 17 ++++++++ codegen/sdk-codegen/aws-models/docdb.json | 42 +++++++++++++++++++ 3 files changed, 99 insertions(+) diff --git a/clients/client-docdb/src/models/models_0.ts b/clients/client-docdb/src/models/models_0.ts index 374280772169e..f5a12074c18cc 100644 --- a/clients/client-docdb/src/models/models_0.ts +++ b/clients/client-docdb/src/models/models_0.ts @@ -1482,6 +1482,11 @@ export interface DBCluster { */ AssociatedRoles?: DBClusterRole[]; + /** + *

                        Identifies the clone group to which the DB cluster is associated.

                        + */ + CloneGroupId?: string; + /** *

                        Specifies the time when the cluster was created, in Universal Coordinated Time * (UTC).

                        @@ -1986,6 +1991,11 @@ export interface CreateDBInstanceMessage { */ DBClusterIdentifier: string | undefined; + /** + *

                        A value that indicates whether to copy tags from the DB instance to snapshots of the DB instance. By default, tags are not copied.

                        + */ + CopyTagsToSnapshot?: boolean; + /** *

                        A value that specifies the order in which an Amazon DocumentDB replica is promoted to the * primary instance after a failure of the existing primary instance.

                        @@ -2434,6 +2444,11 @@ export interface DBInstance { */ CACertificateIdentifier?: string; + /** + *

                        A value that indicates whether to copy tags from the DB instance to snapshots of the DB instance. By default, tags are not copied.

                        + */ + CopyTagsToSnapshot?: boolean; + /** *

                        A value that specifies the order in which an Amazon DocumentDB replica is promoted to the * primary instance after a failure of the existing primary instance.

                        @@ -5684,6 +5699,11 @@ export interface ModifyDBInstanceMessage { */ CACertificateIdentifier?: string; + /** + *

                        A value that indicates whether to copy all tags from the DB instance to snapshots of the DB instance. By default, tags are not copied.

                        + */ + CopyTagsToSnapshot?: boolean; + /** *

                        A value that specifies the order in which an Amazon DocumentDB replica is promoted to the primary instance after a failure of the existing primary instance.

                        *

                        Default: 1

                        @@ -6326,6 +6346,26 @@ export interface RestoreDBClusterToPointInTimeMessage { */ DBClusterIdentifier: string | undefined; + /** + *

                        The type of restore to be performed. You can specify one of the following values:

                        + *
                          + *
                        • + *

                          + * full-copy - The new DB cluster is restored as a full copy of the + * source DB cluster.

                          + *
                        • + *
                        • + *

                          + * copy-on-write - The new DB cluster is restored as a clone of the + * source DB cluster.

                          + *
                        • + *
                        + *

                        Constraints: You can't specify copy-on-write if the engine version of the source DB cluster is earlier than 1.11.

                        + *

                        If you don't specify a RestoreType value, then the new DB cluster is + * restored as a full copy of the source DB cluster.

                        + */ + RestoreType?: string; + /** *

                        The identifier of the source cluster from which to restore.

                        *

                        Constraints:

                        diff --git a/clients/client-docdb/src/protocols/Aws_query.ts b/clients/client-docdb/src/protocols/Aws_query.ts index 4583ca7d61601..11bb216ad5a89 100644 --- a/clients/client-docdb/src/protocols/Aws_query.ts +++ b/clients/client-docdb/src/protocols/Aws_query.ts @@ -5068,6 +5068,9 @@ const serializeAws_queryCreateDBInstanceMessage = (input: CreateDBInstanceMessag if (input.DBClusterIdentifier !== undefined && input.DBClusterIdentifier !== null) { entries["DBClusterIdentifier"] = input.DBClusterIdentifier; } + if (input.CopyTagsToSnapshot !== undefined && input.CopyTagsToSnapshot !== null) { + entries["CopyTagsToSnapshot"] = input.CopyTagsToSnapshot; + } if (input.PromotionTier !== undefined && input.PromotionTier !== null) { entries["PromotionTier"] = input.PromotionTier; } @@ -5914,6 +5917,9 @@ const serializeAws_queryModifyDBInstanceMessage = (input: ModifyDBInstanceMessag if (input.CACertificateIdentifier !== undefined && input.CACertificateIdentifier !== null) { entries["CACertificateIdentifier"] = input.CACertificateIdentifier; } + if (input.CopyTagsToSnapshot !== undefined && input.CopyTagsToSnapshot !== null) { + entries["CopyTagsToSnapshot"] = input.CopyTagsToSnapshot; + } if (input.PromotionTier !== undefined && input.PromotionTier !== null) { entries["PromotionTier"] = input.PromotionTier; } @@ -6188,6 +6194,9 @@ const serializeAws_queryRestoreDBClusterToPointInTimeMessage = ( if (input.DBClusterIdentifier !== undefined && input.DBClusterIdentifier !== null) { entries["DBClusterIdentifier"] = input.DBClusterIdentifier; } + if (input.RestoreType !== undefined && input.RestoreType !== null) { + entries["RestoreType"] = input.RestoreType; + } if (input.SourceDBClusterIdentifier !== undefined && input.SourceDBClusterIdentifier !== null) { entries["SourceDBClusterIdentifier"] = input.SourceDBClusterIdentifier; } @@ -6620,6 +6629,7 @@ const deserializeAws_queryDBCluster = (output: any, context: __SerdeContext): DB DbClusterResourceId: 
undefined, DBClusterArn: undefined, AssociatedRoles: undefined, + CloneGroupId: undefined, ClusterCreateTime: undefined, EnabledCloudwatchLogsExports: undefined, DeletionProtection: undefined, @@ -6742,6 +6752,9 @@ const deserializeAws_queryDBCluster = (output: any, context: __SerdeContext): DB context ); } + if (output["CloneGroupId"] !== undefined) { + contents.CloneGroupId = __expectString(output["CloneGroupId"]); + } if (output["ClusterCreateTime"] !== undefined) { contents.ClusterCreateTime = __expectNonNull(__parseRfc3339DateTime(output["ClusterCreateTime"])); } @@ -7306,6 +7319,7 @@ const deserializeAws_queryDBInstance = (output: any, context: __SerdeContext): D KmsKeyId: undefined, DbiResourceId: undefined, CACertificateIdentifier: undefined, + CopyTagsToSnapshot: undefined, PromotionTier: undefined, DBInstanceArn: undefined, EnabledCloudwatchLogsExports: undefined, @@ -7395,6 +7409,9 @@ const deserializeAws_queryDBInstance = (output: any, context: __SerdeContext): D if (output["CACertificateIdentifier"] !== undefined) { contents.CACertificateIdentifier = __expectString(output["CACertificateIdentifier"]); } + if (output["CopyTagsToSnapshot"] !== undefined) { + contents.CopyTagsToSnapshot = __parseBoolean(output["CopyTagsToSnapshot"]); + } if (output["PromotionTier"] !== undefined) { contents.PromotionTier = __strictParseInt32(output["PromotionTier"]) as number; } diff --git a/codegen/sdk-codegen/aws-models/docdb.json b/codegen/sdk-codegen/aws-models/docdb.json index 416a7f55de24d..7fdd7c09c7503 100644 --- a/codegen/sdk-codegen/aws-models/docdb.json +++ b/codegen/sdk-codegen/aws-models/docdb.json @@ -84,6 +84,9 @@ "input": { "target": "com.amazonaws.docdb#AddTagsToResourceMessage" }, + "output": { + "target": "smithy.api#Unit" + }, "errors": [ { "target": "com.amazonaws.docdb#DBClusterNotFoundFault" @@ -1150,6 +1153,12 @@ "smithy.api#required": {} } }, + "CopyTagsToSnapshot": { + "target": "com.amazonaws.docdb#BooleanOptional", + "traits": { + 
"smithy.api#documentation": "

                        A value that indicates whether to copy tags from the DB instance to snapshots of the DB instance. By default, tags are not copied.

                        " + } + }, "PromotionTier": { "target": "com.amazonaws.docdb#IntegerOptional", "traits": { @@ -1604,6 +1613,12 @@ "smithy.api#documentation": "

                        Provides a list of the Identity and Access Management (IAM) roles that are associated with the cluster. (IAM) roles that are associated with a cluster grant permission for the cluster to access other Amazon Web Services services on your behalf.

                        " } }, + "CloneGroupId": { + "target": "com.amazonaws.docdb#String", + "traits": { + "smithy.api#documentation": "

                        Identifies the clone group to which the DB cluster is associated.

                        " + } + }, "ClusterCreateTime": { "target": "com.amazonaws.docdb#TStamp", "traits": { @@ -2337,6 +2352,12 @@ "smithy.api#documentation": "

                        The identifier of the CA certificate for this DB instance.

                        " } }, + "CopyTagsToSnapshot": { + "target": "com.amazonaws.docdb#BooleanOptional", + "traits": { + "smithy.api#documentation": "

                        A value that indicates whether to copy tags from the DB instance to snapshots of the DB instance. By default, tags are not copied.

                        " + } + }, "PromotionTier": { "target": "com.amazonaws.docdb#IntegerOptional", "traits": { @@ -2802,6 +2823,9 @@ "input": { "target": "com.amazonaws.docdb#DeleteDBClusterParameterGroupMessage" }, + "output": { + "target": "smithy.api#Unit" + }, "errors": [ { "target": "com.amazonaws.docdb#DBParameterGroupNotFoundFault" @@ -2937,6 +2961,9 @@ "input": { "target": "com.amazonaws.docdb#DeleteDBSubnetGroupMessage" }, + "output": { + "target": "smithy.api#Unit" + }, "errors": [ { "target": "com.amazonaws.docdb#DBSubnetGroupNotFoundFault" @@ -5411,6 +5438,12 @@ "smithy.api#documentation": "

                        Indicates the certificate that needs to be associated with the instance.

                        " } }, + "CopyTagsToSnapshot": { + "target": "com.amazonaws.docdb#BooleanOptional", + "traits": { + "smithy.api#documentation": "

                        A value that indicates whether to copy all tags from the DB instance to snapshots of the DB instance. By default, tags are not copied.

                        " + } + }, "PromotionTier": { "target": "com.amazonaws.docdb#IntegerOptional", "traits": { @@ -6157,6 +6190,9 @@ "input": { "target": "com.amazonaws.docdb#RemoveTagsFromResourceMessage" }, + "output": { + "target": "smithy.api#Unit" + }, "errors": [ { "target": "com.amazonaws.docdb#DBClusterNotFoundFault" @@ -6494,6 +6530,12 @@ "smithy.api#required": {} } }, + "RestoreType": { + "target": "com.amazonaws.docdb#String", + "traits": { + "smithy.api#documentation": "

                        The type of restore to be performed. You can specify one of the following values:

                        \n
                          \n
                        • \n

                          \n full-copy - The new DB cluster is restored as a full copy of the\n source DB cluster.

                          \n
                        • \n
                        • \n

                          \n copy-on-write - The new DB cluster is restored as a clone of the\n source DB cluster.

                          \n
                        • \n
                        \n

                        Constraints: You can't specify copy-on-write if the engine version of the source DB cluster is earlier than 1.11.

                        \n

                        If you don't specify a RestoreType value, then the new DB cluster is\n restored as a full copy of the source DB cluster.

                        " + } + }, "SourceDBClusterIdentifier": { "target": "com.amazonaws.docdb#String", "traits": { From 1ff8af289bc025ac593aca2e85946b9181818750 Mon Sep 17 00:00:00 2001 From: awstools Date: Thu, 21 Jul 2022 18:39:39 +0000 Subject: [PATCH 31/31] Publish v3.135.0 --- CHANGELOG.md | 18 ++++++++++++++++++ clients/client-athena/CHANGELOG.md | 11 +++++++++++ clients/client-athena/package.json | 2 +- clients/client-cloudwatch/CHANGELOG.md | 11 +++++++++++ clients/client-cloudwatch/package.json | 2 +- .../CHANGELOG.md | 8 ++++++++ .../package.json | 2 +- clients/client-docdb/CHANGELOG.md | 11 +++++++++++ clients/client-docdb/package.json | 2 +- .../client-ec2-instance-connect/CHANGELOG.md | 11 +++++++++++ .../client-ec2-instance-connect/package.json | 2 +- clients/client-frauddetector/CHANGELOG.md | 11 +++++++++++ clients/client-frauddetector/package.json | 2 +- clients/client-iotsitewise/CHANGELOG.md | 11 +++++++++++ clients/client-iotsitewise/package.json | 2 +- clients/client-kendra/CHANGELOG.md | 11 +++++++++++ clients/client-kendra/package.json | 2 +- clients/client-lex-runtime-v2/CHANGELOG.md | 8 ++++++++ clients/client-lex-runtime-v2/package.json | 2 +- clients/client-network-firewall/CHANGELOG.md | 11 +++++++++++ clients/client-network-firewall/package.json | 2 +- clients/client-rds/CHANGELOG.md | 8 ++++++++ clients/client-rds/package.json | 2 +- clients/client-s3/CHANGELOG.md | 8 ++++++++ clients/client-s3/package.json | 2 +- .../client-transcribe-streaming/CHANGELOG.md | 8 ++++++++ .../client-transcribe-streaming/package.json | 2 +- lerna.json | 2 +- lib/lib-storage/CHANGELOG.md | 8 ++++++++ lib/lib-storage/package.json | 2 +- packages/s3-presigned-post/CHANGELOG.md | 8 ++++++++ packages/s3-presigned-post/package.json | 2 +- packages/s3-request-presigner/CHANGELOG.md | 8 ++++++++ packages/s3-request-presigner/package.json | 2 +- 34 files changed, 187 insertions(+), 17 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 
f4e6b194aee2e..a3ec66c91e380 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,24 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. +# [3.135.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.134.0...v3.135.0) (2022-07-21) + + +### Features + +* **client-athena:** This feature allows customers to retrieve runtime statistics for completed queries ([6fe9621](https://github.com/aws/aws-sdk-js-v3/commit/6fe9621621e896fa31ad115c3865a35f00b12d3a)) +* **client-cloudwatch:** Adding support for the suppression of Composite Alarm actions ([e94c855](https://github.com/aws/aws-sdk-js-v3/commit/e94c855cd87e381d4cbf34685fe4f68da1031ffa)) +* **client-docdb:** Enable copy-on-write restore type ([9173056](https://github.com/aws/aws-sdk-js-v3/commit/91730562c1fe693c0270b088e5bd0c02d50372d4)) +* **client-ec2-instance-connect:** This release includes a new exception type "EC2InstanceUnavailableException" for SendSSHPublicKey and SendSerialConsoleSSHPublicKey APIs. ([a2aef86](https://github.com/aws/aws-sdk-js-v3/commit/a2aef86781ea0833877ada3ed3dcdbaab5d700ec)) +* **client-frauddetector:** The release introduces Account Takeover Insights (ATI) model. The ATI model detects fraud relating to account takeover. This release also adds support for new variable types: ARE_CREDENTIALS_VALID and SESSION_ID and adds new structures to Model Version APIs. ([b8837a8](https://github.com/aws/aws-sdk-js-v3/commit/b8837a8fe3ee514eed76a87d54e9165433ea0215)) +* **client-iotsitewise:** Added asynchronous API to ingest bulk historical and current data into IoT SiteWise. ([972d020](https://github.com/aws/aws-sdk-js-v3/commit/972d020ec9d8e713b9306aa8a39b036886127fc0)) +* **client-kendra:** Amazon Kendra now provides Oauth2 support for SharePoint Online. 
For more information, see https://docs.aws.amazon.com/kendra/latest/dg/data-source-sharepoint.html ([de0697a](https://github.com/aws/aws-sdk-js-v3/commit/de0697ad65ae98358ea534d9c2c90eadf4111bce)) +* **client-network-firewall:** Network Firewall now supports referencing dynamic IP sets from stateful rule groups, for IP sets stored in Amazon VPC prefix lists. ([7fb7490](https://github.com/aws/aws-sdk-js-v3/commit/7fb7490a4c093be17407d16f77151f3fa7c3e03a)) + + + + + # [3.134.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.133.0...v3.134.0) (2022-07-20) diff --git a/clients/client-athena/CHANGELOG.md b/clients/client-athena/CHANGELOG.md index 5319ee389921b..2f74ff4d41b4c 100644 --- a/clients/client-athena/CHANGELOG.md +++ b/clients/client-athena/CHANGELOG.md @@ -3,6 +3,17 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. +# [3.135.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.134.0...v3.135.0) (2022-07-21) + + +### Features + +* **client-athena:** This feature allows customers to retrieve runtime statistics for completed queries ([6fe9621](https://github.com/aws/aws-sdk-js-v3/commit/6fe9621621e896fa31ad115c3865a35f00b12d3a)) + + + + + # [3.131.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.130.0...v3.131.0) (2022-07-15) **Note:** Version bump only for package @aws-sdk/client-athena diff --git a/clients/client-athena/package.json b/clients/client-athena/package.json index 308f6d054b560..099c525fbcf6f 100644 --- a/clients/client-athena/package.json +++ b/clients/client-athena/package.json @@ -1,7 +1,7 @@ { "name": "@aws-sdk/client-athena", "description": "AWS SDK for JavaScript Athena Client for Node.js, Browser and React Native", - "version": "3.131.0", + "version": "3.135.0", "scripts": { "build": "concurrently 'yarn:build:cjs' 'yarn:build:es' 'yarn:build:types'", "build:cjs": "tsc -p tsconfig.cjs.json", diff --git 
a/clients/client-cloudwatch/CHANGELOG.md b/clients/client-cloudwatch/CHANGELOG.md index ea8076fb32d0f..407fb84f2166c 100644 --- a/clients/client-cloudwatch/CHANGELOG.md +++ b/clients/client-cloudwatch/CHANGELOG.md @@ -3,6 +3,17 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. +# [3.135.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.134.0...v3.135.0) (2022-07-21) + + +### Features + +* **client-cloudwatch:** Adding support for the suppression of Composite Alarm actions ([e94c855](https://github.com/aws/aws-sdk-js-v3/commit/e94c855cd87e381d4cbf34685fe4f68da1031ffa)) + + + + + # [3.131.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.130.0...v3.131.0) (2022-07-15) **Note:** Version bump only for package @aws-sdk/client-cloudwatch diff --git a/clients/client-cloudwatch/package.json b/clients/client-cloudwatch/package.json index 851c0fafb3c19..4a361dfa97907 100644 --- a/clients/client-cloudwatch/package.json +++ b/clients/client-cloudwatch/package.json @@ -1,7 +1,7 @@ { "name": "@aws-sdk/client-cloudwatch", "description": "AWS SDK for JavaScript Cloudwatch Client for Node.js, Browser and React Native", - "version": "3.131.0", + "version": "3.135.0", "scripts": { "build": "concurrently 'yarn:build:cjs' 'yarn:build:es' 'yarn:build:types'", "build:cjs": "tsc -p tsconfig.cjs.json", diff --git a/clients/client-database-migration-service/CHANGELOG.md b/clients/client-database-migration-service/CHANGELOG.md index 7fb137e9c9afc..28363d3f17286 100644 --- a/clients/client-database-migration-service/CHANGELOG.md +++ b/clients/client-database-migration-service/CHANGELOG.md @@ -3,6 +3,14 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. 
+# [3.135.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.134.0...v3.135.0) (2022-07-21) + +**Note:** Version bump only for package @aws-sdk/client-database-migration-service + + + + + # [3.131.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.130.0...v3.131.0) (2022-07-15) **Note:** Version bump only for package @aws-sdk/client-database-migration-service diff --git a/clients/client-database-migration-service/package.json b/clients/client-database-migration-service/package.json index e8bc3c51ad20e..407cd1d972282 100644 --- a/clients/client-database-migration-service/package.json +++ b/clients/client-database-migration-service/package.json @@ -1,7 +1,7 @@ { "name": "@aws-sdk/client-database-migration-service", "description": "AWS SDK for JavaScript Database Migration Service Client for Node.js, Browser and React Native", - "version": "3.131.0", + "version": "3.135.0", "scripts": { "build": "concurrently 'yarn:build:cjs' 'yarn:build:es' 'yarn:build:types'", "build:cjs": "tsc -p tsconfig.cjs.json", diff --git a/clients/client-docdb/CHANGELOG.md b/clients/client-docdb/CHANGELOG.md index 4c573fca1c96c..9aaa8afd8c36c 100644 --- a/clients/client-docdb/CHANGELOG.md +++ b/clients/client-docdb/CHANGELOG.md @@ -3,6 +3,17 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. 
+# [3.135.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.134.0...v3.135.0) (2022-07-21) + + +### Features + +* **client-docdb:** Enable copy-on-write restore type ([9173056](https://github.com/aws/aws-sdk-js-v3/commit/91730562c1fe693c0270b088e5bd0c02d50372d4)) + + + + + # [3.131.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.130.0...v3.131.0) (2022-07-15) **Note:** Version bump only for package @aws-sdk/client-docdb diff --git a/clients/client-docdb/package.json b/clients/client-docdb/package.json index b2a52e1d7f63a..8d50248f3e3e1 100644 --- a/clients/client-docdb/package.json +++ b/clients/client-docdb/package.json @@ -1,7 +1,7 @@ { "name": "@aws-sdk/client-docdb", "description": "AWS SDK for JavaScript Docdb Client for Node.js, Browser and React Native", - "version": "3.131.0", + "version": "3.135.0", "scripts": { "build": "concurrently 'yarn:build:cjs' 'yarn:build:es' 'yarn:build:types'", "build:cjs": "tsc -p tsconfig.cjs.json", diff --git a/clients/client-ec2-instance-connect/CHANGELOG.md b/clients/client-ec2-instance-connect/CHANGELOG.md index 5fb77175a88be..b96e8a290707e 100644 --- a/clients/client-ec2-instance-connect/CHANGELOG.md +++ b/clients/client-ec2-instance-connect/CHANGELOG.md @@ -3,6 +3,17 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. +# [3.135.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.134.0...v3.135.0) (2022-07-21) + + +### Features + +* **client-ec2-instance-connect:** This release includes a new exception type "EC2InstanceUnavailableException" for SendSSHPublicKey and SendSerialConsoleSSHPublicKey APIs. 
([a2aef86](https://github.com/aws/aws-sdk-js-v3/commit/a2aef86781ea0833877ada3ed3dcdbaab5d700ec)) + + + + + # [3.131.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.130.0...v3.131.0) (2022-07-15) **Note:** Version bump only for package @aws-sdk/client-ec2-instance-connect diff --git a/clients/client-ec2-instance-connect/package.json b/clients/client-ec2-instance-connect/package.json index 7b92af3809b1b..9f13fdfc8c4a2 100644 --- a/clients/client-ec2-instance-connect/package.json +++ b/clients/client-ec2-instance-connect/package.json @@ -1,7 +1,7 @@ { "name": "@aws-sdk/client-ec2-instance-connect", "description": "AWS SDK for JavaScript Ec2 Instance Connect Client for Node.js, Browser and React Native", - "version": "3.131.0", + "version": "3.135.0", "scripts": { "build": "concurrently 'yarn:build:cjs' 'yarn:build:es' 'yarn:build:types'", "build:cjs": "tsc -p tsconfig.cjs.json", diff --git a/clients/client-frauddetector/CHANGELOG.md b/clients/client-frauddetector/CHANGELOG.md index 2f49979b816b9..c8b36ca6d4161 100644 --- a/clients/client-frauddetector/CHANGELOG.md +++ b/clients/client-frauddetector/CHANGELOG.md @@ -3,6 +3,17 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. +# [3.135.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.134.0...v3.135.0) (2022-07-21) + + +### Features + +* **client-frauddetector:** The release introduces Account Takeover Insights (ATI) model. The ATI model detects fraud relating to account takeover. This release also adds support for new variable types: ARE_CREDENTIALS_VALID and SESSION_ID and adds new structures to Model Version APIs. 
([b8837a8](https://github.com/aws/aws-sdk-js-v3/commit/b8837a8fe3ee514eed76a87d54e9165433ea0215)) + + + + + # [3.131.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.130.0...v3.131.0) (2022-07-15) **Note:** Version bump only for package @aws-sdk/client-frauddetector diff --git a/clients/client-frauddetector/package.json b/clients/client-frauddetector/package.json index afa31f6fed997..235b3b8bde02b 100644 --- a/clients/client-frauddetector/package.json +++ b/clients/client-frauddetector/package.json @@ -1,7 +1,7 @@ { "name": "@aws-sdk/client-frauddetector", "description": "AWS SDK for JavaScript Frauddetector Client for Node.js, Browser and React Native", - "version": "3.131.0", + "version": "3.135.0", "scripts": { "build": "concurrently 'yarn:build:cjs' 'yarn:build:es' 'yarn:build:types'", "build:cjs": "tsc -p tsconfig.cjs.json", diff --git a/clients/client-iotsitewise/CHANGELOG.md b/clients/client-iotsitewise/CHANGELOG.md index 927d9b9d3af7c..5ed8e52e60d50 100644 --- a/clients/client-iotsitewise/CHANGELOG.md +++ b/clients/client-iotsitewise/CHANGELOG.md @@ -3,6 +3,17 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. +# [3.135.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.134.0...v3.135.0) (2022-07-21) + + +### Features + +* **client-iotsitewise:** Added asynchronous API to ingest bulk historical and current data into IoT SiteWise. 
([972d020](https://github.com/aws/aws-sdk-js-v3/commit/972d020ec9d8e713b9306aa8a39b036886127fc0)) + + + + + # [3.131.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.130.0...v3.131.0) (2022-07-15) **Note:** Version bump only for package @aws-sdk/client-iotsitewise diff --git a/clients/client-iotsitewise/package.json b/clients/client-iotsitewise/package.json index d4587f6d7e388..e21b29ae39248 100644 --- a/clients/client-iotsitewise/package.json +++ b/clients/client-iotsitewise/package.json @@ -1,7 +1,7 @@ { "name": "@aws-sdk/client-iotsitewise", "description": "AWS SDK for JavaScript Iotsitewise Client for Node.js, Browser and React Native", - "version": "3.131.0", + "version": "3.135.0", "scripts": { "build": "concurrently 'yarn:build:cjs' 'yarn:build:es' 'yarn:build:types'", "build:cjs": "tsc -p tsconfig.cjs.json", diff --git a/clients/client-kendra/CHANGELOG.md b/clients/client-kendra/CHANGELOG.md index c243f4e5768de..ab730c964342a 100644 --- a/clients/client-kendra/CHANGELOG.md +++ b/clients/client-kendra/CHANGELOG.md @@ -3,6 +3,17 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. +# [3.135.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.134.0...v3.135.0) (2022-07-21) + + +### Features + +* **client-kendra:** Amazon Kendra now provides Oauth2 support for SharePoint Online. 
For more information, see https://docs.aws.amazon.com/kendra/latest/dg/data-source-sharepoint.html ([de0697a](https://github.com/aws/aws-sdk-js-v3/commit/de0697ad65ae98358ea534d9c2c90eadf4111bce)) + + + + + # [3.131.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.130.0...v3.131.0) (2022-07-15) **Note:** Version bump only for package @aws-sdk/client-kendra diff --git a/clients/client-kendra/package.json b/clients/client-kendra/package.json index 7b5060f4f748f..efb523554ae0f 100644 --- a/clients/client-kendra/package.json +++ b/clients/client-kendra/package.json @@ -1,7 +1,7 @@ { "name": "@aws-sdk/client-kendra", "description": "AWS SDK for JavaScript Kendra Client for Node.js, Browser and React Native", - "version": "3.131.0", + "version": "3.135.0", "scripts": { "build": "concurrently 'yarn:build:cjs' 'yarn:build:es' 'yarn:build:types'", "build:cjs": "tsc -p tsconfig.cjs.json", diff --git a/clients/client-lex-runtime-v2/CHANGELOG.md b/clients/client-lex-runtime-v2/CHANGELOG.md index 6e1b712b5ef5e..c13cff7ab3bf5 100644 --- a/clients/client-lex-runtime-v2/CHANGELOG.md +++ b/clients/client-lex-runtime-v2/CHANGELOG.md @@ -3,6 +3,14 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. 
+# [3.135.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.134.0...v3.135.0) (2022-07-21) + +**Note:** Version bump only for package @aws-sdk/client-lex-runtime-v2 + + + + + # [3.131.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.130.0...v3.131.0) (2022-07-15) **Note:** Version bump only for package @aws-sdk/client-lex-runtime-v2 diff --git a/clients/client-lex-runtime-v2/package.json b/clients/client-lex-runtime-v2/package.json index 952c6f9dafe5b..4cb72ece7c811 100644 --- a/clients/client-lex-runtime-v2/package.json +++ b/clients/client-lex-runtime-v2/package.json @@ -1,7 +1,7 @@ { "name": "@aws-sdk/client-lex-runtime-v2", "description": "AWS SDK for JavaScript Lex Runtime V2 Client for Node.js, Browser and React Native", - "version": "3.131.0", + "version": "3.135.0", "scripts": { "build": "concurrently 'yarn:build:cjs' 'yarn:build:es' 'yarn:build:types'", "build:cjs": "tsc -p tsconfig.cjs.json", diff --git a/clients/client-network-firewall/CHANGELOG.md b/clients/client-network-firewall/CHANGELOG.md index abb8b578e7c4e..d60d4a55672b0 100644 --- a/clients/client-network-firewall/CHANGELOG.md +++ b/clients/client-network-firewall/CHANGELOG.md @@ -3,6 +3,17 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. +# [3.135.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.134.0...v3.135.0) (2022-07-21) + + +### Features + +* **client-network-firewall:** Network Firewall now supports referencing dynamic IP sets from stateful rule groups, for IP sets stored in Amazon VPC prefix lists. 
([7fb7490](https://github.com/aws/aws-sdk-js-v3/commit/7fb7490a4c093be17407d16f77151f3fa7c3e03a)) + + + + + # [3.131.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.130.0...v3.131.0) (2022-07-15) **Note:** Version bump only for package @aws-sdk/client-network-firewall diff --git a/clients/client-network-firewall/package.json b/clients/client-network-firewall/package.json index e2673a52debee..82101d4829de9 100644 --- a/clients/client-network-firewall/package.json +++ b/clients/client-network-firewall/package.json @@ -1,7 +1,7 @@ { "name": "@aws-sdk/client-network-firewall", "description": "AWS SDK for JavaScript Network Firewall Client for Node.js, Browser and React Native", - "version": "3.131.0", + "version": "3.135.0", "scripts": { "build": "concurrently 'yarn:build:cjs' 'yarn:build:es' 'yarn:build:types'", "build:cjs": "tsc -p tsconfig.cjs.json", diff --git a/clients/client-rds/CHANGELOG.md b/clients/client-rds/CHANGELOG.md index aa905ae284eca..03abff6d074d9 100644 --- a/clients/client-rds/CHANGELOG.md +++ b/clients/client-rds/CHANGELOG.md @@ -3,6 +3,14 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. 
+# [3.135.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.134.0...v3.135.0) (2022-07-21) + +**Note:** Version bump only for package @aws-sdk/client-rds + + + + + # [3.131.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.130.0...v3.131.0) (2022-07-15) **Note:** Version bump only for package @aws-sdk/client-rds diff --git a/clients/client-rds/package.json b/clients/client-rds/package.json index 546b82595c00f..f7c47701fbfd9 100644 --- a/clients/client-rds/package.json +++ b/clients/client-rds/package.json @@ -1,7 +1,7 @@ { "name": "@aws-sdk/client-rds", "description": "AWS SDK for JavaScript Rds Client for Node.js, Browser and React Native", - "version": "3.131.0", + "version": "3.135.0", "scripts": { "build": "concurrently 'yarn:build:cjs' 'yarn:build:es' 'yarn:build:types'", "build:cjs": "tsc -p tsconfig.cjs.json", diff --git a/clients/client-s3/CHANGELOG.md b/clients/client-s3/CHANGELOG.md index 5d97c69c342cf..d470b9cfe7a0b 100644 --- a/clients/client-s3/CHANGELOG.md +++ b/clients/client-s3/CHANGELOG.md @@ -3,6 +3,14 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. 
+# [3.135.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.134.0...v3.135.0) (2022-07-21) + +**Note:** Version bump only for package @aws-sdk/client-s3 + + + + + # [3.131.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.130.0...v3.131.0) (2022-07-15) **Note:** Version bump only for package @aws-sdk/client-s3 diff --git a/clients/client-s3/package.json b/clients/client-s3/package.json index ae983e82481ba..7516e6638cb4d 100644 --- a/clients/client-s3/package.json +++ b/clients/client-s3/package.json @@ -1,7 +1,7 @@ { "name": "@aws-sdk/client-s3", "description": "AWS SDK for JavaScript S3 Client for Node.js, Browser and React Native", - "version": "3.131.0", + "version": "3.135.0", "scripts": { "build": "concurrently 'yarn:build:cjs' 'yarn:build:es' 'yarn:build:types'", "build:cjs": "tsc -p tsconfig.cjs.json", diff --git a/clients/client-transcribe-streaming/CHANGELOG.md b/clients/client-transcribe-streaming/CHANGELOG.md index 46062ff1fee6b..7d6d0cb459d9c 100644 --- a/clients/client-transcribe-streaming/CHANGELOG.md +++ b/clients/client-transcribe-streaming/CHANGELOG.md @@ -3,6 +3,14 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. 
+# [3.135.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.134.0...v3.135.0) (2022-07-21) + +**Note:** Version bump only for package @aws-sdk/client-transcribe-streaming + + + + + # [3.131.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.130.0...v3.131.0) (2022-07-15) **Note:** Version bump only for package @aws-sdk/client-transcribe-streaming diff --git a/clients/client-transcribe-streaming/package.json b/clients/client-transcribe-streaming/package.json index dc41deccd145a..8ae6cbb2e15f0 100644 --- a/clients/client-transcribe-streaming/package.json +++ b/clients/client-transcribe-streaming/package.json @@ -1,7 +1,7 @@ { "name": "@aws-sdk/client-transcribe-streaming", "description": "AWS SDK for JavaScript Transcribe Streaming Client for Node.js, Browser and React Native", - "version": "3.131.0", + "version": "3.135.0", "scripts": { "build": "concurrently 'yarn:build:cjs' 'yarn:build:es' 'yarn:build:types'", "build:cjs": "tsc -p tsconfig.cjs.json", diff --git a/lerna.json b/lerna.json index 98f7010b674e1..cd410ae848187 100644 --- a/lerna.json +++ b/lerna.json @@ -1,5 +1,5 @@ { - "version": "3.134.0", + "version": "3.135.0", "npmClient": "yarn", "useWorkspaces": true, "command": { diff --git a/lib/lib-storage/CHANGELOG.md b/lib/lib-storage/CHANGELOG.md index 2b103938ae255..1086f0292789a 100644 --- a/lib/lib-storage/CHANGELOG.md +++ b/lib/lib-storage/CHANGELOG.md @@ -3,6 +3,14 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. 
+# [3.135.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.134.0...v3.135.0) (2022-07-21) + +**Note:** Version bump only for package @aws-sdk/lib-storage + + + + + # [3.131.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.130.0...v3.131.0) (2022-07-15) diff --git a/lib/lib-storage/package.json b/lib/lib-storage/package.json index 21df74eae3370..5ef1c7b21290c 100644 --- a/lib/lib-storage/package.json +++ b/lib/lib-storage/package.json @@ -1,6 +1,6 @@ { "name": "@aws-sdk/lib-storage", - "version": "3.131.0", + "version": "3.135.0", "description": "Storage higher order operation", "main": "./dist-cjs/index.js", "module": "./dist-es/index.js", diff --git a/packages/s3-presigned-post/CHANGELOG.md b/packages/s3-presigned-post/CHANGELOG.md index 790e42fd3883d..4109e64209d63 100644 --- a/packages/s3-presigned-post/CHANGELOG.md +++ b/packages/s3-presigned-post/CHANGELOG.md @@ -3,6 +3,14 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. 
+# [3.135.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.134.0...v3.135.0) (2022-07-21) + +**Note:** Version bump only for package @aws-sdk/s3-presigned-post + + + + + # [3.131.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.130.0...v3.131.0) (2022-07-15) **Note:** Version bump only for package @aws-sdk/s3-presigned-post diff --git a/packages/s3-presigned-post/package.json b/packages/s3-presigned-post/package.json index a52ccd82f80ef..b961ab500349c 100644 --- a/packages/s3-presigned-post/package.json +++ b/packages/s3-presigned-post/package.json @@ -1,6 +1,6 @@ { "name": "@aws-sdk/s3-presigned-post", - "version": "3.131.0", + "version": "3.135.0", "scripts": { "build": "concurrently 'yarn:build:cjs' 'yarn:build:es' 'yarn:build:types'", "build:cjs": "tsc -p tsconfig.cjs.json", diff --git a/packages/s3-request-presigner/CHANGELOG.md b/packages/s3-request-presigner/CHANGELOG.md index 8f444c10c5228..861037727330d 100644 --- a/packages/s3-request-presigner/CHANGELOG.md +++ b/packages/s3-request-presigner/CHANGELOG.md @@ -3,6 +3,14 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. 
+# [3.135.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.134.0...v3.135.0) (2022-07-21) + +**Note:** Version bump only for package @aws-sdk/s3-request-presigner + + + + + # [3.131.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.130.0...v3.131.0) (2022-07-15) **Note:** Version bump only for package @aws-sdk/s3-request-presigner diff --git a/packages/s3-request-presigner/package.json b/packages/s3-request-presigner/package.json index dee91967f5ed0..70a74cc83a68a 100644 --- a/packages/s3-request-presigner/package.json +++ b/packages/s3-request-presigner/package.json @@ -1,6 +1,6 @@ { "name": "@aws-sdk/s3-request-presigner", - "version": "3.131.0", + "version": "3.135.0", "scripts": { "build": "concurrently 'yarn:build:cjs' 'yarn:build:es' 'yarn:build:types'", "build:cjs": "tsc -p tsconfig.cjs.json",