From e694134054db9c206aec4b6e6fa26b8ab807d6eb Mon Sep 17 00:00:00 2001
From: awstools
+  /**
+   * If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to
+   * opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions.
+   */
+  AutoMinorVersionUpgrade?: boolean;
 }

 export namespace ReplicationGroup {
@@ -1708,7 +1714,7 @@ export interface Snapshot {
  *
  *
- * Previous generation: (not recommended)
+ * Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
  *
  * T1 node types:
  * cache.t1.micro
@@ -1738,7 +1744,7 @@ export interface Snapshot {
  *
  *
- * Previous generation: (not recommended)
+ * Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
  *
  * C1 node types:
  * cache.c1.xlarge
@@ -1853,7 +1859,7 @@ export interface Snapshot {
  *
  *
- * Previous generation: (not recommended)
+ * Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
  *
  * M2 node types:
  * cache.m2.xlarge,
@@ -2385,7 +2391,7 @@ export interface CreateCacheClusterMessage {
  *
  *
- * Previous generation: (not recommended)
+ * Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
  *
  * T1 node types:
  * cache.t1.micro
@@ -2415,7 +2421,7 @@ export interface CreateCacheClusterMessage {
  *
  *
- * Previous generation: (not recommended)
+ * Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
  *
  * C1 node types:
  * cache.c1.xlarge
@@ -2488,7 +2494,7 @@ export interface CreateCacheClusterMessage {
  *
  *
- * Previous generation: (not recommended)
+ * Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
  *
  * M2 node types:
  * cache.m2.xlarge,
@@ -2806,7 +2812,7 @@ export namespace CreateCacheClusterMessage {
  *
  *
- * Previous generation: (not recommended)
+ * Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
  *
  * T1 node types:
  * cache.t1.micro
@@ -2836,7 +2842,7 @@ export namespace CreateCacheClusterMessage {
  *
  *
- * Previous generation: (not recommended)
+ * Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
  *
  * C1 node types:
  * cache.c1.xlarge
@@ -2940,7 +2946,7 @@ export namespace CreateCacheClusterMessage {
  *
  *
- * Previous generation: (not recommended)
+ * Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
  *
  * M2 node types:
  * cache.m2.xlarge,
@@ -3296,7 +3302,7 @@ export interface CacheCluster {
  *
  *
- * Previous generation: (not recommended)
+ * Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
  *
  * T1 node types:
  * cache.t1.micro
@@ -3326,7 +3332,7 @@ export interface CacheCluster {
  *
  *
- * Previous generation: (not recommended)
+ * Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
  *
  * C1 node types:
  * cache.c1.xlarge
@@ -3431,7 +3437,7 @@ export interface CacheCluster {
  *
  *
- * Previous generation: (not recommended)
+ * Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
  *
  * M2 node types:
  * cache.m2.xlarge,
@@ -4799,7 +4805,7 @@ export interface CreateReplicationGroupMessage {
  *
  *
- * Previous generation: (not recommended)
+ * Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
  *
  * T1 node types:
  * cache.t1.micro
@@ -4829,7 +4835,7 @@ export interface CreateReplicationGroupMessage {
  *
  *
- * Previous generation: (not recommended)
+ * Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
  *
  * C1 node types:
  * cache.c1.xlarge
@@ -4934,7 +4940,7 @@ export interface CreateReplicationGroupMessage {
  *
  *
- * Previous generation: (not recommended)
+ * Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
  *
  * M2 node types:
  * cache.m2.xlarge,
@@ -7459,7 +7465,7 @@ export interface DescribeReservedCacheNodesMessage {
  *
  *
- * Previous generation: (not recommended)
+ * Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
  *
  * T1 node types:
  * cache.t1.micro
@@ -7489,7 +7495,7 @@ export interface DescribeReservedCacheNodesMessage {
  *
  *
- * Previous generation: (not recommended)
+ * Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
  *
  * C1 node types:
  * cache.c1.xlarge
@@ -7593,7 +7599,7 @@ export interface DescribeReservedCacheNodesMessage {
  *
  *
- * Previous generation: (not recommended)
+ * Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
  *
  * M2 node types:
  * cache.m2.xlarge,
@@ -7808,7 +7814,7 @@ export interface ReservedCacheNode {
  *
  *
- * Previous generation: (not recommended)
+ * Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
  *
  * T1 node types:
  * cache.t1.micro
@@ -7838,7 +7844,7 @@ export interface ReservedCacheNode {
  *
  *
- * Previous generation: (not recommended)
+ * Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
  *
  * C1 node types:
  * cache.c1.xlarge
@@ -7942,7 +7948,7 @@ export interface ReservedCacheNode {
  *
  *
- * Previous generation: (not recommended)
+ * Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
  *
  * M2 node types:
  * cache.m2.xlarge,
@@ -8171,7 +8177,7 @@ export interface DescribeReservedCacheNodesOfferingsMessage {
  *
  *
- * Previous generation: (not recommended)
+ * Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
  *
  * T1 node types:
  * cache.t1.micro
@@ -8201,7 +8207,7 @@ export interface DescribeReservedCacheNodesOfferingsMessage {
  *
  *
- * Previous generation: (not recommended)
+ * Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
  *
  * C1 node types:
  * cache.c1.xlarge
@@ -8307,7 +8313,7 @@ export interface DescribeReservedCacheNodesOfferingsMessage {
  *
  *
- * Previous generation: (not recommended)
+ * Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
  *
  * M2 node types:
  * cache.m2.xlarge,
@@ -8494,7 +8500,7 @@ export interface ReservedCacheNodesOffering {
  *
  *
- * Previous generation: (not recommended)
+ * Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
  *
  * T1 node types:
  * cache.t1.micro
@@ -8524,7 +8530,7 @@ export interface ReservedCacheNodesOffering {
  *
  *
- * Previous generation: (not recommended)
+ * Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
  *
  * C1 node types:
  * cache.c1.xlarge
@@ -8630,7 +8636,7 @@ export interface ReservedCacheNodesOffering {
  *
  *
- * Previous generation: (not recommended)
+ * Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
  *
  * M2 node types:
  * cache.m2.xlarge,
diff --git a/clients/client-elasticache/src/protocols/Aws_query.ts b/clients/client-elasticache/src/protocols/Aws_query.ts
index 42d4d5e960cd1..3c7261af9158c 100644
--- a/clients/client-elasticache/src/protocols/Aws_query.ts
+++ b/clients/client-elasticache/src/protocols/Aws_query.ts
@@ -11326,6 +11326,7 @@ const deserializeAws_queryReplicationGroup = (output: any, context: __SerdeConte
LogDeliveryConfigurations: undefined,
ReplicationGroupCreateTime: undefined,
DataTiering: undefined,
+ AutoMinorVersionUpgrade: undefined,
};
if (output["ReplicationGroupId"] !== undefined) {
contents.ReplicationGroupId = __expectString(output["ReplicationGroupId"]);
@@ -11442,6 +11443,9 @@ const deserializeAws_queryReplicationGroup = (output: any, context: __SerdeConte
if (output["DataTiering"] !== undefined) {
contents.DataTiering = __expectString(output["DataTiering"]);
}
+ if (output["AutoMinorVersionUpgrade"] !== undefined) {
+ contents.AutoMinorVersionUpgrade = __parseBoolean(output["AutoMinorVersionUpgrade"]);
+ }
return contents;
};
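The two hunks above add the `AutoMinorVersionUpgrade` member to the `ReplicationGroup` shape and wire it through the query-protocol deserializer. Below is a minimal consumer-side sketch, assuming placeholder region and replication group values (none of these identifiers come from the patch itself); the `ModifyReplicationGroupCommand` call is included only to illustrate the opt-in the new documentation text describes.

```ts
import {
  ElastiCacheClient,
  DescribeReplicationGroupsCommand,
  ModifyReplicationGroupCommand,
} from "@aws-sdk/client-elasticache";

// Placeholder region and replication group ID, for illustration only.
const client = new ElastiCacheClient({ region: "us-east-1" });
const replicationGroupId = "my-redis-6-group";

async function main(): Promise<void> {
  // Opt the group in to the next auto minor version upgrade campaign
  // (meaningful for Redis engine version 6.0 and later).
  await client.send(
    new ModifyReplicationGroupCommand({
      ReplicationGroupId: replicationGroupId,
      AutoMinorVersionUpgrade: true,
      ApplyImmediately: true,
    })
  );

  // With the deserializer change above, the flag is echoed back on the
  // ReplicationGroup shape returned by DescribeReplicationGroups.
  const { ReplicationGroups } = await client.send(
    new DescribeReplicationGroupsCommand({ ReplicationGroupId: replicationGroupId })
  );
  for (const group of ReplicationGroups ?? []) {
    console.log(group.ReplicationGroupId, group.AutoMinorVersionUpgrade);
  }
}

main().catch(console.error);
```

Because the member is optional on the shape, `undefined` simply means the service response did not include the element.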
diff --git a/codegen/sdk-codegen/aws-models/elasticache.json b/codegen/sdk-codegen/aws-models/elasticache.json
index ed1be721d66fc..83e6dd4205115 100644
--- a/codegen/sdk-codegen/aws-models/elasticache.json
+++ b/codegen/sdk-codegen/aws-models/elasticache.json
@@ -727,7 +727,7 @@
"CacheNodeType": {
"target": "com.amazonaws.elasticache#String",
"traits": {
- "smithy.api#documentation": "
The name of the compute and memory capacity node type for the cluster.
\n \nThe following node types are supported by ElastiCache. \n\t\t\t\tGenerally speaking, the current generation types provide more memory and computational power\n\t\t\tat lower cost when compared to their equivalent previous generation counterparts.
\n\t\tGeneral purpose:
\n\t\t\t\tCurrent generation:
\n\t\t\t\t\t \n\t\t\t\t\t \n\t\t\t\t\t \n\t\t\t\t\t \n\t\t\t\t\t \n\t\t\t\t\t\n M6g node types: (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): \tcache.m6g.large
,\n\t\t\t\t\t\t\tcache.m6g.xlarge
,\n\t\t\t\t\t\t\tcache.m6g.2xlarge
,\n\t\t\t\t\t\t\tcache.m6g.4xlarge
,\n\t\t\t\t\t\t\tcache.m6g.8xlarge
,\n\t\t\t\t\t\t\tcache.m6g.12xlarge
,\n\t\t\t\t\t\t\tcache.m6g.16xlarge
\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t
For region availability, see Supported Node Types\n
\n\n M5 node types:\n \t\t\t\t\t\t cache.m5.large
,\n \t\t\t\t\t\tcache.m5.xlarge
,\n \t\t\t\t\t\tcache.m5.2xlarge
,\n \t\t\t\t\t\tcache.m5.4xlarge
,\n \t\t\t\t\t\tcache.m5.12xlarge
,\n \t\t\t\t\t\tcache.m5.24xlarge
\n \t\t\t\t\t\t\n \t\t\t\t\t\t\n \t\t\t\t\t\t
\n M4 node types:\n \t\t\t\t\t\t cache.m4.large
,\n \t\t\t\t\t\tcache.m4.xlarge
,\n \t\t\t\t\t\tcache.m4.2xlarge
,\n \t\t\t\t\t\tcache.m4.4xlarge
,\n \t\t\t\t\t\tcache.m4.10xlarge
\n
\n T4g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t cache.t4g.micro
,\n\t\t\t\t\t cache.t4g.small
,\n\t\t\t\t\t cache.t4g.medium
\n\t\t\t\t\t
\n T3 node types:\n\t\t\t\t\t cache.t3.micro
, \n \t\t\t\t\t\tcache.t3.small
,\n \t\t\t\t\t\tcache.t3.medium
\n
\n T2 node types:\n\t\t\t\t\t cache.t2.micro
, \n \t\t\t\t\t\tcache.t2.small
,\n \t\t\t\t\t\tcache.t2.medium
\n
Previous generation: (not recommended)
\n\t\t\t\t\t\t\n T1 node types:\n\t\t\t\t\t cache.t1.micro
\n
\n M1 node types:\n\t\t\t\t\t\t cache.m1.small
, \n\t\t\t\t\t\t cache.m1.medium
, \n\t\t\t\t\t\t cache.m1.large
,\n\t\t\t\t\t\t cache.m1.xlarge
\n
\n M3 node types:\n \t\t\t\t\t\t cache.m3.medium
,\n \t\t\t\t\t\tcache.m3.large
, \n \t\t\t\t\t\tcache.m3.xlarge
,\n \t\t\t\t\t\tcache.m3.2xlarge
\n
Compute optimized:
\n\n\t\t\t\tPrevious generation: (not recommended)
\n\t\t\t\n C1 node types:\n\t\t\t cache.c1.xlarge
\n
Memory optimized with data tiering:
\n\t\tCurrent generation:
\n\t\t \n\t\t\n R6gd node types (available only for Redis engine version 6.2 onward).
\n\t\t \n\t\t \n\t\t \n\t\t \n\t\t\t\n\t\t \n\t\t cache.r6gd.xlarge
,\n\t\t cache.r6gd.2xlarge
,\n\t\t cache.r6gd.4xlarge
,\n\t\t cache.r6gd.8xlarge
,\n\t\t cache.r6gd.12xlarge
,\n\t\t cache.r6gd.16xlarge
\n\t\t \n\t\t \n\t\t \n\t\t \n\t\t \n\t\t \n\t\t
Memory optimized:
\n\t\t\t\tCurrent generation:
\n\n\t\t \t\t\t\t \n\t\t\t\t\t \n\t\t\t\t\t\n R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward).
\n\t\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\t\t\n\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t cache.r6g.large
,\n\t\t\t\t\t\t\tcache.r6g.xlarge
,\n\t\t\t\t\t\t\tcache.r6g.2xlarge
,\n\t\t\t\t\t\t\tcache.r6g.4xlarge
,\n\t\t\t\t\t\t\tcache.r6g.8xlarge
,\n\t\t\t\t\t\t\tcache.r6g.12xlarge
,\n\t\t\t\t\t\t\tcache.r6g.16xlarge
\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t
For region availability, see Supported Node Types\n
\n\n R5 node types:\n \t\t\t\t\t cache.r5.large
,\n \t\t\t\t\t cache.r5.xlarge
,\n \t\t\t\t\t cache.r5.2xlarge
,\n \t\t\t\t\t cache.r5.4xlarge
,\n \t\t\t\t\t cache.r5.12xlarge
,\n \t\t\t\t\t cache.r5.24xlarge
\n
\n R4 node types:\n \t\t\t\t\t cache.r4.large
,\n \t\t\t\t\t cache.r4.xlarge
,\n \t\t\t\t\t cache.r4.2xlarge
,\n \t\t\t\t\t cache.r4.4xlarge
,\n \t\t\t\t\t cache.r4.8xlarge
,\n \t\t\t\t\t cache.r4.16xlarge
\n
Previous generation: (not recommended)
\n\n M2 node types:\t\t\t\t\t\t\n \t\t\t\t\t cache.m2.xlarge
, \n \t\t\t\t\t\tcache.m2.2xlarge
,\n \t\t\t\t\t\tcache.m2.4xlarge
\n
\n R3 node types:\n \t\t\t\t\t cache.r3.large
, \n \t\t\t\t\t\tcache.r3.xlarge
,\n \t\t\t\t\t\tcache.r3.2xlarge
, \n \t\t\t\t\t\tcache.r3.4xlarge
,\n \t\t\t\t\t\tcache.r3.8xlarge
\n
\n Additional node type info\n
\n\t\tAll current generation instance types are created in Amazon VPC by default.
\nRedis append-only files (AOF) are not supported for T1 or T2 instances.
\nRedis Multi-AZ with automatic failover is not supported on T1 instances.
\nRedis configuration variables appendonly
and \n\t\t\t\tappendfsync
are not supported on Redis version 2.8.22 and later.
The name of the compute and memory capacity node type for the cluster.
\n \nThe following node types are supported by ElastiCache. \n\t\t\t\tGenerally speaking, the current generation types provide more memory and computational power\n\t\t\tat lower cost when compared to their equivalent previous generation counterparts.
\n\t\tGeneral purpose:
\n\t\t\t\tCurrent generation:
\n\t\t\t\t\t \n\t\t\t\t\t \n\t\t\t\t\t \n\t\t\t\t\t \n\t\t\t\t\t \n\t\t\t\t\t\n M6g node types: (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): \tcache.m6g.large
,\n\t\t\t\t\t\t\tcache.m6g.xlarge
,\n\t\t\t\t\t\t\tcache.m6g.2xlarge
,\n\t\t\t\t\t\t\tcache.m6g.4xlarge
,\n\t\t\t\t\t\t\tcache.m6g.8xlarge
,\n\t\t\t\t\t\t\tcache.m6g.12xlarge
,\n\t\t\t\t\t\t\tcache.m6g.16xlarge
\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t
For region availability, see Supported Node Types\n
\n\n M5 node types:\n \t\t\t\t\t\t cache.m5.large
,\n \t\t\t\t\t\tcache.m5.xlarge
,\n \t\t\t\t\t\tcache.m5.2xlarge
,\n \t\t\t\t\t\tcache.m5.4xlarge
,\n \t\t\t\t\t\tcache.m5.12xlarge
,\n \t\t\t\t\t\tcache.m5.24xlarge
\n \t\t\t\t\t\t\n \t\t\t\t\t\t\n \t\t\t\t\t\t
\n M4 node types:\n \t\t\t\t\t\t cache.m4.large
,\n \t\t\t\t\t\tcache.m4.xlarge
,\n \t\t\t\t\t\tcache.m4.2xlarge
,\n \t\t\t\t\t\tcache.m4.4xlarge
,\n \t\t\t\t\t\tcache.m4.10xlarge
\n
\n T4g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t cache.t4g.micro
,\n\t\t\t\t\t cache.t4g.small
,\n\t\t\t\t\t cache.t4g.medium
\n\t\t\t\t\t
\n T3 node types:\n\t\t\t\t\t cache.t3.micro
, \n \t\t\t\t\t\tcache.t3.small
,\n \t\t\t\t\t\tcache.t3.medium
\n
\n T2 node types:\n\t\t\t\t\t cache.t2.micro
, \n \t\t\t\t\t\tcache.t2.small
,\n \t\t\t\t\t\tcache.t2.medium
\n
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\t\t\t\t\t\t\n T1 node types:\n\t\t\t\t\t cache.t1.micro
\n
\n M1 node types:\n\t\t\t\t\t\t cache.m1.small
, \n\t\t\t\t\t\t cache.m1.medium
, \n\t\t\t\t\t\t cache.m1.large
,\n\t\t\t\t\t\t cache.m1.xlarge
\n
\n M3 node types:\n \t\t\t\t\t\t cache.m3.medium
,\n \t\t\t\t\t\tcache.m3.large
, \n \t\t\t\t\t\tcache.m3.xlarge
,\n \t\t\t\t\t\tcache.m3.2xlarge
\n
Compute optimized:
\n\n\t\t\t\tPrevious generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\t\t\t\n C1 node types:\n\t\t\t cache.c1.xlarge
\n
Memory optimized with data tiering:
\n\t\tCurrent generation:
\n\t\t \n\t\t\n R6gd node types (available only for Redis engine version 6.2 onward).
\n\t\t \n\t\t \n\t\t \n\t\t \n\t\t\t\n\t\t \n\t\t cache.r6gd.xlarge
,\n\t\t cache.r6gd.2xlarge
,\n\t\t cache.r6gd.4xlarge
,\n\t\t cache.r6gd.8xlarge
,\n\t\t cache.r6gd.12xlarge
,\n\t\t cache.r6gd.16xlarge
\n\t\t \n\t\t \n\t\t \n\t\t \n\t\t \n\t\t \n\t\t
Memory optimized:
\n\t\t\t\tCurrent generation:
\n\n\t\t \t\t\t\t \n\t\t\t\t\t \n\t\t\t\t\t\n R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward).
\n\t\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\t\t\n\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t cache.r6g.large
,\n\t\t\t\t\t\t\tcache.r6g.xlarge
,\n\t\t\t\t\t\t\tcache.r6g.2xlarge
,\n\t\t\t\t\t\t\tcache.r6g.4xlarge
,\n\t\t\t\t\t\t\tcache.r6g.8xlarge
,\n\t\t\t\t\t\t\tcache.r6g.12xlarge
,\n\t\t\t\t\t\t\tcache.r6g.16xlarge
\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t
For region availability, see Supported Node Types\n
\n\n R5 node types:\n \t\t\t\t\t cache.r5.large
,\n \t\t\t\t\t cache.r5.xlarge
,\n \t\t\t\t\t cache.r5.2xlarge
,\n \t\t\t\t\t cache.r5.4xlarge
,\n \t\t\t\t\t cache.r5.12xlarge
,\n \t\t\t\t\t cache.r5.24xlarge
\n
\n R4 node types:\n \t\t\t\t\t cache.r4.large
,\n \t\t\t\t\t cache.r4.xlarge
,\n \t\t\t\t\t cache.r4.2xlarge
,\n \t\t\t\t\t cache.r4.4xlarge
,\n \t\t\t\t\t cache.r4.8xlarge
,\n \t\t\t\t\t cache.r4.16xlarge
\n
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n M2 node types:\t\t\t\t\t\t\n \t\t\t\t\t cache.m2.xlarge
, \n \t\t\t\t\t\tcache.m2.2xlarge
,\n \t\t\t\t\t\tcache.m2.4xlarge
\n
\n R3 node types:\n \t\t\t\t\t cache.r3.large
, \n \t\t\t\t\t\tcache.r3.xlarge
,\n \t\t\t\t\t\tcache.r3.2xlarge
, \n \t\t\t\t\t\tcache.r3.4xlarge
,\n \t\t\t\t\t\tcache.r3.8xlarge
\n
\n Additional node type info\n
\n\t\tAll current generation instance types are created in Amazon VPC by default.
\nRedis append-only files (AOF) are not supported for T1 or T2 instances.
\nRedis Multi-AZ with automatic failover is not supported on T1 instances.
\nRedis configuration variables appendonly
and \n\t\t\t\tappendfsync
are not supported on Redis version 2.8.22 and later.
-            "smithy.api#documentation": "Represents an individual cache node within a cluster. Each cache node runs its own instance of the cluster's protocol-compliant caching software - either Memcached or Redis. The following node types are supported by ElastiCache. ... General purpose: ... Previous generation: (not recommended) ... Compute optimized: Previous generation: (not recommended) ... Memory optimized: ... Previous generation: (not recommended) ..."
+            "smithy.api#documentation": "Represents an individual cache node within a cluster. Each cache node runs its own instance of the cluster's protocol-compliant caching software - either Memcached or Redis. The following node types are supported by ElastiCache. ... General purpose: ... Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) ... Compute optimized: Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) ... Memory optimized: ... Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) ..."
-            "smithy.api#documentation": "The compute and memory capacity of the nodes in the node group (shard). The following node types are supported by ElastiCache. ... General purpose: ... Previous generation: (not recommended) ... Compute optimized: Previous generation: (not recommended) ... Memory optimized: ... Previous generation: (not recommended) ..."
+            "smithy.api#documentation": "The compute and memory capacity of the nodes in the node group (shard). The following node types are supported by ElastiCache. ... General purpose: ... Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) ... Compute optimized: Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) ... Memory optimized: ... Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) ..."
-            "smithy.api#documentation": "The compute and memory capacity of the nodes in the node group (shard). The following node types are supported by ElastiCache. ... General purpose: ... Previous generation: (not recommended) ... Compute optimized: Previous generation: (not recommended) ... Memory optimized with data tiering: Current generation: R6gd node types ... Memory optimized: ... Previous generation: (not recommended) ..."
+            "smithy.api#documentation": "The compute and memory capacity of the nodes in the node group (shard). The following node types are supported by ElastiCache. ... General purpose: ... Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) ... Compute optimized: Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) ... Memory optimized with data tiering: Current generation: R6gd node types ... Memory optimized: ... Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) ..."
-            "smithy.api#documentation": "The cache node type filter value. Use this parameter to show only those reservations matching the specified cache node type. The following node types are supported by ElastiCache. ... General purpose: ... Previous generation: (not recommended) ... Compute optimized: Previous generation: (not recommended) ... Memory optimized: ... Previous generation: (not recommended) ..."
+            "smithy.api#documentation": "The cache node type filter value. Use this parameter to show only those reservations matching the specified cache node type. The following node types are supported by ElastiCache. ... General purpose: ... Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) ... Compute optimized: Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) ... Memory optimized: ... Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) ..."
The cache node type filter value. Use this parameter to show only the available offerings matching the specified cache node type.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. (The earlier and updated revisions of this description in this change differ only in the "Previous generation" notes, which now add that existing clusters are still supported but creation of new clusters is not; the list below shows the updated wording.)

General purpose:
Current generation:
M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge
For region availability, see Supported Node Types.
M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge
M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge
T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium
T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium
T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
T1 node types: cache.t1.micro
M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge
M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

Compute optimized:
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
C1 node types: cache.c1.xlarge

Memory optimized with data tiering:
Current generation:
R6gd node types (available only for Redis engine version 6.2 onward): cache.r6gd.xlarge, cache.r6gd.2xlarge, cache.r6gd.4xlarge, cache.r6gd.8xlarge, cache.r6gd.12xlarge, cache.r6gd.16xlarge

Memory optimized:
Current generation:
R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge
For region availability, see Supported Node Types.
R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge
R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge
R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info:
All current generation instance types are created in Amazon VPC by default.
Redis append-only files (AOF) are not supported for T1 or T2 instances.
Redis Multi-AZ with automatic failover is not supported on T1 instances.
Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later.
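A minimal sketch of how this filter might be used with the generated v3 client follows; the node type value is a placeholder and not part of this change.

```ts
import {
  ElastiCacheClient,
  DescribeReservedCacheNodesOfferingsCommand,
} from "@aws-sdk/client-elasticache";

async function listOfferingsForNodeType(): Promise<void> {
  const client = new ElastiCacheClient({ region: "us-east-1" });
  // Show only the reserved-node offerings that match a single cache node type.
  const { ReservedCacheNodesOfferings } = await client.send(
    new DescribeReservedCacheNodesOfferingsCommand({ CacheNodeType: "cache.r6g.large" })
  );
  for (const offering of ReservedCacheNodesOfferings ?? []) {
    console.log(offering.ReservedCacheNodesOfferingId, offering.Duration, offering.FixedPrice);
  }
}
```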
Enables data tiering. Data tiering is only supported for replication groups using the r6gd node type. This parameter must be set to true when using r6gd nodes. For more information, see Data tiering.
" } + }, + "AutoMinorVersionUpgrade": { + "target": "com.amazonaws.elasticache#Boolean", + "traits": { + "smithy.api#documentation": "If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. \n
" + } } }, "traits": { @@ -8728,7 +8734,7 @@ "CacheNodeType": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "The cache node type for the reserved cache nodes.
\n \nThe following node types are supported by ElastiCache. \n\t\t\t\tGenerally speaking, the current generation types provide more memory and computational power\n\t\t\tat lower cost when compared to their equivalent previous generation counterparts.
\n\t\tGeneral purpose:
\n\t\t\t\tCurrent generation:
\n\t\t\t\t\t \n\t\t\t\t\t \n\t\t\t\t\t \n\t\t\t\t\t \n\t\t\t\t\t \n\t\t\t\t\t\n M6g node types: (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\tcache.m6g.large
,\n\t\t\t\t\t\t\tcache.m6g.xlarge
,\n\t\t\t\t\t\t\tcache.m6g.2xlarge
,\n\t\t\t\t\t\t\tcache.m6g.4xlarge
,\n\t\t\t\t\t\t\tcache.m6g.8xlarge
,\n\t\t\t\t\t\t\tcache.m6g.12xlarge
,\n\t\t\t\t\t\t\tcache.m6g.16xlarge
\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t
For region availability, see Supported Node Types\n
\n\n M5 node types:\n \t\t\t\t\t\t cache.m5.large
,\n \t\t\t\t\t\tcache.m5.xlarge
,\n \t\t\t\t\t\tcache.m5.2xlarge
,\n \t\t\t\t\t\tcache.m5.4xlarge
,\n \t\t\t\t\t\tcache.m5.12xlarge
,\n \t\t\t\t\t\tcache.m5.24xlarge
\n \t\t\t\t\t\t\n \t\t\t\t\t\t\n \t\t\t\t\t\t
\n M4 node types:\n \t\t\t\t\t\t cache.m4.large
,\n \t\t\t\t\t\tcache.m4.xlarge
,\n \t\t\t\t\t\tcache.m4.2xlarge
,\n \t\t\t\t\t\tcache.m4.4xlarge
,\n \t\t\t\t\t\tcache.m4.10xlarge
\n
\n T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward):\n\t\t\t\t\t cache.t4g.micro
,\n\t\t\t\t\t cache.t4g.small
,\n\t\t\t\t\t cache.t4g.medium
\n\t\t\t\t\t
\n T3 node types:\n\t\t\t\t\t cache.t3.micro
, \n \t\t\t\t\t\tcache.t3.small
,\n \t\t\t\t\t\tcache.t3.medium
\n
\n T2 node types:\n\t\t\t\t\t cache.t2.micro
, \n \t\t\t\t\t\tcache.t2.small
,\n \t\t\t\t\t\tcache.t2.medium
\n
Previous generation: (not recommended)
\n\t\t\t\t\t\t\n T1 node types:\n\t\t\t\t\t cache.t1.micro
\n
\n M1 node types:\n\t\t\t\t\t\t cache.m1.small
, \n\t\t\t\t\t\t cache.m1.medium
, \n\t\t\t\t\t\t cache.m1.large
,\n\t\t\t\t\t\t cache.m1.xlarge
\n
\n M3 node types:\n \t\t\t\t\t\t cache.m3.medium
,\n \t\t\t\t\t\tcache.m3.large
, \n \t\t\t\t\t\tcache.m3.xlarge
,\n \t\t\t\t\t\tcache.m3.2xlarge
\n
Compute optimized:
\n\n\t\t\t\tPrevious generation: (not recommended)
\n\t\t\t\n C1 node types:\n\t\t\t cache.c1.xlarge
\n
Memory optimized with data tiering:
\n\t\tCurrent generation:
\n\t\t \n\t\t\n R6gd node types (available only for Redis engine version 6.2 onward).
\n\t\t \n\t\t \n\t\t \n\t\t \n\t\t\t\n\t\t \n\t\t cache.r6gd.xlarge
,\n\t\t cache.r6gd.2xlarge
,\n\t\t cache.r6gd.4xlarge
,\n\t\t cache.r6gd.8xlarge
,\n\t\t cache.r6gd.12xlarge
,\n\t\t cache.r6gd.16xlarge
\n\t\t \n\t\t \n\t\t \n\t\t \n\t\t \n\t\t \n\t\t
Memory optimized:
\n\t\t\t\tCurrent generation:
\n\n\t\n\t\t\t\t\t \n\t\t\t\t\t\n R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward).
\n\t\t\t\t\t\t\n\n\t\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t cache.r6g.large
,\n\t\t\t\t\t\t\tcache.r6g.xlarge
,\n\t\t\t\t\t\t\tcache.r6g.2xlarge
,\n\t\t\t\t\t\t\tcache.r6g.4xlarge
,\n\t\t\t\t\t\t\tcache.r6g.8xlarge
,\n\t\t\t\t\t\t\tcache.r6g.12xlarge
,\n\t\t\t\t\t\t\tcache.r6g.16xlarge
\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t
For region availability, see Supported Node Types\n
\n\n R5 node types:\n \t\t\t\t\t cache.r5.large
,\n \t\t\t\t\t cache.r5.xlarge
,\n \t\t\t\t\t cache.r5.2xlarge
,\n \t\t\t\t\t cache.r5.4xlarge
,\n \t\t\t\t\t cache.r5.12xlarge
,\n \t\t\t\t\t cache.r5.24xlarge
\n
\n R4 node types:\n \t\t\t\t\t cache.r4.large
,\n \t\t\t\t\t cache.r4.xlarge
,\n \t\t\t\t\t cache.r4.2xlarge
,\n \t\t\t\t\t cache.r4.4xlarge
,\n \t\t\t\t\t cache.r4.8xlarge
,\n \t\t\t\t\t cache.r4.16xlarge
\n
Previous generation: (not recommended)
\n\n M2 node types:\t\t\t\t\t\t\n \t\t\t\t\t cache.m2.xlarge
, \n \t\t\t\t\t\tcache.m2.2xlarge
,\n \t\t\t\t\t\tcache.m2.4xlarge
\n
\n R3 node types:\n \t\t\t\t\t cache.r3.large
, \n \t\t\t\t\t\tcache.r3.xlarge
,\n \t\t\t\t\t\tcache.r3.2xlarge
, \n \t\t\t\t\t\tcache.r3.4xlarge
,\n \t\t\t\t\t\tcache.r3.8xlarge
\n
\n Additional node type info\n
\n\t\tAll current generation instance types are created in Amazon VPC by default.
\nRedis append-only files (AOF) are not supported for T1 or T2 instances.
\nRedis Multi-AZ with automatic failover is not supported on T1 instances.
\nRedis configuration variables appendonly
and \n\t\t\t\tappendfsync
are not supported on Redis version 2.8.22 and later.
The cache node type for the reserved cache nodes.
\n \nThe following node types are supported by ElastiCache. \n\t\t\t\tGenerally speaking, the current generation types provide more memory and computational power\n\t\t\tat lower cost when compared to their equivalent previous generation counterparts.
\n\t\tGeneral purpose:
\n\t\t\t\tCurrent generation:
\n\t\t\t\t\t \n\t\t\t\t\t \n\t\t\t\t\t \n\t\t\t\t\t \n\t\t\t\t\t \n\t\t\t\t\t\n M6g node types: (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\tcache.m6g.large
,\n\t\t\t\t\t\t\tcache.m6g.xlarge
,\n\t\t\t\t\t\t\tcache.m6g.2xlarge
,\n\t\t\t\t\t\t\tcache.m6g.4xlarge
,\n\t\t\t\t\t\t\tcache.m6g.8xlarge
,\n\t\t\t\t\t\t\tcache.m6g.12xlarge
,\n\t\t\t\t\t\t\tcache.m6g.16xlarge
\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t
For region availability, see Supported Node Types\n
\n\n M5 node types:\n \t\t\t\t\t\t cache.m5.large
,\n \t\t\t\t\t\tcache.m5.xlarge
,\n \t\t\t\t\t\tcache.m5.2xlarge
,\n \t\t\t\t\t\tcache.m5.4xlarge
,\n \t\t\t\t\t\tcache.m5.12xlarge
,\n \t\t\t\t\t\tcache.m5.24xlarge
\n \t\t\t\t\t\t\n \t\t\t\t\t\t\n \t\t\t\t\t\t
\n M4 node types:\n \t\t\t\t\t\t cache.m4.large
,\n \t\t\t\t\t\tcache.m4.xlarge
,\n \t\t\t\t\t\tcache.m4.2xlarge
,\n \t\t\t\t\t\tcache.m4.4xlarge
,\n \t\t\t\t\t\tcache.m4.10xlarge
\n
\n T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward):\n\t\t\t\t\t cache.t4g.micro
,\n\t\t\t\t\t cache.t4g.small
,\n\t\t\t\t\t cache.t4g.medium
\n\t\t\t\t\t
\n T3 node types:\n\t\t\t\t\t cache.t3.micro
, \n \t\t\t\t\t\tcache.t3.small
,\n \t\t\t\t\t\tcache.t3.medium
\n
\n T2 node types:\n\t\t\t\t\t cache.t2.micro
, \n \t\t\t\t\t\tcache.t2.small
,\n \t\t\t\t\t\tcache.t2.medium
\n
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\t\t\t\t\t\t\n T1 node types:\n\t\t\t\t\t cache.t1.micro
\n
\n M1 node types:\n\t\t\t\t\t\t cache.m1.small
, \n\t\t\t\t\t\t cache.m1.medium
, \n\t\t\t\t\t\t cache.m1.large
,\n\t\t\t\t\t\t cache.m1.xlarge
\n
\n M3 node types:\n \t\t\t\t\t\t cache.m3.medium
,\n \t\t\t\t\t\tcache.m3.large
, \n \t\t\t\t\t\tcache.m3.xlarge
,\n \t\t\t\t\t\tcache.m3.2xlarge
\n
Compute optimized:
\n\n\t\t\t\tPrevious generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\t\t\t\n C1 node types:\n\t\t\t cache.c1.xlarge
\n
Memory optimized with data tiering:
\n\t\tCurrent generation:
\n\t\t \n\t\t\n R6gd node types (available only for Redis engine version 6.2 onward).
\n\t\t \n\t\t \n\t\t \n\t\t \n\t\t\t\n\t\t \n\t\t cache.r6gd.xlarge
,\n\t\t cache.r6gd.2xlarge
,\n\t\t cache.r6gd.4xlarge
,\n\t\t cache.r6gd.8xlarge
,\n\t\t cache.r6gd.12xlarge
,\n\t\t cache.r6gd.16xlarge
\n\t\t \n\t\t \n\t\t \n\t\t \n\t\t \n\t\t \n\t\t
Memory optimized:
\n\t\t\t\tCurrent generation:
\n\n\t\n\t\t\t\t\t \n\t\t\t\t\t\n R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward).
\n\t\t\t\t\t\t\n\n\t\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t cache.r6g.large
,\n\t\t\t\t\t\t\tcache.r6g.xlarge
,\n\t\t\t\t\t\t\tcache.r6g.2xlarge
,\n\t\t\t\t\t\t\tcache.r6g.4xlarge
,\n\t\t\t\t\t\t\tcache.r6g.8xlarge
,\n\t\t\t\t\t\t\tcache.r6g.12xlarge
,\n\t\t\t\t\t\t\tcache.r6g.16xlarge
\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t
For region availability, see Supported Node Types\n
\n\n R5 node types:\n \t\t\t\t\t cache.r5.large
,\n \t\t\t\t\t cache.r5.xlarge
,\n \t\t\t\t\t cache.r5.2xlarge
,\n \t\t\t\t\t cache.r5.4xlarge
,\n \t\t\t\t\t cache.r5.12xlarge
,\n \t\t\t\t\t cache.r5.24xlarge
\n
\n R4 node types:\n \t\t\t\t\t cache.r4.large
,\n \t\t\t\t\t cache.r4.xlarge
,\n \t\t\t\t\t cache.r4.2xlarge
,\n \t\t\t\t\t cache.r4.4xlarge
,\n \t\t\t\t\t cache.r4.8xlarge
,\n \t\t\t\t\t cache.r4.16xlarge
\n
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
\n\n M2 node types:\t\t\t\t\t\t\n \t\t\t\t\t cache.m2.xlarge
, \n \t\t\t\t\t\tcache.m2.2xlarge
,\n \t\t\t\t\t\tcache.m2.4xlarge
\n
\n R3 node types:\n \t\t\t\t\t cache.r3.large
, \n \t\t\t\t\t\tcache.r3.xlarge
,\n \t\t\t\t\t\tcache.r3.2xlarge
, \n \t\t\t\t\t\tcache.r3.4xlarge
,\n \t\t\t\t\t\tcache.r3.8xlarge
\n
\n Additional node type info\n
\n\t\tAll current generation instance types are created in Amazon VPC by default.
\nRedis append-only files (AOF) are not supported for T1 or T2 instances.
\nRedis Multi-AZ with automatic failover is not supported on T1 instances.
\nRedis configuration variables appendonly
and \n\t\t\t\tappendfsync
are not supported on Redis version 2.8.22 and later.
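As an illustrative sketch (the node type below is a placeholder), reserved nodes of a given type could be listed like so:

```ts
import { ElastiCacheClient, DescribeReservedCacheNodesCommand } from "@aws-sdk/client-elasticache";

async function listReservedNodesByType(): Promise<void> {
  const client = new ElastiCacheClient({ region: "us-east-1" });
  // List only the reserved cache nodes purchased for a single node type.
  const { ReservedCacheNodes } = await client.send(
    new DescribeReservedCacheNodesCommand({ CacheNodeType: "cache.r5.large" })
  );
  for (const node of ReservedCacheNodes ?? []) {
    console.log(node.ReservedCacheNodeId, node.CacheNodeType, node.State);
  }
}
```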
The cache node type for the reserved cache node.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. (As above, the earlier and updated revisions of this description differ only in the expanded "Previous generation" notes; the list below shows the updated wording.)

General purpose:
Current generation:
M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge
For region availability, see Supported Node Types.
M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge
M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge
T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium
T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium
T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
T1 node types: cache.t1.micro
M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge
M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

Compute optimized:
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
C1 node types: cache.c1.xlarge

Memory optimized with data tiering:
Current generation:
R6gd node types (available only for Redis engine version 6.2 onward): cache.r6gd.xlarge, cache.r6gd.2xlarge, cache.r6gd.4xlarge, cache.r6gd.8xlarge, cache.r6gd.12xlarge, cache.r6gd.16xlarge

Memory optimized:
Current generation:
R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge
For region availability, see Supported Node Types.
R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge
R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge
R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info:
All current generation instance types are created in Amazon VPC by default.
Redis append-only files (AOF) are not supported for T1 or T2 instances.
Redis Multi-AZ with automatic failover is not supported on T1 instances.
Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later.
The name of the compute and memory capacity node type for the source cluster.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. (As above, the earlier and updated revisions of this description differ only in the expanded "Previous generation" notes; the list below shows the updated wording.)

General purpose:
Current generation:
M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge
For region availability, see Supported Node Types.
M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge
M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge
T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium
T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium
T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
T1 node types: cache.t1.micro
M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge
M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

Compute optimized:
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
C1 node types: cache.c1.xlarge

Memory optimized with data tiering:
Current generation:
R6gd node types (available only for Redis engine version 6.2 onward): cache.r6gd.xlarge, cache.r6gd.2xlarge, cache.r6gd.4xlarge, cache.r6gd.8xlarge, cache.r6gd.12xlarge, cache.r6gd.16xlarge

Memory optimized:
Current generation:
R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge
For region availability, see Supported Node Types.
R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge
R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge
Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge
R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info:
All current generation instance types are created in Amazon VPC by default.
Redis append-only files (AOF) are not supported for T1 or T2 instances.
Redis Multi-AZ with automatic failover is not supported on T1 instances.
Redis configuration variables appendonly and appendfsync are not supported on Redis version 2.8.22 and later.
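A brief sketch (placeholder snapshot name, not part of this change) of reading this field back from a snapshot:

```ts
import { ElastiCacheClient, DescribeSnapshotsCommand } from "@aws-sdk/client-elasticache";

async function showSnapshotSourceNodeType(): Promise<void> {
  const client = new ElastiCacheClient({ region: "us-east-1" });
  // Inspect the node type of the cluster that a snapshot was taken from.
  const { Snapshots } = await client.send(
    new DescribeSnapshotsCommand({ SnapshotName: "example-snapshot" })
  );
  console.log(Snapshots?.[0]?.CacheNodeType);
}
```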
Allocate a CIDR from an IPAM pool. In IPAM, an allocation is a CIDR assignment from an IPAM pool to another resource or IPAM pool. For more information, see Allocate CIDRs in the Amazon VPC IPAM User Guide. + *
Allocate a CIDR from an IPAM pool. In IPAM, an allocation is a CIDR assignment from an IPAM pool to another resource or IPAM pool. For more information, see Allocate CIDRs in the Amazon VPC IPAM User Guide. *
*/ public allocateIpamPoolCidr( @@ -5146,7 +5146,7 @@ export class EC2 extends EC2Client { * to automate your IP address management workflows including assigning, tracking, * troubleshooting, and auditing IP addresses across Amazon Web Services Regions and accounts * throughout your Amazon Web Services Organization. - *For more information, see Create an IPAM in the Amazon VPC IPAM User Guide. + *
For more information, see Create an IPAM in the Amazon VPC IPAM User Guide. *
*/ public createIpam(args: CreateIpamCommandInput, options?: __HttpHandlerOptions): PromiseCreate an IP address pool for Amazon VPC IP Address Manager (IPAM). In IPAM, a pool is a collection of contiguous IP addresses CIDRs. Pools enable you to organize your IP addresses according to your routing and security needs. For example, if you have separate routing and security needs for development and production applications, you can create a pool for each.
- *For more information, see Create a top-level pool in the Amazon VPC IPAM User Guide. + *
For more information, see Create a top-level pool in the Amazon VPC IPAM User Guide. *
*/ public createIpamPool( @@ -5208,7 +5208,7 @@ export class EC2 extends EC2Client { /** *Create an IPAM scope. In IPAM, a scope is the highest-level container within IPAM. An IPAM contains two default scopes. Each scope represents the IP space for a single network. The private scope is intended for all private IP address space. The public scope is intended for all public IP address space. Scopes enable you to reuse IP addresses across multiple unconnected networks without causing IP address overlap or conflict.
- *For more information, see Add a scope in the Amazon VPC IPAM User Guide.
+ *For more information, see Add a scope in the Amazon VPC IPAM User Guide.
*/ public createIpamScope( args: CreateIpamScopeCommandInput, @@ -5323,13 +5323,13 @@ export class EC2 extends EC2Client { } /** - *Creates a new version for a launch template. You can specify an existing version of + *
Creates a new version of a launch template. You can specify an existing version of * launch template from which to base the new version.
*Launch template versions are numbered in the order in which they are created. You * cannot specify, change, or replace the numbering of launch template versions.
*Launch templates are immutable; after you create a launch template, you can't modify it. * Instead, you can create a new version of the launch template that includes any changes you require.
- *For more information, see Modify a launch template (manage launch template versions)in the + *
For more information, see Modify a launch template (manage launch template versions) in the * Amazon Elastic Compute Cloud User Guide.
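A minimal sketch of creating a new version from an existing one; the template ID, source version, and overridden field are placeholders, not part of this change.

```ts
import { EC2Client, CreateLaunchTemplateVersionCommand } from "@aws-sdk/client-ec2";

async function addLaunchTemplateVersion(): Promise<void> {
  const client = new EC2Client({ region: "us-east-1" });
  // Base the new version on version 1 and override a single field.
  const { LaunchTemplateVersion } = await client.send(
    new CreateLaunchTemplateVersionCommand({
      LaunchTemplateId: "lt-0123456789abcdef0",
      SourceVersion: "1",
      VersionDescription: "Bump instance type",
      LaunchTemplateData: { InstanceType: "t3.small" },
    })
  );
  console.log(LaunchTemplateVersion?.VersionNumber);
}
```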
*/ public createLaunchTemplateVersion( @@ -6286,10 +6286,10 @@ export class EC2 extends EC2Client { * the new value. Each resource can have a maximum of 50 tags. Each tag consists of a key and * optional value. Tag keys must be unique per resource. * - *For more information about tags, see Tagging Your Resources in the + *
For more information about tags, see Tag your Amazon EC2 resources in the * Amazon Elastic Compute Cloud User Guide. For more information about * creating IAM policies that control users' access to resources based on tags, see Supported - * Resource-Level Permissions for Amazon EC2 API Actions in the Amazon + * resource-level permissions for Amazon EC2 API actions in the Amazon * Elastic Compute Cloud User Guide.
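For illustration (placeholder instance ID and tag values), a single tagging call looks like this:

```ts
import { EC2Client, CreateTagsCommand } from "@aws-sdk/client-ec2";

async function tagInstance(): Promise<void> {
  const client = new EC2Client({ region: "us-east-1" });
  // Add (or overwrite) one tag on one instance; tag keys must be unique per resource.
  await client.send(
    new CreateTagsCommand({
      Resources: ["i-0123456789abcdef0"],
      Tags: [{ Key: "Stage", Value: "test" }],
    })
  );
}
```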
*/ public createTags(args: CreateTagsCommandInput, options?: __HttpHandlerOptions): PromiseRequests a transit gateway peering attachment between the specified transit gateway - * (requester) and a peer transit gateway (accepter). The transit gateways must be in - * different Regions. The peer transit gateway can be in your account or a different - * Amazon Web Services account.
+ * (requester) and a peer transit gateway (accepter). The peer transit gateway can be in + * your account or a different Amazon Web Services account. *After you create the peering attachment, the owner of the accepter transit gateway * must accept the attachment request.
*/ @@ -7596,7 +7595,7 @@ export class EC2 extends EC2Client { /** *Delete an IPAM. Deleting an IPAM removes all monitored data associated with the IPAM including the historical data for CIDRs.
- *For more information, see Delete an IPAM in the Amazon VPC IPAM User Guide. + *
For more information, see Delete an IPAM in the Amazon VPC IPAM User Guide. *
*/ public deleteIpam(args: DeleteIpamCommandInput, options?: __HttpHandlerOptions): PromiseFor more information, see Delete a pool in the Amazon VPC IPAM User Guide. + *
For more information, see Delete a pool in the Amazon VPC IPAM User Guide. *
*/ public deleteIpamPool( @@ -7663,7 +7662,7 @@ export class EC2 extends EC2Client { /** *Delete the scope for an IPAM. You cannot delete the default scopes.
- *For more information, see Delete a scope in the Amazon VPC IPAM User Guide. + *
For more information, see Delete a scope in the Amazon VPC IPAM User Guide. *
*/ public deleteIpamScope( @@ -8516,9 +8515,10 @@ export class EC2 extends EC2Client { /** *Deletes the specified set of tags from the specified set of resources.
- *To list the current tags, use DescribeTags. For more information about tags, see - * Tagging Your Resources - * in the Amazon Elastic Compute Cloud User Guide.
+ *To list the current tags, use DescribeTags. For more information about + * tags, see Tag + * your Amazon EC2 resources in the Amazon Elastic Compute Cloud User + * Guide.
*/ public deleteTags(args: DeleteTagsCommandInput, options?: __HttpHandlerOptions): PromiseDeprovision a CIDR provisioned from an IPAM pool. If you deprovision a CIDR from a pool that has a source pool, the CIDR is recycled back into the source pool. For more information, see Deprovision pool CIDRs in the Amazon VPC IPAM User Guide.
+ *Deprovision a CIDR provisioned from an IPAM pool. If you deprovision a CIDR from a pool that has a source pool, the CIDR is recycled back into the source pool. For more information, see Deprovision pool CIDRs in the Amazon VPC IPAM User Guide.
*/ public deprovisionIpamPoolCidr( args: DeprovisionIpamPoolCidrCommandInput, @@ -11504,7 +11504,7 @@ export class EC2 extends EC2Client { /** *Get information about your IPAM pools.
- *For more information, see What is IPAM? in the Amazon VPC IPAM User Guide. + *
For more information, see What is IPAM? in the Amazon VPC IPAM User Guide. *
*/ public describeIpams( @@ -13302,8 +13302,8 @@ export class EC2 extends EC2Client { /** *Describes the specified tags for your EC2 resources.
- *For more information about tags, see Tagging Your Resources in the - * Amazon Elastic Compute Cloud User Guide.
+ *For more information about tags, see Tag your Amazon EC2 resources in the + * Amazon Elastic Compute Cloud User Guide.
*/ public describeTags( args: DescribeTagsCommandInput, @@ -14722,7 +14722,7 @@ export class EC2 extends EC2Client { } /** - *Disable the IPAM account. For more information, see Enable integration with Organizations in the Amazon VPC IPAM User Guide. + *
Disable the IPAM account. For more information, see Enable integration with Organizations in the Amazon VPC IPAM User Guide. *
*/ public disableIpamOrganizationAdminAccount( @@ -15510,7 +15510,7 @@ export class EC2 extends EC2Client { } /** - *Enable an Organizations member account as the IPAM admin account. You cannot select the Organizations management account as the IPAM admin account. For more information, see Enable integration with Organizations in the Amazon VPC IPAM User Guide. + *
Enable an Organizations member account as the IPAM admin account. You cannot select the Organizations management account as the IPAM admin account. For more information, see Enable integration with Organizations in the Amazon VPC IPAM User Guide. *
*/ public enableIpamOrganizationAdminAccount( @@ -16406,7 +16406,7 @@ export class EC2 extends EC2Client { } /** - *Retrieve historical information about a CIDR within an IPAM scope. For more information, see View the history of IP addresses in the Amazon VPC IPAM User Guide.
+ *Retrieve historical information about a CIDR within an IPAM scope. For more information, see View the history of IP addresses in the Amazon VPC IPAM User Guide.
*/ public getIpamAddressHistory( args: GetIpamAddressHistoryCommandInput, @@ -16538,8 +16538,8 @@ export class EC2 extends EC2Client { * create a launch template. *This action calls on other describe actions to get instance information. Depending on
* your instance configuration, you may need to allow the following actions in your IAM
- * policy: DescribeSpotInstanceRequests, DescribeInstanceCreditSpecifications,
- * DescribeVolumes, DescribeInstanceAttribute, and DescribeElasticGpus. Or, you can allow
+ * policy: DescribeSpotInstanceRequests
, DescribeInstanceCreditSpecifications
,
+ * DescribeVolumes
, DescribeInstanceAttribute
, and DescribeElasticGpus
. Or, you can allow
* describe*
depending on your instance requirements.
Modify the configurations of an IPAM pool.
- *For more information, see Modify a pool in the Amazon VPC IPAM User Guide. + *
For more information, see Modify a pool in the Amazon VPC IPAM User Guide. *
*/ public modifyIpamPool( @@ -18354,7 +18354,7 @@ export class EC2 extends EC2Client { /** *Modify a resource CIDR. You can use this action to transfer resource CIDRs between scopes and ignore resource CIDRs that you do not want to manage. If set to false, the resource will not be tracked for overlap, it cannot be auto-imported into a pool, and it will be removed from any pool it has an allocation in.
- *For more information, see Move resource CIDRs between scopes and Change the monitoring state of resource CIDRs in the Amazon VPC IPAM User Guide.
+ *For more information, see Move resource CIDRs between scopes and Change the monitoring state of resource CIDRs in the Amazon VPC IPAM User Guide.
*/ public modifyIpamResourceCidr( args: ModifyIpamResourceCidrCommandInput, @@ -19649,7 +19649,7 @@ export class EC2 extends EC2Client { /** *Move an BYOIP IPv4 CIDR to IPAM from a public IPv4 pool.
- *If you already have an IPv4 BYOIP CIDR with Amazon Web Services, you can move the CIDR to IPAM from a public IPv4 pool. You cannot move an IPv6 CIDR to IPAM. If you are bringing a new IP address to Amazon Web Services for the first time, complete the steps in Tutorial: BYOIP address CIDRs to IPAM.
+ *If you already have an IPv4 BYOIP CIDR with Amazon Web Services, you can move the CIDR to IPAM from a public IPv4 pool. You cannot move an IPv6 CIDR to IPAM. If you are bringing a new IP address to Amazon Web Services for the first time, complete the steps in Tutorial: BYOIP address CIDRs to IPAM.
*/ public moveByoipCidrToIpam( args: MoveByoipCidrToIpamCommandInput, @@ -19725,7 +19725,7 @@ export class EC2 extends EC2Client { /** *Provision a CIDR to an IPAM pool. You can use this action to provision new CIDRs to a top-level pool or to transfer a CIDR from a top-level pool to a pool within it.
- *For more information, see Provision CIDRs to pools in the Amazon VPC IPAM User Guide. + *
For more information, see Provision CIDRs to pools in the Amazon VPC IPAM User Guide. *
*/ public provisionIpamPoolCidr( @@ -19759,7 +19759,7 @@ export class EC2 extends EC2Client { /** *Provision a CIDR to a public IPv4 pool.
- *For more information about IPAM, see What is IPAM? in the Amazon VPC IPAM User Guide.
+ *For more information about IPAM, see What is IPAM? in the Amazon VPC IPAM User Guide.
*/ public provisionPublicIpv4PoolCidr( args: ProvisionPublicIpv4PoolCidrCommandInput, @@ -19948,8 +19948,10 @@ export class EC2 extends EC2Client { * Creating your * own AMIs in the Amazon Elastic Compute Cloud User Guide. *For Amazon EBS-backed instances, CreateImage creates and registers - * the AMI in a single request, so you don't have to register the AMI yourself.
+ *For Amazon EBS-backed instances, CreateImage creates and registers the AMI + * in a single request, so you don't have to register the AMI yourself. We recommend that you + * always use CreateImage unless you have a specific reason to use + * RegisterImage.
*If needed, you can deregister an AMI at any time. Any modifications you make to an AMI backed by an instance store volume invalidates its registration. @@ -20392,7 +20394,7 @@ export class EC2 extends EC2Client { } /** - *
Release an allocation within an IPAM pool. You can only use this action to release manual allocations. To remove an allocation for a resource without deleting the resource, set its monitored state to false using ModifyIpamResourceCidr. For more information, see Release an allocation in the Amazon VPC IPAM User Guide. + *
Release an allocation within an IPAM pool. You can only use this action to release manual allocations. To remove an allocation for a resource without deleting the resource, set its monitored state to false using ModifyIpamResourceCidr. For more information, see Release an allocation in the Amazon VPC IPAM User Guide. *
*/ public releaseIpamPoolAllocation( diff --git a/clients/client-ec2/src/commands/AllocateIpamPoolCidrCommand.ts b/clients/client-ec2/src/commands/AllocateIpamPoolCidrCommand.ts index 7245ccb26a0b9..a4e25ddb28091 100644 --- a/clients/client-ec2/src/commands/AllocateIpamPoolCidrCommand.ts +++ b/clients/client-ec2/src/commands/AllocateIpamPoolCidrCommand.ts @@ -23,7 +23,7 @@ export interface AllocateIpamPoolCidrCommandInput extends AllocateIpamPoolCidrRe export interface AllocateIpamPoolCidrCommandOutput extends AllocateIpamPoolCidrResult, __MetadataBearer {} /** - *Allocate a CIDR from an IPAM pool. In IPAM, an allocation is a CIDR assignment from an IPAM pool to another resource or IPAM pool. For more information, see Allocate CIDRs in the Amazon VPC IPAM User Guide. + *
Allocate a CIDR from an IPAM pool. In IPAM, an allocation is a CIDR assignment from an IPAM pool to another resource or IPAM pool. For more information, see Allocate CIDRs in the Amazon VPC IPAM User Guide. *
* @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-ec2/src/commands/CreateIpamCommand.ts b/clients/client-ec2/src/commands/CreateIpamCommand.ts index 52ec06d931a34..cb3c4a7216b5e 100644 --- a/clients/client-ec2/src/commands/CreateIpamCommand.ts +++ b/clients/client-ec2/src/commands/CreateIpamCommand.ts @@ -24,7 +24,7 @@ export interface CreateIpamCommandOutput extends CreateIpamResult, __MetadataBea * to automate your IP address management workflows including assigning, tracking, * troubleshooting, and auditing IP addresses across Amazon Web Services Regions and accounts * throughout your Amazon Web Services Organization. - *For more information, see Create an IPAM in the Amazon VPC IPAM User Guide. + *
For more information, see Create an IPAM in the Amazon VPC IPAM User Guide. *
* @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-ec2/src/commands/CreateIpamPoolCommand.ts b/clients/client-ec2/src/commands/CreateIpamPoolCommand.ts index 5b2346dfefc1c..5a084d597d02e 100644 --- a/clients/client-ec2/src/commands/CreateIpamPoolCommand.ts +++ b/clients/client-ec2/src/commands/CreateIpamPoolCommand.ts @@ -21,7 +21,7 @@ export interface CreateIpamPoolCommandOutput extends CreateIpamPoolResult, __Met /** *Create an IP address pool for Amazon VPC IP Address Manager (IPAM). In IPAM, a pool is a collection of contiguous IP addresses CIDRs. Pools enable you to organize your IP addresses according to your routing and security needs. For example, if you have separate routing and security needs for development and production applications, you can create a pool for each.
- *For more information, see Create a top-level pool in the Amazon VPC IPAM User Guide. + *
For more information, see Create a top-level pool in the Amazon VPC IPAM User Guide. *
* @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-ec2/src/commands/CreateIpamScopeCommand.ts b/clients/client-ec2/src/commands/CreateIpamScopeCommand.ts index c4584b845b713..95797f4754a7e 100644 --- a/clients/client-ec2/src/commands/CreateIpamScopeCommand.ts +++ b/clients/client-ec2/src/commands/CreateIpamScopeCommand.ts @@ -21,7 +21,7 @@ export interface CreateIpamScopeCommandOutput extends CreateIpamScopeResult, __M /** *Create an IPAM scope. In IPAM, a scope is the highest-level container within IPAM. An IPAM contains two default scopes. Each scope represents the IP space for a single network. The private scope is intended for all private IP address space. The public scope is intended for all public IP address space. Scopes enable you to reuse IP addresses across multiple unconnected networks without causing IP address overlap or conflict.
- *For more information, see Add a scope in the Amazon VPC IPAM User Guide.
+ *For more information, see Add a scope in the Amazon VPC IPAM User Guide.
* @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-ec2/src/commands/CreateLaunchTemplateVersionCommand.ts b/clients/client-ec2/src/commands/CreateLaunchTemplateVersionCommand.ts index 4aec70bb89dff..0419e38ac49b7 100644 --- a/clients/client-ec2/src/commands/CreateLaunchTemplateVersionCommand.ts +++ b/clients/client-ec2/src/commands/CreateLaunchTemplateVersionCommand.ts @@ -23,13 +23,13 @@ export interface CreateLaunchTemplateVersionCommandInput extends CreateLaunchTem export interface CreateLaunchTemplateVersionCommandOutput extends CreateLaunchTemplateVersionResult, __MetadataBearer {} /** - *Creates a new version for a launch template. You can specify an existing version of + *
Creates a new version of a launch template. You can specify an existing version of * launch template from which to base the new version.
*Launch template versions are numbered in the order in which they are created. You * cannot specify, change, or replace the numbering of launch template versions.
*Launch templates are immutable; after you create a launch template, you can't modify it. * Instead, you can create a new version of the launch template that includes any changes you require.
- *For more information, see Modify a launch template (manage launch template versions)in the + *
For more information, see Modify a launch template (manage launch template versions) in the * Amazon Elastic Compute Cloud User Guide.
* @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-ec2/src/commands/CreateTagsCommand.ts b/clients/client-ec2/src/commands/CreateTagsCommand.ts index f0a80958765cd..c4f28195405b6 100644 --- a/clients/client-ec2/src/commands/CreateTagsCommand.ts +++ b/clients/client-ec2/src/commands/CreateTagsCommand.ts @@ -25,10 +25,10 @@ export interface CreateTagsCommandOutput extends __MetadataBearer {} * the new value. Each resource can have a maximum of 50 tags. Each tag consists of a key and * optional value. Tag keys must be unique per resource. * - *For more information about tags, see Tagging Your Resources in the + *
For more information about tags, see Tag your Amazon EC2 resources in the * Amazon Elastic Compute Cloud User Guide. For more information about * creating IAM policies that control users' access to resources based on tags, see Supported - * Resource-Level Permissions for Amazon EC2 API Actions in the Amazon + * resource-level permissions for Amazon EC2 API actions in the Amazon * Elastic Compute Cloud User Guide.
* @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-ec2/src/commands/CreateTransitGatewayPeeringAttachmentCommand.ts b/clients/client-ec2/src/commands/CreateTransitGatewayPeeringAttachmentCommand.ts index 056f79e4cf015..fb2928fa4b951 100644 --- a/clients/client-ec2/src/commands/CreateTransitGatewayPeeringAttachmentCommand.ts +++ b/clients/client-ec2/src/commands/CreateTransitGatewayPeeringAttachmentCommand.ts @@ -30,9 +30,8 @@ export interface CreateTransitGatewayPeeringAttachmentCommandOutput /** *Requests a transit gateway peering attachment between the specified transit gateway - * (requester) and a peer transit gateway (accepter). The transit gateways must be in - * different Regions. The peer transit gateway can be in your account or a different - * Amazon Web Services account.
+ * (requester) and a peer transit gateway (accepter). The peer transit gateway can be in + * your account or a different Amazon Web Services account. *After you create the peering attachment, the owner of the accepter transit gateway * must accept the attachment request.
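A minimal sketch of requesting such a peering attachment, including the DynamicRouting option this change documents; every ID, the account number, and the Regions are placeholders:

```ts
import { EC2Client, CreateTransitGatewayPeeringAttachmentCommand } from "@aws-sdk/client-ec2";

const client = new EC2Client({ region: "us-east-1" });

const { TransitGatewayPeeringAttachment } = await client.send(
  new CreateTransitGatewayPeeringAttachmentCommand({
    TransitGatewayId: "tgw-0123456789abcdef0",     // requester
    PeerTransitGatewayId: "tgw-0fedcba9876543210", // accepter
    PeerAccountId: "111122223333",
    PeerRegion: "us-west-2",
    Options: { DynamicRouting: "enable" },
  })
);
// The accepter side still has to accept the attachment request.
console.log(TransitGatewayPeeringAttachment?.State);
```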
* @example diff --git a/clients/client-ec2/src/commands/DeleteIpamCommand.ts b/clients/client-ec2/src/commands/DeleteIpamCommand.ts index cb4bcd974423a..e1ff203682cbc 100644 --- a/clients/client-ec2/src/commands/DeleteIpamCommand.ts +++ b/clients/client-ec2/src/commands/DeleteIpamCommand.ts @@ -21,7 +21,7 @@ export interface DeleteIpamCommandOutput extends DeleteIpamResult, __MetadataBea /** *Delete an IPAM. Deleting an IPAM removes all monitored data associated with the IPAM including the historical data for CIDRs.
- *For more information, see Delete an IPAM in the Amazon VPC IPAM User Guide. + *
For more information, see Delete an IPAM in the Amazon VPC IPAM User Guide. *
* @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-ec2/src/commands/DeleteIpamPoolCommand.ts b/clients/client-ec2/src/commands/DeleteIpamPoolCommand.ts index d7b5ad4b838cd..269ea5ff1147e 100644 --- a/clients/client-ec2/src/commands/DeleteIpamPoolCommand.ts +++ b/clients/client-ec2/src/commands/DeleteIpamPoolCommand.ts @@ -26,7 +26,7 @@ export interface DeleteIpamPoolCommandOutput extends DeleteIpamPoolResult, __Met * allocations, see ReleaseIpamPoolAllocation. To deprovision pool * CIDRs, see DeprovisionIpamPoolCidr. * - *For more information, see Delete a pool in the Amazon VPC IPAM User Guide. + *
For more information, see Delete a pool in the Amazon VPC IPAM User Guide. *
* @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-ec2/src/commands/DeleteIpamScopeCommand.ts b/clients/client-ec2/src/commands/DeleteIpamScopeCommand.ts index f2498be5adbab..ac1256fbf562e 100644 --- a/clients/client-ec2/src/commands/DeleteIpamScopeCommand.ts +++ b/clients/client-ec2/src/commands/DeleteIpamScopeCommand.ts @@ -21,7 +21,7 @@ export interface DeleteIpamScopeCommandOutput extends DeleteIpamScopeResult, __M /** *Delete the scope for an IPAM. You cannot delete the default scopes.
- *For more information, see Delete a scope in the Amazon VPC IPAM User Guide. + *
For more information, see Delete a scope in the Amazon VPC IPAM User Guide. *
* @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-ec2/src/commands/DeleteTagsCommand.ts b/clients/client-ec2/src/commands/DeleteTagsCommand.ts index 132dc05288827..e68c0ae21bcf2 100644 --- a/clients/client-ec2/src/commands/DeleteTagsCommand.ts +++ b/clients/client-ec2/src/commands/DeleteTagsCommand.ts @@ -21,9 +21,10 @@ export interface DeleteTagsCommandOutput extends __MetadataBearer {} /** *Deletes the specified set of tags from the specified set of resources.
- *To list the current tags, use DescribeTags. For more information about tags, see - * Tagging Your Resources - * in the Amazon Elastic Compute Cloud User Guide.
+ *To list the current tags, use DescribeTags. For more information about + * tags, see Tag + * your Amazon EC2 resources in the Amazon Elastic Compute Cloud User + * Guide.
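Taken together with CreateTags, a tag can be applied and later removed like this; the instance ID is a placeholder, and passing only the key to DeleteTags removes the tag regardless of its value:

```ts
import { EC2Client, CreateTagsCommand, DeleteTagsCommand } from "@aws-sdk/client-ec2";

const client = new EC2Client({ region: "us-east-1" });
const resourceId = "i-1234567890abcdef0"; // placeholder instance ID

// Apply a tag...
await client.send(
  new CreateTagsCommand({
    Resources: [resourceId],
    Tags: [{ Key: "Stack", Value: "test" }],
  })
);

// ...and later delete it. Specifying only the key deletes the tag whatever its value.
await client.send(
  new DeleteTagsCommand({
    Resources: [resourceId],
    Tags: [{ Key: "Stack" }],
  })
);
```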
* @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-ec2/src/commands/DeprovisionIpamPoolCidrCommand.ts b/clients/client-ec2/src/commands/DeprovisionIpamPoolCidrCommand.ts index 4d3de1f4c0cf6..c7f145eacb27b 100644 --- a/clients/client-ec2/src/commands/DeprovisionIpamPoolCidrCommand.ts +++ b/clients/client-ec2/src/commands/DeprovisionIpamPoolCidrCommand.ts @@ -23,7 +23,7 @@ export interface DeprovisionIpamPoolCidrCommandInput extends DeprovisionIpamPool export interface DeprovisionIpamPoolCidrCommandOutput extends DeprovisionIpamPoolCidrResult, __MetadataBearer {} /** - *Deprovision a CIDR provisioned from an IPAM pool. If you deprovision a CIDR from a pool that has a source pool, the CIDR is recycled back into the source pool. For more information, see Deprovision pool CIDRs in the Amazon VPC IPAM User Guide.
+ *Deprovision a CIDR provisioned from an IPAM pool. If you deprovision a CIDR from a pool that has a source pool, the CIDR is recycled back into the source pool. For more information, see Deprovision pool CIDRs in the Amazon VPC IPAM User Guide.
* @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-ec2/src/commands/DescribeIpamsCommand.ts b/clients/client-ec2/src/commands/DescribeIpamsCommand.ts index bf476b314c11d..47d82ffd217bf 100644 --- a/clients/client-ec2/src/commands/DescribeIpamsCommand.ts +++ b/clients/client-ec2/src/commands/DescribeIpamsCommand.ts @@ -21,7 +21,7 @@ export interface DescribeIpamsCommandOutput extends DescribeIpamsResult, __Metad /** *Get information about your IPAM pools.
- *For more information, see What is IPAM? in the Amazon VPC IPAM User Guide. + *
For more information, see What is IPAM? in the Amazon VPC IPAM User Guide. *
* @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-ec2/src/commands/DescribeTagsCommand.ts b/clients/client-ec2/src/commands/DescribeTagsCommand.ts index 19bdc46058764..d54c5c45ee1e0 100644 --- a/clients/client-ec2/src/commands/DescribeTagsCommand.ts +++ b/clients/client-ec2/src/commands/DescribeTagsCommand.ts @@ -21,8 +21,8 @@ export interface DescribeTagsCommandOutput extends DescribeTagsResult, __Metadat /** *Describes the specified tags for your EC2 resources.
- *For more information about tags, see Tagging Your Resources in the - * Amazon Elastic Compute Cloud User Guide.
+ *For more information about tags, see Tag your Amazon EC2 resources in the + * Amazon Elastic Compute Cloud User Guide.
* @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-ec2/src/commands/DisableIpamOrganizationAdminAccountCommand.ts b/clients/client-ec2/src/commands/DisableIpamOrganizationAdminAccountCommand.ts index adb0f599a6996..d6b27338d1054 100644 --- a/clients/client-ec2/src/commands/DisableIpamOrganizationAdminAccountCommand.ts +++ b/clients/client-ec2/src/commands/DisableIpamOrganizationAdminAccountCommand.ts @@ -28,7 +28,7 @@ export interface DisableIpamOrganizationAdminAccountCommandOutput __MetadataBearer {} /** - *Disable the IPAM account. For more information, see Enable integration with Organizations in the Amazon VPC IPAM User Guide. + *
Disable the IPAM account. For more information, see Enable integration with Organizations in the Amazon VPC IPAM User Guide. *
* @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-ec2/src/commands/EnableIpamOrganizationAdminAccountCommand.ts b/clients/client-ec2/src/commands/EnableIpamOrganizationAdminAccountCommand.ts index e0de927221e7c..142f6bcbe8175 100644 --- a/clients/client-ec2/src/commands/EnableIpamOrganizationAdminAccountCommand.ts +++ b/clients/client-ec2/src/commands/EnableIpamOrganizationAdminAccountCommand.ts @@ -28,7 +28,7 @@ export interface EnableIpamOrganizationAdminAccountCommandOutput __MetadataBearer {} /** - *Enable an Organizations member account as the IPAM admin account. You cannot select the Organizations management account as the IPAM admin account. For more information, see Enable integration with Organizations in the Amazon VPC IPAM User Guide. + *
Enable an Organizations member account as the IPAM admin account. You cannot select the Organizations management account as the IPAM admin account. For more information, see Enable integration with Organizations in the Amazon VPC IPAM User Guide. *
* @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-ec2/src/commands/GetIpamAddressHistoryCommand.ts b/clients/client-ec2/src/commands/GetIpamAddressHistoryCommand.ts index 760cbf3ea650b..2bb51b0511a4c 100644 --- a/clients/client-ec2/src/commands/GetIpamAddressHistoryCommand.ts +++ b/clients/client-ec2/src/commands/GetIpamAddressHistoryCommand.ts @@ -23,7 +23,7 @@ export interface GetIpamAddressHistoryCommandInput extends GetIpamAddressHistory export interface GetIpamAddressHistoryCommandOutput extends GetIpamAddressHistoryResult, __MetadataBearer {} /** - *Retrieve historical information about a CIDR within an IPAM scope. For more information, see View the history of IP addresses in the Amazon VPC IPAM User Guide.
+ *Retrieve historical information about a CIDR within an IPAM scope. For more information, see View the history of IP addresses in the Amazon VPC IPAM User Guide.
* @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-ec2/src/commands/GetLaunchTemplateDataCommand.ts b/clients/client-ec2/src/commands/GetLaunchTemplateDataCommand.ts index f6c2f0a3a6162..7e193507816fa 100644 --- a/clients/client-ec2/src/commands/GetLaunchTemplateDataCommand.ts +++ b/clients/client-ec2/src/commands/GetLaunchTemplateDataCommand.ts @@ -27,8 +27,8 @@ export interface GetLaunchTemplateDataCommandOutput extends GetLaunchTemplateDat * create a launch template. *This action calls on other describe actions to get instance information. Depending on
* your instance configuration, you may need to allow the following actions in your IAM
- * policy: DescribeSpotInstanceRequests, DescribeInstanceCreditSpecifications,
- * DescribeVolumes, DescribeInstanceAttribute, and DescribeElasticGpus. Or, you can allow
+ * policy: DescribeSpotInstanceRequests, DescribeInstanceCreditSpecifications,
+ * DescribeVolumes, DescribeInstanceAttribute, and DescribeElasticGpus. Or, you can allow
 * describe*
depending on your instance requirements.
Modify the configurations of an IPAM pool.
- *For more information, see Modify a pool in the Amazon VPC IPAM User Guide. + *
For more information, see Modify a pool in the Amazon VPC IPAM User Guide. *
* @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-ec2/src/commands/ModifyIpamResourceCidrCommand.ts b/clients/client-ec2/src/commands/ModifyIpamResourceCidrCommand.ts index 7744b4467feb9..cf70ea1e5ed18 100644 --- a/clients/client-ec2/src/commands/ModifyIpamResourceCidrCommand.ts +++ b/clients/client-ec2/src/commands/ModifyIpamResourceCidrCommand.ts @@ -24,7 +24,7 @@ export interface ModifyIpamResourceCidrCommandOutput extends ModifyIpamResourceC /** *Modify a resource CIDR. You can use this action to transfer resource CIDRs between scopes and ignore resource CIDRs that you do not want to manage. If set to false, the resource will not be tracked for overlap, it cannot be auto-imported into a pool, and it will be removed from any pool it has an allocation in.
- *For more information, see Move resource CIDRs between scopes and Change the monitoring state of resource CIDRs in the Amazon VPC IPAM User Guide.
+ *For more information, see Move resource CIDRs between scopes and Change the monitoring state of resource CIDRs in the Amazon VPC IPAM User Guide.
* @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-ec2/src/commands/MoveByoipCidrToIpamCommand.ts b/clients/client-ec2/src/commands/MoveByoipCidrToIpamCommand.ts index e6ba50796729f..9e36aa6fa7deb 100644 --- a/clients/client-ec2/src/commands/MoveByoipCidrToIpamCommand.ts +++ b/clients/client-ec2/src/commands/MoveByoipCidrToIpamCommand.ts @@ -24,7 +24,7 @@ export interface MoveByoipCidrToIpamCommandOutput extends MoveByoipCidrToIpamRes /** *Move an BYOIP IPv4 CIDR to IPAM from a public IPv4 pool.
- *If you already have an IPv4 BYOIP CIDR with Amazon Web Services, you can move the CIDR to IPAM from a public IPv4 pool. You cannot move an IPv6 CIDR to IPAM. If you are bringing a new IP address to Amazon Web Services for the first time, complete the steps in Tutorial: BYOIP address CIDRs to IPAM.
+ *If you already have an IPv4 BYOIP CIDR with Amazon Web Services, you can move the CIDR to IPAM from a public IPv4 pool. You cannot move an IPv6 CIDR to IPAM. If you are bringing a new IP address to Amazon Web Services for the first time, complete the steps in Tutorial: BYOIP address CIDRs to IPAM.
* @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-ec2/src/commands/ProvisionIpamPoolCidrCommand.ts b/clients/client-ec2/src/commands/ProvisionIpamPoolCidrCommand.ts index a30192bc4fa88..a9f58a7048305 100644 --- a/clients/client-ec2/src/commands/ProvisionIpamPoolCidrCommand.ts +++ b/clients/client-ec2/src/commands/ProvisionIpamPoolCidrCommand.ts @@ -24,7 +24,7 @@ export interface ProvisionIpamPoolCidrCommandOutput extends ProvisionIpamPoolCid /** *Provision a CIDR to an IPAM pool. You can use this action to provision new CIDRs to a top-level pool or to transfer a CIDR from a top-level pool to a pool within it.
- *For more information, see Provision CIDRs to pools in the Amazon VPC IPAM User Guide. + *
For more information, see Provision CIDRs to pools in the Amazon VPC IPAM User Guide. *
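A hedged sketch of provisioning a CIDR to a pool and then carving an allocation out of it; the pool ID and CIDR values are placeholders:

```ts
import {
  EC2Client,
  ProvisionIpamPoolCidrCommand,
  AllocateIpamPoolCidrCommand,
} from "@aws-sdk/client-ec2";

const client = new EC2Client({ region: "us-east-1" });
const poolId = "ipam-pool-0123456789abcdef0"; // placeholder

// Provision address space to the pool. Provisioning completes asynchronously,
// so in practice wait for the CIDR to reach the provisioned state first.
await client.send(new ProvisionIpamPoolCidrCommand({ IpamPoolId: poolId, Cidr: "10.0.0.0/16" }));

// Later, carve a /24 allocation out of the provisioned space.
const { IpamPoolAllocation } = await client.send(
  new AllocateIpamPoolCidrCommand({ IpamPoolId: poolId, NetmaskLength: 24 })
);
console.log(IpamPoolAllocation?.Cidr);
```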
* @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-ec2/src/commands/ProvisionPublicIpv4PoolCidrCommand.ts b/clients/client-ec2/src/commands/ProvisionPublicIpv4PoolCidrCommand.ts index 4adbe3dea85ee..ca84e64a92bd8 100644 --- a/clients/client-ec2/src/commands/ProvisionPublicIpv4PoolCidrCommand.ts +++ b/clients/client-ec2/src/commands/ProvisionPublicIpv4PoolCidrCommand.ts @@ -24,7 +24,7 @@ export interface ProvisionPublicIpv4PoolCidrCommandOutput extends ProvisionPubli /** *Provision a CIDR to a public IPv4 pool.
- *For more information about IPAM, see What is IPAM? in the Amazon VPC IPAM User Guide.
+ *For more information about IPAM, see What is IPAM? in the Amazon VPC IPAM User Guide.
* @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-ec2/src/commands/RegisterImageCommand.ts b/clients/client-ec2/src/commands/RegisterImageCommand.ts index ebef959c42dbe..97f01755be2f3 100644 --- a/clients/client-ec2/src/commands/RegisterImageCommand.ts +++ b/clients/client-ec2/src/commands/RegisterImageCommand.ts @@ -25,8 +25,10 @@ export interface RegisterImageCommandOutput extends RegisterImageResult, __Metad * Creating your * own AMIs in the Amazon Elastic Compute Cloud User Guide. *For Amazon EBS-backed instances, CreateImage creates and registers - * the AMI in a single request, so you don't have to register the AMI yourself.
+ *For Amazon EBS-backed instances, CreateImage creates and registers the AMI + * in a single request, so you don't have to register the AMI yourself. We recommend that you + * always use CreateImage unless you have a specific reason to use + * RegisterImage.
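Following that recommendation, an EBS-backed AMI is normally produced straight from an instance with CreateImage; the instance ID and image name below are placeholders:

```ts
import { EC2Client, CreateImageCommand } from "@aws-sdk/client-ec2";

const client = new EC2Client({ region: "us-east-1" });

// CreateImage both creates and registers the AMI in one call.
const { ImageId } = await client.send(
  new CreateImageCommand({
    InstanceId: "i-1234567890abcdef0",
    Name: "web-server-golden-2022-06",
    Description: "Snapshot of the configured web server",
  })
);
console.log(ImageId);
```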
*If needed, you can deregister an AMI at any time. Any modifications you make to an AMI backed by an instance store volume invalidates its registration. diff --git a/clients/client-ec2/src/commands/ReleaseIpamPoolAllocationCommand.ts b/clients/client-ec2/src/commands/ReleaseIpamPoolAllocationCommand.ts index 40d31b28e4cc7..05248fc54f5e4 100644 --- a/clients/client-ec2/src/commands/ReleaseIpamPoolAllocationCommand.ts +++ b/clients/client-ec2/src/commands/ReleaseIpamPoolAllocationCommand.ts @@ -23,7 +23,7 @@ export interface ReleaseIpamPoolAllocationCommandInput extends ReleaseIpamPoolAl export interface ReleaseIpamPoolAllocationCommandOutput extends ReleaseIpamPoolAllocationResult, __MetadataBearer {} /** - *
Release an allocation within an IPAM pool. You can only use this action to release manual allocations. To remove an allocation for a resource without deleting the resource, set its monitored state to false using ModifyIpamResourceCidr. For more information, see Release an allocation in the Amazon VPC IPAM User Guide. + *
Release an allocation within an IPAM pool. You can only use this action to release manual allocations. To remove an allocation for a resource without deleting the resource, set its monitored state to false using ModifyIpamResourceCidr. For more information, see Release an allocation in the Amazon VPC IPAM User Guide. *
* @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-ec2/src/models/models_0.ts b/clients/client-ec2/src/models/models_0.ts index c6200fe546eea..5f0df6b1fb4e6 100644 --- a/clients/client-ec2/src/models/models_0.ts +++ b/clients/client-ec2/src/models/models_0.ts @@ -373,7 +373,11 @@ export interface PeeringTgwInfo { */ TransitGatewayId?: string; + /** + *The ID of the core network where the transit gateway peer is located.
+ */ CoreNetworkId?: string; + /** *The ID of the Amazon Web Services account that owns the transit gateway.
*/ @@ -399,7 +403,13 @@ export enum DynamicRoutingValue { enable = "enable", } +/** + *Describes dynamic routing for the transit gateway peering attachment.
+ */ export interface TransitGatewayPeeringAttachmentOptions { + /** + *Describes whether dynamic routing is enabled or disabled for the transit gateway peering attachment.
+ */ DynamicRouting?: DynamicRoutingValue | string; } @@ -502,7 +512,11 @@ export interface TransitGatewayPeeringAttachment { */ AccepterTgwInfo?: PeeringTgwInfo; + /** + *Details about the transit gateway peering attachment.
+ */ Options?: TransitGatewayPeeringAttachmentOptions; + /** *The status of the transit gateway peering attachment.
*/ @@ -1866,7 +1880,7 @@ export enum ActivityStatus { /** *Add an operating Region to an IPAM. Operating Regions are Amazon Web Services Regions where the IPAM is allowed to manage IP address CIDRs. IPAM only * discovers and monitors resources in the Amazon Web Services Regions you select as operating Regions.
- *For more information about operating Regions, see Create an IPAM in the Amazon VPC IPAM User Guide. + *
For more information about operating Regions, see Create an IPAM in the Amazon VPC IPAM User Guide. *
*/ export interface AddIpamOperatingRegion { @@ -2230,6 +2244,12 @@ export type ResourceType = /** *The tags to apply to a resource when the resource is being created.
+ *The Valid Values
lists all the resource types that can be tagged.
+ * However, the action you're using might not support tagging all of these resource types.
+ * If you try to tag a resource type that is unsupported for the action you're using,
+ * you'll get an error.
Associate a CIDR allocated from an IPv4 IPAM pool to a VPC. For more information about Amazon VPC IP Address Manager (IPAM), see What is IPAM? in the Amazon VPC IPAM User Guide.
+ *Associate a CIDR allocated from an IPv4 IPAM pool to a VPC. For more information about Amazon VPC IP Address Manager (IPAM), see What is IPAM? in the Amazon VPC IPAM User Guide.
*/ Ipv4IpamPoolId?: string; /** - *The netmask length of the IPv4 CIDR you would like to associate from an Amazon VPC IP Address Manager (IPAM) pool. For more information about IPAM, see What is IPAM? in the Amazon VPC IPAM User Guide. + *
The netmask length of the IPv4 CIDR you would like to associate from an Amazon VPC IP Address Manager (IPAM) pool. For more information about IPAM, see What is IPAM? in the Amazon VPC IPAM User Guide. *
*/ Ipv4NetmaskLength?: number; /** - *Associates a CIDR allocated from an IPv6 IPAM pool to a VPC. For more information about Amazon VPC IP Address Manager (IPAM), see What is IPAM? in the Amazon VPC IPAM User Guide.
+ *Associates a CIDR allocated from an IPv6 IPAM pool to a VPC. For more information about Amazon VPC IP Address Manager (IPAM), see What is IPAM? in the Amazon VPC IPAM User Guide.
*/ Ipv6IpamPoolId?: string; /** - *The netmask length of the IPv6 CIDR you would like to associate from an Amazon VPC IP Address Manager (IPAM) pool. For more information about IPAM, see What is IPAM? in the Amazon VPC IPAM User Guide.
+ *The netmask length of the IPv6 CIDR you would like to associate from an Amazon VPC IP Address Manager (IPAM) pool. For more information about IPAM, see What is IPAM? in the Amazon VPC IPAM User Guide.
*/ Ipv6NetmaskLength?: number; } diff --git a/clients/client-ec2/src/models/models_1.ts b/clients/client-ec2/src/models/models_1.ts index 6459ec109f167..d74f74f9f62a5 100644 --- a/clients/client-ec2/src/models/models_1.ts +++ b/clients/client-ec2/src/models/models_1.ts @@ -37,19 +37,22 @@ import { } from "./models_0"; /** - *Describes the Amazon EC2 launch template and the launch template version that can be used by
- * an EC2 Fleet to configure Amazon EC2 instances. For information about launch templates, see Launching
+ * The Amazon EC2 launch template that can be used by
+ * an EC2 Fleet to configure Amazon EC2 instances. You must specify either the ID or name of the launch template in the request, but not both. For information about launch templates, see Launch
* an instance from a launch template in the
* Amazon EC2 User Guide. The ID of the launch template. If you specify the template ID, you can't specify the template name. The ID of the launch template. You must specify the The name of the launch template. If you specify the template name, you can't specify the template ID. The name of the launch template. You must specify the The price protection threshold for Spot Instance. This is the maximum you’ll pay for an Spot Instance,
- * expressed as a percentage above the cheapest M, C, or R instance type with your specified
+ * expressed as a percentage above the least expensive current generation M, C, or R instance type with your specified
* attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance
* types priced above your threshold. The parameter accepts an integer, which Amazon EC2 interprets as a percentage. The price protection threshold for On-Demand Instances. This is the maximum you’ll pay for an On-Demand Instance,
- * expressed as a percentage above the cheapest M, C, or R instance type with your specified
+ * expressed as a percentage above the least expensive current generation M, C, or R instance type with your specified
* attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance
* types priced above your threshold. The parameter accepts an integer, which Amazon EC2 interprets as a percentage. The maximum price per unit hour that you are willing to pay for a Spot Instance. The maximum price per unit hour that you are willing to pay for a Spot Instance. We do not recommend using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price.
+ * If you specify a maximum price, your instances will be interrupted more frequently than if you do not specify this parameter. The maximum amount per hour for Spot Instances that you're willing to pay. The maximum amount per hour for Spot Instances that you're willing to pay. We do not recommend
+ * using this parameter because it can lead to increased interruptions. If you do not specify
+ * this parameter, you will pay the current Spot price. If you specify a maximum price, your Spot Instances will be interrupted more frequently than if you do not specify this parameter. Describes the Amazon EC2 launch template and the launch template version that can be used
- * by a Spot Fleet request to configure Amazon EC2 instances. For information about launch templates,
- * see Launching an instance from a launch template in the
+ * The Amazon EC2 launch template that can be used by
+ * a Spot Fleet to configure Amazon EC2 instances. You must specify either the ID or name of the launch template in the request, but not both. For information about launch templates,
+ * see Launch an instance from a launch template in the
* Amazon EC2 User Guide for Linux Instances. The ID of the launch template. If you specify the template ID, you can't specify the
- * template name. The ID of the launch template. You must specify the The name of the launch template. If you specify the template name, you can't specify
- * the template ID. The name of the launch template. You must specify the The price protection threshold for Spot Instances. This is the maximum you’ll pay for a Spot Instance,
- * expressed as a percentage above the cheapest M, C, or R instance type with your specified
+ * expressed as a percentage above the least expensive current generation M, C, or R instance type with your specified
* attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance
* types priced above your threshold. The parameter accepts an integer, which Amazon EC2 interprets as a percentage. The price protection threshold for On-Demand Instances. This is the maximum you’ll pay for an On-Demand Instance,
- * expressed as a percentage above the cheapest M, C, or R instance type with your specified
+ * expressed as a percentage above the least expensive current generation M, C, or R instance type with your specified
* attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance
* types priced above your threshold. The parameter accepts an integer, which Amazon EC2 interprets as a percentage. The maximum price per unit hour that you are willing to pay for a Spot Instance. The maximum price per unit hour that you are willing to pay for a Spot Instance. We do not recommend using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price.
+ * If you specify a maximum price, your instances will be interrupted more frequently than if you do not specify this parameter. The operating Regions for the IPAM. Operating Regions are Amazon Web Services Regions where the IPAM is allowed to manage IP address CIDRs. IPAM only
* discovers and monitors resources in the Amazon Web Services Regions you select as operating Regions. For more information about operating Regions, see Create an IPAM in the Amazon VPC IPAM User Guide.
+ * For more information about operating Regions, see Create an IPAM in the Amazon VPC IPAM User Guide.
* The operating Regions for an IPAM. Operating Regions are Amazon Web Services Regions where the IPAM is allowed to manage IP address CIDRs. IPAM only
* discovers and monitors resources in the Amazon Web Services Regions you select as operating Regions. For more information about operating Regions, see Create an IPAM in the Amazon VPC IPAM User Guide. For more information about operating Regions, see Create an IPAM in the Amazon VPC IPAM User Guide. IPAM is a VPC feature that you can use to automate your IP address management workflows including assigning, tracking, troubleshooting, and auditing IP addresses across Amazon Web Services Regions and accounts throughout your Amazon Web Services Organization. For more information, see What is IPAM? in the Amazon VPC IPAM User Guide. IPAM is a VPC feature that you can use to automate your IP address management workflows including assigning, tracking, troubleshooting, and auditing IP addresses across Amazon Web Services Regions and accounts throughout your Amazon Web Services Organization. For more information, see What is IPAM? in the Amazon VPC IPAM User Guide. The number of scopes in the IPAM. The scope quota is 5. For more information on quotas, see Quotas in IPAM in the Amazon VPC IPAM User Guide.
+ * The number of scopes in the IPAM. The scope quota is 5. For more information on quotas, see Quotas in IPAM in the Amazon VPC IPAM User Guide.
* The operating Regions for an IPAM. Operating Regions are Amazon Web Services Regions where the IPAM is allowed to manage IP address CIDRs. IPAM only
* discovers and monitors resources in the Amazon Web Services Regions you select as operating Regions. For more information about operating Regions, see Create an IPAM in the Amazon VPC IPAM User Guide. For more information about operating Regions, see Create an IPAM in the Amazon VPC IPAM User Guide. The depth of pools in your IPAM pool. The pool depth quota is 10. For more information, see Quotas in IPAM in the Amazon VPC IPAM User Guide.
+ * The depth of pools in your IPAM pool. The pool depth quota is 10. For more information, see Quotas in IPAM in the Amazon VPC IPAM User Guide.
* In IPAM, a scope is the highest-level container within IPAM. An IPAM contains two default scopes. Each scope represents the IP space for a single network. The private scope is intended for all private IP address space. The public scope is intended for all public IP address space. Scopes enable you to reuse IP addresses across multiple unconnected networks without causing IP address overlap or conflict. For more information, see How IPAM works in the Amazon VPC IPAM User Guide. For more information, see How IPAM works in the Amazon VPC IPAM User Guide. The number of threads per CPU core. To disable multithreading for the instance,
- * specify a value of 1. Otherwise, specify the default value of 2.LaunchTemplateId
or the LaunchTemplateName
, but not both.LaunchTemplateName
or the LaunchTemplateId
, but not both.LaunchTemplateId
or the LaunchTemplateName
, but not both.LaunchTemplateName
or the LaunchTemplateId
, but not both.1
. Otherwise, specify the default value of 2
.
The credit option for CPU usage of a T2, T3, or T3a instance.
+ *The credit option for CPU usage of a T instance.
*/ export interface CreditSpecificationRequest { /** - *The credit option for CPU usage of a T2, T3, or T3a instance. Valid values are
- * standard
and unlimited
.
The credit option for CPU usage of a T instance.
+ *Valid values: standard
| unlimited
+ *
The maximum hourly price you're willing to pay for the Spot Instances.
+ *The maximum hourly price you're willing to pay for the Spot Instances. We do not recommend + * using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price.
+ *If you specify a maximum price, your Spot Instances will be interrupted more frequently than if you do not specify this parameter.
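In line with that guidance, a launch template can request Spot capacity without setting MaxPrice at all, so the current Spot price applies; the template name and instance type are placeholders:

```ts
import { EC2Client, CreateLaunchTemplateCommand } from "@aws-sdk/client-ec2";

const client = new EC2Client({ region: "us-east-1" });

await client.send(
  new CreateLaunchTemplateCommand({
    LaunchTemplateName: "spot-workers", // placeholder name
    LaunchTemplateData: {
      InstanceType: "t3.micro",
      InstanceMarketOptions: {
        MarketType: "spot",
        // MaxPrice is deliberately omitted: you pay the current Spot price
        // and avoid the extra interruptions a low ceiling can cause.
        SpotOptions: { SpotInstanceType: "one-time" },
      },
    },
  })
);
```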
+ *The required duration for the Spot Instances (also known as Spot blocks), in minutes. - * This value must be a multiple of 60 (60, 120, 180, 240, 300, or 360).
+ *Deprecated.
*/ BlockDurationMinutes?: number; /** - *The end date of the request. For a one-time request, the request remains active until - * all instances launch, the request is canceled, or this date is reached. If the request - * is persistent, it remains active until it is canceled or this date and time is reached. - * The default end date is 7 days from the current date.
+ *The end date of the request, in UTC format + * (YYYY-MM-DDTHH:MM:SSZ). Supported only for + * persistent requests.
+ *For a persistent request, the request remains active until the ValidUntil
+ * date and time is reached. Otherwise, the request remains active until you cancel it.
For a one-time request, ValidUntil
is not supported. The request remains active until
+ * all instances launch or you cancel the request.
Default: 7 days from the current date
*/ ValidUntil?: Date; @@ -4141,7 +4172,7 @@ export enum LaunchTemplateInstanceMetadataTagsState { } /** - *The metadata options for the instance. For more information, see Instance Metadata and User Data in the + *
The metadata options for the instance. For more information, see Instance metadata and user data in the * Amazon Elastic Compute Cloud User Guide.
*/ export interface LaunchTemplateInstanceMetadataOptionsRequest { @@ -4163,7 +4194,8 @@ export interface LaunchTemplateInstanceMetadataOptionsRequest { /** *The desired HTTP PUT response hop limit for instance metadata requests. The larger the * number, the further instance metadata requests can travel.
- *Default: 1
+ *Default: 1
+ *
Possible values: Integers from 1 to 64
*/ HttpPutResponseHopLimit?: number; @@ -5002,14 +5034,14 @@ export interface CreateLaunchTemplateVersionRequest { ClientToken?: string; /** - *The ID of the launch template. You must specify either the launch template ID or - * launch template name in the request.
+ *The ID of the launch template.
+ *You must specify either the LaunchTemplateId
or the LaunchTemplateName
, but not both.
The name of the launch template. You must specify either the launch template ID or - * launch template name in the request.
+ *The name of the launch template.
+ *You must specify the LaunchTemplateName
or the LaunchTemplateId
, but not both.
Describes the credit option for CPU usage of a T2, T3, or T3a instance.
+ *Describes the credit option for CPU usage of a T instance.
*/ export interface CreditSpecification { /** - *The credit option for CPU usage of a T2, T3, or T3a instance. Valid values are
- * standard
and unlimited
.
The credit option for CPU usage of a T instance.
+ *Valid values: standard
| unlimited
+ *
The maximum hourly price you're willing to pay for the Spot Instances.
+ *The maximum hourly price you're willing to pay for the Spot Instances. We do not recommend + * using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price.
+ *If you specify a maximum price, your Spot Instances will be interrupted more frequently than if you do not specify this parameter.
+ *The metadata options for the instance. For more information, see Instance Metadata and User Data in the + *
The metadata options for the instance. For more information, see Instance metadata and user data in the * Amazon Elastic Compute Cloud User Guide.
*/ export interface LaunchTemplateInstanceMetadataOptions { diff --git a/clients/client-ec2/src/models/models_2.ts b/clients/client-ec2/src/models/models_2.ts index 5c82044f3fcb1..7adc0e0860988 100644 --- a/clients/client-ec2/src/models/models_2.ts +++ b/clients/client-ec2/src/models/models_2.ts @@ -712,7 +712,13 @@ export namespace CreateTransitGatewayMulticastDomainResult { }); } +/** + *Describes whether dynamic routing is enabled or disabled for the transit gateway peering request.
+ */ export interface CreateTransitGatewayPeeringAttachmentRequestOptions { + /** + *Indicates whether dynamic routing is enabled or disabled.
+ */ DynamicRouting?: DynamicRoutingValue | string; } @@ -746,7 +752,11 @@ export interface CreateTransitGatewayPeeringAttachmentRequest { */ PeerRegion: string | undefined; + /** + *Requests a transit gateway peering attachment.
+ */ Options?: CreateTransitGatewayPeeringAttachmentRequestOptions; + /** *The tags to apply to the transit gateway peering attachment.
*/ @@ -1090,7 +1100,11 @@ export interface TransitGatewayRoute { */ PrefixListId?: string; + /** + *The ID of the transit gateway route table announcement.
+ */ TransitGatewayRouteTableAnnouncementId?: string; + /** *The attachments.
*/ @@ -1288,13 +1302,21 @@ export interface TransitGatewayRouteTableAnnouncement { */ TransitGatewayId?: string; + /** + *The ID of the core network for the transit gateway route table announcement.
+ */ CoreNetworkId?: string; + /** *The ID of the peer transit gateway.
*/ PeerTransitGatewayId?: string; + /** + *The ID of the core network ID for the peer.
+ */ PeerCoreNetworkId?: string; + /** *The ID of the peering attachment.
*/ @@ -1748,24 +1770,24 @@ export interface CreateVpcRequest { Ipv6CidrBlock?: string; /** - *The ID of an IPv4 IPAM pool you want to use for allocating this VPC's CIDR. For more information, see What is IPAM? in the Amazon VPC IPAM User Guide. + *
The ID of an IPv4 IPAM pool you want to use for allocating this VPC's CIDR. For more information, see What is IPAM? in the Amazon VPC IPAM User Guide. * *
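A minimal sketch of creating a VPC whose IPv4 CIDR is allocated from an IPAM pool instead of being specified directly; the pool ID and netmask length are placeholders:

```ts
import { EC2Client, CreateVpcCommand } from "@aws-sdk/client-ec2";

const client = new EC2Client({ region: "us-east-1" });

// Let IPAM pick a free /20 from the pool instead of hard-coding a CidrBlock.
const { Vpc } = await client.send(
  new CreateVpcCommand({
    Ipv4IpamPoolId: "ipam-pool-0123456789abcdef0",
    Ipv4NetmaskLength: 20,
  })
);
console.log(Vpc?.VpcId, Vpc?.CidrBlock);
```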
*/ Ipv4IpamPoolId?: string; /** - *The netmask length of the IPv4 CIDR you want to allocate to this VPC from an Amazon VPC IP Address Manager (IPAM) pool. For more information about IPAM, see What is IPAM? in the Amazon VPC IPAM User Guide.
+ *The netmask length of the IPv4 CIDR you want to allocate to this VPC from an Amazon VPC IP Address Manager (IPAM) pool. For more information about IPAM, see What is IPAM? in the Amazon VPC IPAM User Guide.
*/ Ipv4NetmaskLength?: number; /** - *The ID of an IPv6 IPAM pool which will be used to allocate this VPC an IPv6 CIDR. IPAM is a VPC feature that you can use to automate your IP address management workflows including assigning, tracking, troubleshooting, and auditing IP addresses across Amazon Web Services Regions and accounts throughout your Amazon Web Services Organization. For more information, see What is IPAM? in the Amazon VPC IPAM User Guide.
+ *The ID of an IPv6 IPAM pool which will be used to allocate this VPC an IPv6 CIDR. IPAM is a VPC feature that you can use to automate your IP address management workflows including assigning, tracking, troubleshooting, and auditing IP addresses across Amazon Web Services Regions and accounts throughout your Amazon Web Services Organization. For more information, see What is IPAM? in the Amazon VPC IPAM User Guide.
*/ Ipv6IpamPoolId?: string; /** - *The netmask length of the IPv6 CIDR you want to allocate to this VPC from an Amazon VPC IP Address Manager (IPAM) pool. For more information about IPAM, see What is IPAM? in the Amazon VPC IPAM User Guide.
+ *The netmask length of the IPv6 CIDR you want to allocate to this VPC from an Amazon VPC IP Address Manager (IPAM) pool. For more information about IPAM, see What is IPAM? in the Amazon VPC IPAM User Guide.
*/ Ipv6NetmaskLength?: number; @@ -4515,14 +4537,14 @@ export interface DeleteLaunchTemplateRequest { DryRun?: boolean; /** - *The ID of the launch template. You must specify either the launch template ID or - * launch template name in the request.
+ *The ID of the launch template.
+ *You must specify either the LaunchTemplateId
or the LaunchTemplateName
, but not both.
The name of the launch template. You must specify either the launch template ID or - * launch template name in the request.
+ *The name of the launch template.
+ *You must specify either the LaunchTemplateName
or the LaunchTemplateId
, but not both.
The ID of the launch template. You must specify either the launch template ID or - * launch template name in the request.
+ *The ID of the launch template.
+ *You must specify either the LaunchTemplateId
or the LaunchTemplateName
, but not both.
The name of the launch template. You must specify either the launch template ID or - * launch template name in the request.
+ *The name of the launch template.
+ *You must specify either the LaunchTemplateName
or the LaunchTemplateId
, but not both.
The maximum amount per hour for Spot Instances that you're willing to pay.
+ *The maximum amount per hour for Spot Instances that you're willing to pay. We do not recommend + * using this parameter because it can lead to increased interruptions. If you do not specify + * this parameter, you will pay the current Spot price.
+ *If you specify a maximum price, your Spot Instances will be interrupted more frequently than if you do not specify this parameter.
+ *The ID of the launch template. To describe one or more versions of a specified launch - * template, you must specify either the launch template ID or the launch template name in - * the request. To describe all the latest or default launch template versions in your + *
The ID of the launch template.
+ *To describe one or more versions of a specified launch
+ * template, you must specify either the LaunchTemplateId
or the LaunchTemplateName
, but not both.
To describe all the latest or default launch template versions in your * account, you must omit this parameter.
*/ LaunchTemplateId?: string; /** - *The name of the launch template. To describe one or more versions of a specified - * launch template, you must specify either the launch template ID or the launch template - * name in the request. To describe all the latest or default launch template versions in + *
The name of the launch template.
+ *To describe one or more versions of a specified
+ * launch template, you must specify either the LaunchTemplateName
or the LaunchTemplateId
, but not both.
To describe all the latest or default launch template versions in * your account, you must omit this parameter.
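For example, omitting the template entirely and passing $Default lists the default version of every launch template in the account; $Latest works the same way (illustrative sketch only):

```ts
import { EC2Client, DescribeLaunchTemplateVersionsCommand } from "@aws-sdk/client-ec2";

const client = new EC2Client({ region: "us-east-1" });

// No LaunchTemplateId/LaunchTemplateName: ask for every default version.
const { LaunchTemplateVersions } = await client.send(
  new DescribeLaunchTemplateVersionsCommand({ Versions: ["$Default"] })
);
for (const v of LaunchTemplateVersions ?? []) {
  console.log(v.LaunchTemplateId, v.VersionNumber, v.DefaultVersion);
}
```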
*/ LaunchTemplateName?: string; @@ -10027,7 +10034,7 @@ export interface DescribeLaunchTemplateVersionsRequest { * version, the valid value is$Latest
. To describe all launch templates in
* your account that are defined as the default version, the valid value is
* $Default
. You can specify $Latest
and
- * $Default
in the same call. You cannot specify numbers.
+ * $Default
in the same request. You cannot specify numbers.
*/
Versions?: string[];
@@ -10075,7 +10082,7 @@ export interface DescribeLaunchTemplateVersionsRequest {
*
* http-protocol-ipv4
- Indicates whether the IPv4 endpoint for the
* instance metadata service is enabled (enabled
|
- * disabled
).
disabled
).
* diff --git a/clients/client-ec2/src/models/models_4.ts b/clients/client-ec2/src/models/models_4.ts index 364a3ef8814cf..d8f31298259f2 100644 --- a/clients/client-ec2/src/models/models_4.ts +++ b/clients/client-ec2/src/models/models_4.ts @@ -4511,10 +4511,11 @@ export interface SpotFleetLaunchSpecification { RamdiskId?: string; /** - *
The maximum price per unit hour that you are willing to pay for a Spot Instance.
- * If this value is not specified, the default is the Spot price specified for the fleet.
- * To determine the Spot price per unit hour, divide the Spot price by the
- * value of WeightedCapacity
.
The maximum price per unit hour that you are willing to pay for a Spot Instance. We do not recommend using this parameter because it can lead to + * increased interruptions. If you do not specify this parameter, you will pay the current Spot price.
+ *If you specify a maximum price, your instances will be interrupted more frequently than if you do not specify this parameter.
+ *The maximum price per unit hour that you are willing to pay for a Spot - * Instance.
+ *The maximum price per unit hour that you are willing to pay for a Spot Instance. We do not recommend using this parameter because it can lead to + * increased interruptions. If you do not specify this parameter, you will pay the current Spot price.
+ *If you specify a maximum price, your instances will be interrupted more frequently than if you do not specify this parameter.
+ *The maximum price per unit hour that you are willing to pay for a Spot Instance. The - * default is the On-Demand price.
+ *The maximum price per unit hour that you are willing to pay for a Spot Instance. We do not recommend + * using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price.
+ *If you specify a maximum price, your instances will be interrupted more frequently than if you do not specify this parameter.
+ *The maximum price per hour that you are willing to pay for a Spot Instance.
+ *The maximum price per unit hour that you are willing to pay for a Spot Instance. We do not recommend + * using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price.
+ *If you specify a maximum price, your instances will be interrupted more frequently than if you do not specify this parameter.
+ *Describes the maximum price per hour that you are willing to pay for a Spot - * Instance.
+ *The maximum price per unit hour that you are willing to pay for a Spot Instance. We do not recommend + * using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price.
+ *If you specify a maximum price, your instances will be interrupted more frequently than if you do not specify this parameter.
+ *The maximum price per hour that you are willing to pay for a Spot Instance.
+ *The maximum price per unit hour that you are willing to pay for a Spot Instance. We do not recommend + * using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price.
+ *If you specify a maximum price, your instances will be interrupted more frequently than if you do not specify this parameter.
+ *The Amazon Resource Name (ARN) of the service.
+ *The name of the service.
*/ ServiceName?: string; diff --git a/clients/client-ec2/src/models/models_5.ts b/clients/client-ec2/src/models/models_5.ts index 97c931ac44d7d..0126f2f548476 100644 --- a/clients/client-ec2/src/models/models_5.ts +++ b/clients/client-ec2/src/models/models_5.ts @@ -1479,7 +1479,7 @@ export enum IpamAddressHistoryResourceType { } /** - *The historical record of a CIDR within an IPAM scope. For more information, see View the history of IP addresses in the Amazon VPC IPAM User Guide. + *
The historical record of a CIDR within an IPAM scope. For more information, see View the history of IP addresses in the Amazon VPC IPAM User Guide. *
*/ export interface IpamAddressHistoryRecord { @@ -1514,12 +1514,12 @@ export interface IpamAddressHistoryRecord { ResourceName?: string; /** - *The compliance status of a resource. For more information on compliance statuses, see Monitor CIDR usage by resource in the Amazon VPC IPAM User Guide.
+ *The compliance status of a resource. For more information on compliance statuses, see Monitor CIDR usage by resource in the Amazon VPC IPAM User Guide.
*/ ResourceComplianceStatus?: IpamComplianceStatus | string; /** - *The overlap status of an IPAM resource. The overlap status tells you if the CIDR for a resource overlaps with another CIDR in the scope. For more information on overlap statuses, see Monitor CIDR usage by resource in the Amazon VPC IPAM User Guide.
+ *The overlap status of an IPAM resource. The overlap status tells you if the CIDR for a resource overlaps with another CIDR in the scope. For more information on overlap statuses, see Monitor CIDR usage by resource in the Amazon VPC IPAM User Guide.
*/ ResourceOverlapStatus?: IpamOverlapStatus | string; @@ -1824,22 +1824,36 @@ export interface IpamResourceCidr { ResourceTags?: IpamResourceTag[]; /** - *The IP address space in the IPAM pool that is allocated to this resource. To convert the decimal to a percentage, multiply the decimal by 100.
+ *The percentage of IP address space in use. To convert the decimal to a percentage, multiply the decimal by 100. Note the following:
For resources that are VPCs, this is the percentage of IP address space in the VPC that's taken up by subnet CIDRs. + *
+ *For resources that are subnets, if the subnet has an IPv4 CIDR provisioned to it, this is the percentage of IPv4 address space in the subnet that's in use. If the subnet has an IPv6 CIDR provisioned to it, the percentage of IPv6 address space in use is not represented. The percentage of IPv6 address space in use cannot currently be calculated. + *
+ *For resources that are public IPv4 pools, this is the percentage of IP address space in the pool that's been allocated to Elastic IP addresses (EIPs). + *
+ *The compliance status of the IPAM resource. For more information on compliance statuses, see Monitor CIDR usage by resource in the Amazon VPC IPAM User Guide.
+ *The compliance status of the IPAM resource. For more information on compliance statuses, see Monitor CIDR usage by resource in the Amazon VPC IPAM User Guide.
*/ ComplianceStatus?: IpamComplianceStatus | string; /** - *The management state of the resource. For more information about management states, see Monitor CIDR usage by resource in the Amazon VPC IPAM User Guide.
+ *The management state of the resource. For more information about management states, see Monitor CIDR usage by resource in the Amazon VPC IPAM User Guide.
*/ ManagementState?: IpamManagementState | string; /** - *The overlap status of an IPAM resource. The overlap status tells you if the CIDR for a resource overlaps with another CIDR in the scope. For more information on overlap statuses, see Monitor CIDR usage by resource in the Amazon VPC IPAM User Guide.
+ *The overlap status of an IPAM resource. The overlap status tells you if the CIDR for a resource overlaps with another CIDR in the scope. For more information on overlap statuses, see Monitor CIDR usage by resource in the Amazon VPC IPAM User Guide.
*/ OverlapStatus?: IpamOverlapStatus | string; @@ -2834,12 +2848,10 @@ export interface GetTransitGatewayMulticastDomainAssociationsRequest { *resource-type
- The type of resource. The valid value is: vpc
.
*
* state
- The state of the subnet association. Valid values are
- * associated
|
- * associating
- * | disassociated
| disassociating
.
associated
| associating
|
+ * disassociated
| disassociating
.
* @@ -6461,7 +6473,7 @@ export namespace ModifyInstancePlacementResult { /** *
Remove an operating Region from an IPAM. Operating Regions are Amazon Web Services Regions where the IPAM is allowed to manage IP address CIDRs. IPAM only * discovers and monitors resources in the Amazon Web Services Regions you select as operating Regions.
- *For more information about operating Regions, see Create an IPAM in the Amazon VPC IPAM User Guide + *
For more information about operating Regions, see Create an IPAM in the Amazon VPC IPAM User Guide *
*/ export interface RemoveIpamOperatingRegion { @@ -6501,7 +6513,7 @@ export interface ModifyIpamRequest { /** *Choose the operating Regions for the IPAM. Operating Regions are Amazon Web Services Regions where the IPAM is allowed to manage IP address CIDRs. IPAM only * discovers and monitors resources in the Amazon Web Services Regions you select as operating Regions.
- *For more information about operating Regions, see Create an IPAM in the Amazon VPC IPAM User Guide.
+ *For more information about operating Regions, see Create an IPAM in the Amazon VPC IPAM User Guide.
*/ AddOperatingRegions?: AddIpamOperatingRegion[]; @@ -6591,7 +6603,7 @@ export interface ModifyIpamPoolRequest { ClearAllocationDefaultNetmaskLength?: boolean; /** - *Add tag allocation rules to a pool. For more information about allocation rules, see Create a top-level pool in the Amazon VPC IPAM User Guide.
+ *Add tag allocation rules to a pool. For more information about allocation rules, see Create a top-level pool in the Amazon VPC IPAM User Guide.
*/ AddAllocationResourceTags?: RequestIpamResourceTag[]; @@ -6752,14 +6764,14 @@ export interface ModifyLaunchTemplateRequest { ClientToken?: string; /** - *The ID of the launch template. You must specify either the launch template ID or - * launch template name in the request.
+ *The ID of the launch template.
+ *You must specify either the LaunchTemplateId
or the LaunchTemplateName
, but not both.
The name of the launch template. You must specify either the launch template ID or - * launch template name in the request.
+ *The name of the launch template.
+ *You must specify either the LaunchTemplateName
or the LaunchTemplateId
, but not both.
The maximum price per hour that you are willing to pay for a Spot Instance. The - * default is the On-Demand price.
+ *The maximum price per unit hour that you are willing to pay for a Spot Instance. We do not recommend + * using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price.
+ *If you specify a maximum price, your instances will be interrupted more frequently than if you do not specify this parameter.
+ *The maximum hourly price you're willing to pay for the Spot Instances. The default is - * the On-Demand price.
+ *The maximum hourly price that you're willing to pay for a Spot Instance. We do not recommend + * using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price.
+ *If you specify a maximum price, your Spot Instances will be interrupted more frequently than if you do not specify this parameter.
+ *The ID of the launch template.
+ *You must specify the LaunchTemplateId
or the LaunchTemplateName
, but not both.
The name of the launch template.
+ *You must specify the LaunchTemplateName
or the LaunchTemplateId
, but not both.
The tags to apply to the resources during launch. You can only tag instances and - * volumes on launch. The specified tags are applied to all instances or volumes that are - * created during launch. To tag a resource after it has been created, see CreateTags.
+ *The tags to apply to the resources that are created during instance launch.
+ *You can specify tags for the following resources only:
+ *Instances
+ *Volumes
+ *Elastic graphics
+ *Spot Instance requests
+ *Network interfaces
+ *To tag a resource after it has been created, see CreateTags.
*/ TagSpecifications?: TagSpecification[]; /** *The launch template to use to launch the instances. Any parameters that you specify in - * RunInstances override the same parameters in the launch template. + * RunInstances override the same parameters in the launch template. * You can specify either the name or ID of a launch template, but not both.
*/ LaunchTemplate?: LaunchTemplateSpecification; @@ -2757,10 +2782,9 @@ export interface RunInstancesRequest { * arestandard
and unlimited
. To change this attribute after
* launch, use
* ModifyInstanceCreditSpecification. For more information, see Burstable
- * performance instances in the Amazon EC2 User Guide.
+ * performance instances in the Amazon EC2 User Guide.
* Default: standard
(T2 instances) or unlimited
(T3/T3a
* instances)
For T3 instances with host
tenancy, only standard
is
* supported.
Indicates whether an instance is enabled for hibernation. For more information, see - * Hibernate + * Hibernate * your instance in the Amazon EC2 User Guide.
*You can't enable hibernation and Amazon Web Services Nitro Enclaves on the same * instance.
@@ -2802,7 +2826,7 @@ export interface RunInstancesRequest { *Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. For * more information, see What is Amazon Web Services Nitro * Enclaves? in the Amazon Web Services Nitro Enclaves User - * Guide.
+ * Guide. *You can't enable Amazon Web Services Nitro Enclaves and hibernation on the same * instance.
*/ @@ -2822,7 +2846,7 @@ export interface RunInstancesRequest { /** *Indicates whether an instance is enabled for stop protection. For more information, * see Stop - * Protection. + * protection. *
*/ DisableApiStop?: boolean; diff --git a/codegen/sdk-codegen/aws-models/ec2.json b/codegen/sdk-codegen/aws-models/ec2.json index 5343d0c45b35b..0d1f4068ddd9e 100644 --- a/codegen/sdk-codegen/aws-models/ec2.json +++ b/codegen/sdk-codegen/aws-models/ec2.json @@ -805,7 +805,7 @@ } }, "traits": { - "smithy.api#documentation": "Add an operating Region to an IPAM. Operating Regions are Amazon Web Services Regions where the IPAM is allowed to manage IP address CIDRs. IPAM only\n discovers and monitors resources in the Amazon Web Services Regions you select as operating Regions.
\nFor more information about operating Regions, see Create an IPAM in the Amazon VPC IPAM User Guide.\n
" + "smithy.api#documentation": "Add an operating Region to an IPAM. Operating Regions are Amazon Web Services Regions where the IPAM is allowed to manage IP address CIDRs. IPAM only\n discovers and monitors resources in the Amazon Web Services Regions you select as operating Regions.
\nFor more information about operating Regions, see Create an IPAM in the Amazon VPC IPAM User Guide.\n
" } }, "com.amazonaws.ec2#AddIpamOperatingRegionSet": { @@ -1397,7 +1397,7 @@ "target": "com.amazonaws.ec2#AllocateIpamPoolCidrResult" }, "traits": { - "smithy.api#documentation": "Allocate a CIDR from an IPAM pool. In IPAM, an allocation is a CIDR assignment from an IPAM pool to another resource or IPAM pool. For more information, see Allocate CIDRs in the Amazon VPC IPAM User Guide.\n
" + "smithy.api#documentation": "Allocate a CIDR from an IPAM pool. In IPAM, an allocation is a CIDR assignment from an IPAM pool to another resource or IPAM pool. For more information, see Allocate CIDRs in the Amazon VPC IPAM User Guide.\n
" } }, "com.amazonaws.ec2#AllocateIpamPoolCidrRequest": { @@ -4733,25 +4733,25 @@ "Ipv4IpamPoolId": { "target": "com.amazonaws.ec2#IpamPoolId", "traits": { - "smithy.api#documentation": "Associate a CIDR allocated from an IPv4 IPAM pool to a VPC. For more information about Amazon VPC IP Address Manager (IPAM), see What is IPAM? in the Amazon VPC IPAM User Guide.
" + "smithy.api#documentation": "Associate a CIDR allocated from an IPv4 IPAM pool to a VPC. For more information about Amazon VPC IP Address Manager (IPAM), see What is IPAM? in the Amazon VPC IPAM User Guide.
" } }, "Ipv4NetmaskLength": { "target": "com.amazonaws.ec2#NetmaskLength", "traits": { - "smithy.api#documentation": "The netmask length of the IPv4 CIDR you would like to associate from an Amazon VPC IP Address Manager (IPAM) pool. For more information about IPAM, see What is IPAM? in the Amazon VPC IPAM User Guide.\n
" + "smithy.api#documentation": "The netmask length of the IPv4 CIDR you would like to associate from an Amazon VPC IP Address Manager (IPAM) pool. For more information about IPAM, see What is IPAM? in the Amazon VPC IPAM User Guide.\n
" } }, "Ipv6IpamPoolId": { "target": "com.amazonaws.ec2#IpamPoolId", "traits": { - "smithy.api#documentation": "Associates a CIDR allocated from an IPv6 IPAM pool to a VPC. For more information about Amazon VPC IP Address Manager (IPAM), see What is IPAM? in the Amazon VPC IPAM User Guide.
" + "smithy.api#documentation": "Associates a CIDR allocated from an IPv6 IPAM pool to a VPC. For more information about Amazon VPC IP Address Manager (IPAM), see What is IPAM? in the Amazon VPC IPAM User Guide.
" } }, "Ipv6NetmaskLength": { "target": "com.amazonaws.ec2#NetmaskLength", "traits": { - "smithy.api#documentation": "The netmask length of the IPv6 CIDR you would like to associate from an Amazon VPC IP Address Manager (IPAM) pool. For more information about IPAM, see What is IPAM? in the Amazon VPC IPAM User Guide.
" + "smithy.api#documentation": "The netmask length of the IPv6 CIDR you would like to associate from an Amazon VPC IP Address Manager (IPAM) pool. For more information about IPAM, see What is IPAM? in the Amazon VPC IPAM User Guide.
" } } } @@ -11593,7 +11593,7 @@ "target": "com.amazonaws.ec2#CreateIpamResult" }, "traits": { - "smithy.api#documentation": "Create an IPAM. Amazon VPC IP Address Manager (IPAM) is a VPC feature that you can use\n to automate your IP address management workflows including assigning, tracking,\n troubleshooting, and auditing IP addresses across Amazon Web Services Regions and accounts\n throughout your Amazon Web Services Organization.
\nFor more information, see Create an IPAM in the Amazon VPC IPAM User Guide.\n
" + "smithy.api#documentation": "Create an IPAM. Amazon VPC IP Address Manager (IPAM) is a VPC feature that you can use\n to automate your IP address management workflows including assigning, tracking,\n troubleshooting, and auditing IP addresses across Amazon Web Services Regions and accounts\n throughout your Amazon Web Services Organization.
\nFor more information, see Create an IPAM in the Amazon VPC IPAM User Guide.\n
" } }, "com.amazonaws.ec2#CreateIpamPool": { @@ -11605,7 +11605,7 @@ "target": "com.amazonaws.ec2#CreateIpamPoolResult" }, "traits": { - "smithy.api#documentation": "Create an IP address pool for Amazon VPC IP Address Manager (IPAM). In IPAM, a pool is a collection of contiguous IP addresses CIDRs. Pools enable you to organize your IP addresses according to your routing and security needs. For example, if you have separate routing and security needs for development and production applications, you can create a pool for each.
\nFor more information, see Create a top-level pool in the Amazon VPC IPAM User Guide.\n
" + "smithy.api#documentation": "Create an IP address pool for Amazon VPC IP Address Manager (IPAM). In IPAM, a pool is a collection of contiguous IP addresses CIDRs. Pools enable you to organize your IP addresses according to your routing and security needs. For example, if you have separate routing and security needs for development and production applications, you can create a pool for each.
\nFor more information, see Create a top-level pool in the Amazon VPC IPAM User Guide.\n
" } }, "com.amazonaws.ec2#CreateIpamPoolRequest": { @@ -11739,7 +11739,7 @@ "OperatingRegions": { "target": "com.amazonaws.ec2#AddIpamOperatingRegionSet", "traits": { - "smithy.api#documentation": "The operating Regions for the IPAM. Operating Regions are Amazon Web Services Regions where the IPAM is allowed to manage IP address CIDRs. IPAM only\n discovers and monitors resources in the Amazon Web Services Regions you select as operating Regions.
\nFor more information about operating Regions, see Create an IPAM in the Amazon VPC IPAM User Guide.\n
", + "smithy.api#documentation": "The operating Regions for the IPAM. Operating Regions are Amazon Web Services Regions where the IPAM is allowed to manage IP address CIDRs. IPAM only\n discovers and monitors resources in the Amazon Web Services Regions you select as operating Regions.
\nFor more information about operating Regions, see Create an IPAM in the Amazon VPC IPAM User Guide.\n
", "smithy.api#xmlName": "OperatingRegion" } }, @@ -11781,7 +11781,7 @@ "target": "com.amazonaws.ec2#CreateIpamScopeResult" }, "traits": { - "smithy.api#documentation": "Create an IPAM scope. In IPAM, a scope is the highest-level container within IPAM. An IPAM contains two default scopes. Each scope represents the IP space for a single network. The private scope is intended for all private IP address space. The public scope is intended for all public IP address space. Scopes enable you to reuse IP addresses across multiple unconnected networks without causing IP address overlap or conflict.
\nFor more information, see Add a scope in the Amazon VPC IPAM User Guide.
" + "smithy.api#documentation": "Create an IPAM scope. In IPAM, a scope is the highest-level container within IPAM. An IPAM contains two default scopes. Each scope represents the IP space for a single network. The private scope is intended for all private IP address space. The public scope is intended for all public IP address space. Scopes enable you to reuse IP addresses across multiple unconnected networks without causing IP address overlap or conflict.
\nFor more information, see Add a scope in the Amazon VPC IPAM User Guide.
" } }, "com.amazonaws.ec2#CreateIpamScopeRequest": { @@ -11972,7 +11972,7 @@ "target": "com.amazonaws.ec2#CreateLaunchTemplateVersionResult" }, "traits": { - "smithy.api#documentation": "Creates a new version for a launch template. You can specify an existing version of\n launch template from which to base the new version.
\nLaunch template versions are numbered in the order in which they are created. You\n cannot specify, change, or replace the numbering of launch template versions.
\nLaunch templates are immutable; after you create a launch template, you can't modify it. \n Instead, you can create a new version of the launch template that includes any changes you require.
\nFor more information, see Modify a launch template (manage launch template versions)in the\n Amazon Elastic Compute Cloud User Guide.
" + "smithy.api#documentation": "Creates a new version of a launch template. You can specify an existing version of\n launch template from which to base the new version.
\nLaunch template versions are numbered in the order in which they are created. You\n cannot specify, change, or replace the numbering of launch template versions.
\nLaunch templates are immutable; after you create a launch template, you can't modify it. \n Instead, you can create a new version of the launch template that includes any changes you require.
\nFor more information, see Modify a launch template (manage launch template versions) in the\n Amazon Elastic Compute Cloud User Guide.
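A hedged usage sketch for the operation documented above, paired with a DescribeLaunchTemplateVersions call that uses the $Latest and $Default values mentioned later in this patch; the template ID is a placeholder:

```ts
import {
  EC2Client,
  CreateLaunchTemplateVersionCommand,
  DescribeLaunchTemplateVersionsCommand,
} from "@aws-sdk/client-ec2";

const client = new EC2Client({ region: "us-east-1" });
const launchTemplateId = "lt-0123456789abcdef0"; // placeholder

// Create a new version based on version 1, changing only the instance type.
const { LaunchTemplateVersion } = await client.send(
  new CreateLaunchTemplateVersionCommand({
    LaunchTemplateId: launchTemplateId,
    SourceVersion: "1",
    VersionDescription: "bump instance size",
    LaunchTemplateData: { InstanceType: "m5.large" },
  })
);
console.log("created version", LaunchTemplateVersion?.VersionNumber);

// Versions are numbered in creation order; $Latest and $Default describe
// the newest and the default version of this template.
const { LaunchTemplateVersions } = await client.send(
  new DescribeLaunchTemplateVersionsCommand({
    LaunchTemplateId: launchTemplateId,
    Versions: ["$Latest", "$Default"],
  })
);
console.log(LaunchTemplateVersions?.map((v) => v.VersionNumber));
```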
" } }, "com.amazonaws.ec2#CreateLaunchTemplateVersionRequest": { @@ -11993,13 +11993,13 @@ "LaunchTemplateId": { "target": "com.amazonaws.ec2#LaunchTemplateId", "traits": { - "smithy.api#documentation": "The ID of the launch template. You must specify either the launch template ID or\n launch template name in the request.
" + "smithy.api#documentation": "The ID of the launch template.
\nYou must specify either the LaunchTemplateId
or the LaunchTemplateName
, but not both.
The name of the launch template. You must specify either the launch template ID or\n launch template name in the request.
" + "smithy.api#documentation": "The name of the launch template.
\nYou must specify the LaunchTemplateName
or the LaunchTemplateId
, but not both.
Adds or overwrites only the specified tags for the specified Amazon EC2 resource or\n resources. When you specify an existing tag key, the value is overwritten with\n the new value. Each resource can have a maximum of 50 tags. Each tag consists of a key and\n optional value. Tag keys must be unique per resource.
\n \nFor more information about tags, see Tagging Your Resources in the\n Amazon Elastic Compute Cloud User Guide. For more information about\n creating IAM policies that control users' access to resources based on tags, see Supported\n Resource-Level Permissions for Amazon EC2 API Actions in the Amazon\n Elastic Compute Cloud User Guide.
" + "smithy.api#documentation": "Adds or overwrites only the specified tags for the specified Amazon EC2 resource or\n resources. When you specify an existing tag key, the value is overwritten with\n the new value. Each resource can have a maximum of 50 tags. Each tag consists of a key and\n optional value. Tag keys must be unique per resource.
\n \nFor more information about tags, see Tag your Amazon EC2 resources in the\n Amazon Elastic Compute Cloud User Guide. For more information about\n creating IAM policies that control users' access to resources based on tags, see Supported\n resource-level permissions for Amazon EC2 API actions in the Amazon\n Elastic Compute Cloud User Guide.
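As a small illustrative aside (resource IDs are placeholders), the tagging behaviour described above looks like this through the generated client:

```ts
import { EC2Client, CreateTagsCommand } from "@aws-sdk/client-ec2";

const client = new EC2Client({ region: "us-east-1" });

// Add, or overwrite, the same two tags on an instance and one of its volumes.
await client.send(
  new CreateTagsCommand({
    Resources: ["i-0123456789abcdef0", "vol-0123456789abcdef0"],
    Tags: [
      { Key: "environment", Value: "production" },
      { Key: "cost-center", Value: "1234" },
    ],
  })
);
```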
" } }, "com.amazonaws.ec2#CreateTagsRequest": { @@ -14503,7 +14503,7 @@ "target": "com.amazonaws.ec2#CreateTransitGatewayPeeringAttachmentResult" }, "traits": { - "smithy.api#documentation": "Requests a transit gateway peering attachment between the specified transit gateway\n (requester) and a peer transit gateway (accepter). The transit gateways must be in\n different Regions. The peer transit gateway can be in your account or a different \n Amazon Web Services account.
\nAfter you create the peering attachment, the owner of the accepter transit gateway \n must accept the attachment request.
" + "smithy.api#documentation": "Requests a transit gateway peering attachment between the specified transit gateway\n (requester) and a peer transit gateway (accepter). The peer transit gateway can be in \n your account or a different Amazon Web Services account.
\nAfter you create the peering attachment, the owner of the accepter transit gateway \n must accept the attachment request.
" } }, "com.amazonaws.ec2#CreateTransitGatewayPeeringAttachmentRequest": { @@ -14538,7 +14538,10 @@ } }, "Options": { - "target": "com.amazonaws.ec2#CreateTransitGatewayPeeringAttachmentRequestOptions" + "target": "com.amazonaws.ec2#CreateTransitGatewayPeeringAttachmentRequestOptions", + "traits": { + "smithy.api#documentation": "Requests a transit gateway peering attachment.
" + } }, "TagSpecifications": { "target": "com.amazonaws.ec2#TagSpecificationList", @@ -14559,8 +14562,14 @@ "type": "structure", "members": { "DynamicRouting": { - "target": "com.amazonaws.ec2#DynamicRoutingValue" + "target": "com.amazonaws.ec2#DynamicRoutingValue", + "traits": { + "smithy.api#documentation": "Indicates whether dynamic routing is enabled or disabled.
" + } } + }, + "traits": { + "smithy.api#documentation": "Describes whether dynamic routing is enabled or disabled for the transit gateway peering request.
" } }, "com.amazonaws.ec2#CreateTransitGatewayPeeringAttachmentResult": { @@ -15561,25 +15570,25 @@ "Ipv4IpamPoolId": { "target": "com.amazonaws.ec2#IpamPoolId", "traits": { - "smithy.api#documentation": "The ID of an IPv4 IPAM pool you want to use for allocating this VPC's CIDR. For more information, see What is IPAM? in the Amazon VPC IPAM User Guide.\n \n
" + "smithy.api#documentation": "The ID of an IPv4 IPAM pool you want to use for allocating this VPC's CIDR. For more information, see What is IPAM? in the Amazon VPC IPAM User Guide.\n \n
" } }, "Ipv4NetmaskLength": { "target": "com.amazonaws.ec2#NetmaskLength", "traits": { - "smithy.api#documentation": "The netmask length of the IPv4 CIDR you want to allocate to this VPC from an Amazon VPC IP Address Manager (IPAM) pool. For more information about IPAM, see What is IPAM? in the Amazon VPC IPAM User Guide.
" + "smithy.api#documentation": "The netmask length of the IPv4 CIDR you want to allocate to this VPC from an Amazon VPC IP Address Manager (IPAM) pool. For more information about IPAM, see What is IPAM? in the Amazon VPC IPAM User Guide.
" } }, "Ipv6IpamPoolId": { "target": "com.amazonaws.ec2#IpamPoolId", "traits": { - "smithy.api#documentation": "The ID of an IPv6 IPAM pool which will be used to allocate this VPC an IPv6 CIDR. IPAM is a VPC feature that you can use to automate your IP address management workflows including assigning, tracking, troubleshooting, and auditing IP addresses across Amazon Web Services Regions and accounts throughout your Amazon Web Services Organization. For more information, see What is IPAM? in the Amazon VPC IPAM User Guide.
" + "smithy.api#documentation": "The ID of an IPv6 IPAM pool which will be used to allocate this VPC an IPv6 CIDR. IPAM is a VPC feature that you can use to automate your IP address management workflows including assigning, tracking, troubleshooting, and auditing IP addresses across Amazon Web Services Regions and accounts throughout your Amazon Web Services Organization. For more information, see What is IPAM? in the Amazon VPC IPAM User Guide.
" } }, "Ipv6NetmaskLength": { "target": "com.amazonaws.ec2#NetmaskLength", "traits": { - "smithy.api#documentation": "The netmask length of the IPv6 CIDR you want to allocate to this VPC from an Amazon VPC IP Address Manager (IPAM) pool. For more information about IPAM, see What is IPAM? in the Amazon VPC IPAM User Guide.
" + "smithy.api#documentation": "The netmask length of the IPv6 CIDR you want to allocate to this VPC from an Amazon VPC IP Address Manager (IPAM) pool. For more information about IPAM, see What is IPAM? in the Amazon VPC IPAM User Guide.
" } }, "DryRun": { @@ -15822,13 +15831,13 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "CpuCredits", - "smithy.api#documentation": "The credit option for CPU usage of a T2, T3, or T3a instance. Valid values are\n standard
and unlimited
.
The credit option for CPU usage of a T instance.
\nValid values: standard
| unlimited
\n
Describes the credit option for CPU usage of a T2, T3, or T3a instance.
" + "smithy.api#documentation": "Describes the credit option for CPU usage of a T instance.
" } }, "com.amazonaws.ec2#CreditSpecificationRequest": { @@ -15837,13 +15846,13 @@ "CpuCredits": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "The credit option for CPU usage of a T2, T3, or T3a instance. Valid values are\n standard
and unlimited
.
The credit option for CPU usage of a T instance.
\nValid values: standard
| unlimited
\n
The credit option for CPU usage of a T2, T3, or T3a instance.
" + "smithy.api#documentation": "The credit option for CPU usage of a T instance.
" } }, "com.amazonaws.ec2#CurrencyCodeValues": { @@ -16678,7 +16687,7 @@ "target": "com.amazonaws.ec2#DeleteIpamResult" }, "traits": { - "smithy.api#documentation": "Delete an IPAM. Deleting an IPAM removes all monitored data associated with the IPAM including the historical data for CIDRs.
\nFor more information, see Delete an IPAM in the Amazon VPC IPAM User Guide.\n
" + "smithy.api#documentation": "Delete an IPAM. Deleting an IPAM removes all monitored data associated with the IPAM including the historical data for CIDRs.
\nFor more information, see Delete an IPAM in the Amazon VPC IPAM User Guide.\n
" } }, "com.amazonaws.ec2#DeleteIpamPool": { @@ -16690,7 +16699,7 @@ "target": "com.amazonaws.ec2#DeleteIpamPoolResult" }, "traits": { - "smithy.api#documentation": "Delete an IPAM pool.
\nYou cannot delete an IPAM pool if there are allocations in it or CIDRs provisioned to it. To release \n allocations, see ReleaseIpamPoolAllocation. To deprovision pool \n CIDRs, see DeprovisionIpamPoolCidr.
\nFor more information, see Delete a pool in the Amazon VPC IPAM User Guide.\n
" + "smithy.api#documentation": "Delete an IPAM pool.
\nYou cannot delete an IPAM pool if there are allocations in it or CIDRs provisioned to it. To release \n allocations, see ReleaseIpamPoolAllocation. To deprovision pool \n CIDRs, see DeprovisionIpamPoolCidr.
\nFor more information, see Delete a pool in the Amazon VPC IPAM User Guide.\n
" } }, "com.amazonaws.ec2#DeleteIpamPoolRequest": { @@ -16770,7 +16779,7 @@ "target": "com.amazonaws.ec2#DeleteIpamScopeResult" }, "traits": { - "smithy.api#documentation": "Delete the scope for an IPAM. You cannot delete the default scopes.
\nFor more information, see Delete a scope in the Amazon VPC IPAM User Guide.\n
" + "smithy.api#documentation": "Delete the scope for an IPAM. You cannot delete the default scopes.
\nFor more information, see Delete a scope in the Amazon VPC IPAM User Guide.\n
" } }, "com.amazonaws.ec2#DeleteIpamScopeRequest": { @@ -16865,13 +16874,13 @@ "LaunchTemplateId": { "target": "com.amazonaws.ec2#LaunchTemplateId", "traits": { - "smithy.api#documentation": "The ID of the launch template. You must specify either the launch template ID or\n launch template name in the request.
" + "smithy.api#documentation": "The ID of the launch template.
\nYou must specify either the LaunchTemplateId
or the LaunchTemplateName
, but not both.
The name of the launch template. You must specify either the launch template ID or\n launch template name in the request.
" + "smithy.api#documentation": "The name of the launch template.
\nYou must specify either the LaunchTemplateName
or the LaunchTemplateId
, but not both.
The ID of the launch template. You must specify either the launch template ID or\n launch template name in the request.
" + "smithy.api#documentation": "The ID of the launch template.
\nYou must specify either the LaunchTemplateId
or the LaunchTemplateName
, but not both.
The name of the launch template. You must specify either the launch template ID or\n launch template name in the request.
" + "smithy.api#documentation": "The name of the launch template.
\nYou must specify either the LaunchTemplateName
or the LaunchTemplateId
, but not both.
Deletes the specified set of tags from the specified set of resources.
\nTo list the current tags, use DescribeTags. For more information about tags, see \n Tagging Your Resources \n in the Amazon Elastic Compute Cloud User Guide.
" + "smithy.api#documentation": "Deletes the specified set of tags from the specified set of resources.
\nTo list the current tags, use DescribeTags. For more information about\n tags, see Tag\n your Amazon EC2 resources in the Amazon Elastic Compute Cloud User\n Guide.
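A short illustrative companion to the tagging example earlier in this patch (resource IDs are placeholders):

```ts
import { EC2Client, DeleteTagsCommand } from "@aws-sdk/client-ec2";

const client = new EC2Client({ region: "us-east-1" });

// Remove the "temporary" tag from two resources. Omitting Value deletes the
// tag regardless of its value; an empty string would match only empty values.
await client.send(
  new DeleteTagsCommand({
    Resources: ["i-0123456789abcdef0", "vol-0123456789abcdef0"],
    Tags: [{ Key: "temporary" }],
  })
);
```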
" } }, "com.amazonaws.ec2#DeleteTagsRequest": { @@ -19128,7 +19137,7 @@ "target": "com.amazonaws.ec2#DeprovisionIpamPoolCidrResult" }, "traits": { - "smithy.api#documentation": "Deprovision a CIDR provisioned from an IPAM pool. If you deprovision a CIDR from a pool that has a source pool, the CIDR is recycled back into the source pool. For more information, see Deprovision pool CIDRs in the Amazon VPC IPAM User Guide.
" + "smithy.api#documentation": "Deprovision a CIDR provisioned from an IPAM pool. If you deprovision a CIDR from a pool that has a source pool, the CIDR is recycled back into the source pool. For more information, see Deprovision pool CIDRs in the Amazon VPC IPAM User Guide.
" } }, "com.amazonaws.ec2#DeprovisionIpamPoolCidrRequest": { @@ -24028,7 +24037,7 @@ "target": "com.amazonaws.ec2#DescribeIpamsResult" }, "traits": { - "smithy.api#documentation": "Get information about your IPAM pools.
\nFor more information, see What is IPAM? in the Amazon VPC IPAM User Guide.\n
", + "smithy.api#documentation": "Get information about your IPAM pools.
\nFor more information, see What is IPAM? in the Amazon VPC IPAM User Guide.\n
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -24290,19 +24299,19 @@ "LaunchTemplateId": { "target": "com.amazonaws.ec2#LaunchTemplateId", "traits": { - "smithy.api#documentation": "The ID of the launch template. To describe one or more versions of a specified launch\n template, you must specify either the launch template ID or the launch template name in\n the request. To describe all the latest or default launch template versions in your\n account, you must omit this parameter.
" + "smithy.api#documentation": "The ID of the launch template.
\nTo describe one or more versions of a specified launch\n template, you must specify either the LaunchTemplateId
or the LaunchTemplateName
, but not both.
To describe all the latest or default launch template versions in your\n account, you must omit this parameter.
" } }, "LaunchTemplateName": { "target": "com.amazonaws.ec2#LaunchTemplateName", "traits": { - "smithy.api#documentation": "The name of the launch template. To describe one or more versions of a specified\n launch template, you must specify either the launch template ID or the launch template\n name in the request. To describe all the latest or default launch template versions in\n your account, you must omit this parameter.
" + "smithy.api#documentation": "The name of the launch template.
\nTo describe one or more versions of a specified\n launch template, you must specify either the LaunchTemplateName
or the LaunchTemplateId
, but not both.
To describe all the latest or default launch template versions in\n your account, you must omit this parameter.
" } }, "Versions": { "target": "com.amazonaws.ec2#VersionStringList", "traits": { - "smithy.api#documentation": "One or more versions of the launch template. Valid values depend on whether you are\n describing a specified launch template (by ID or name) or all launch templates in your\n account.
\nTo describe one or more versions of a specified launch template, valid values are\n $Latest
, $Default
, and numbers.
To describe all launch templates in your account that are defined as the latest\n version, the valid value is $Latest
. To describe all launch templates in\n your account that are defined as the default version, the valid value is\n $Default
. You can specify $Latest
and\n $Default
in the same call. You cannot specify numbers.
One or more versions of the launch template. Valid values depend on whether you are\n describing a specified launch template (by ID or name) or all launch templates in your\n account.
\nTo describe one or more versions of a specified launch template, valid values are\n $Latest
, $Default
, and numbers.
To describe all launch templates in your account that are defined as the latest\n version, the valid value is $Latest
. To describe all launch templates in\n your account that are defined as the default version, the valid value is\n $Default
. You can specify $Latest
and\n $Default
in the same request. You cannot specify numbers.
One or more filters.
\n\n create-time
- The time the launch template version was\n created.
\n ebs-optimized
- A boolean that indicates whether the instance is\n optimized for Amazon EBS I/O.
\n http-endpoint
- Indicates whether the HTTP metadata endpoint on\n your instances is enabled (enabled
| disabled
).
\n http-protocol-ipv4
- Indicates whether the IPv4 endpoint for the\n instance metadata service is enabled (enabled
|\n disabled
).
\n host-resource-group-arn
- The ARN of the host resource group in\n which to launch the instances.
\n http-tokens
- The state of token usage for your instance metadata\n requests (optional
| required
).
\n iam-instance-profile
- The ARN of the IAM instance\n profile.
\n image-id
- The ID of the AMI.
\n instance-type
- The instance type.
\n is-default-version
- A boolean that indicates whether the launch\n template version is the default version.
\n kernel-id
- The kernel ID.
\n license-configuration-arn
- The ARN of the license\n configuration.
\n network-card-index
- The index of the network card.
\n ram-disk-id
- The RAM disk ID.
One or more filters.
\n\n create-time
- The time the launch template version was\n created.
\n ebs-optimized
- A boolean that indicates whether the instance is\n optimized for Amazon EBS I/O.
\n http-endpoint
- Indicates whether the HTTP metadata endpoint on\n your instances is enabled (enabled
| disabled
).
\n http-protocol-ipv4
- Indicates whether the IPv4 endpoint for the\n instance metadata service is enabled (enabled
|\n disabled
).
\n host-resource-group-arn
- The ARN of the host resource group in\n which to launch the instances.
\n http-tokens
- The state of token usage for your instance metadata\n requests (optional
| required
).
\n iam-instance-profile
- The ARN of the IAM instance\n profile.
\n image-id
- The ID of the AMI.
\n instance-type
- The instance type.
\n is-default-version
- A boolean that indicates whether the launch\n template version is the default version.
\n kernel-id
- The kernel ID.
\n license-configuration-arn
- The ARN of the license\n configuration.
\n network-card-index
- The index of the network card.
\n ram-disk-id
- The RAM disk ID.
Describes the specified tags for your EC2 resources.
\nFor more information about tags, see Tagging Your Resources in the\n Amazon Elastic Compute Cloud User Guide.
", + "smithy.api#documentation": "Describes the specified tags for your EC2 resources.
\nFor more information about tags, see Tag your Amazon EC2 resources in the\n Amazon Elastic Compute Cloud User Guide.
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -31960,7 +31969,7 @@ "target": "com.amazonaws.ec2#DisableIpamOrganizationAdminAccountResult" }, "traits": { - "smithy.api#documentation": "Disable the IPAM account. For more information, see Enable integration with Organizations in the Amazon VPC IPAM User Guide.\n
" + "smithy.api#documentation": "Disable the IPAM account. For more information, see Enable integration with Organizations in the Amazon VPC IPAM User Guide.\n
" } }, "com.amazonaws.ec2#DisableIpamOrganizationAdminAccountRequest": { @@ -34379,7 +34388,7 @@ "target": "com.amazonaws.ec2#EnableIpamOrganizationAdminAccountResult" }, "traits": { - "smithy.api#documentation": "Enable an Organizations member account as the IPAM admin account. You cannot select the Organizations management account as the IPAM admin account. For more information, see Enable integration with Organizations in the Amazon VPC IPAM User Guide.\n
" + "smithy.api#documentation": "Enable an Organizations member account as the IPAM admin account. You cannot select the Organizations management account as the IPAM admin account. For more information, see Enable integration with Organizations in the Amazon VPC IPAM User Guide.\n
" } }, "com.amazonaws.ec2#EnableIpamOrganizationAdminAccountRequest": { @@ -36733,7 +36742,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "MaxPrice", - "smithy.api#documentation": "The maximum price per unit hour that you are willing to pay for a Spot Instance.
", + "smithy.api#documentation": "The maximum price per unit hour that you are willing to pay for a Spot Instance. We do not recommend using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price.\n
\nIf you specify a maximum price, your instances will be interrupted more frequently than if you do not specify this parameter.
\nThe maximum price per unit hour that you are willing to pay for a Spot Instance.
" + "smithy.api#documentation": "The maximum price per unit hour that you are willing to pay for a Spot Instance. We do not recommend using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price.\n
\nIf you specify a maximum price, your instances will be interrupted more frequently than if you do not specify this parameter.
\nThe ID of the launch template. If you specify the template ID, you can't specify the\n template name.
", + "smithy.api#documentation": "The ID of the launch template.
\nYou must specify the LaunchTemplateId
or the LaunchTemplateName
, but not both.
The name of the launch template. If you specify the template name, you can't specify\n the template ID.
", + "smithy.api#documentation": "The name of the launch template.
\nYou must specify the LaunchTemplateName
or the LaunchTemplateId
, but not both.
Describes the Amazon EC2 launch template and the launch template version that can be used\n by a Spot Fleet request to configure Amazon EC2 instances. For information about launch templates,\n see Launching an instance from a launch template in the\n Amazon EC2 User Guide for Linux Instances.
" + "smithy.api#documentation": "The Amazon EC2 launch template that can be used by\n a Spot Fleet to configure Amazon EC2 instances. You must specify either the ID or name of the launch template in the request, but not both.
\nFor information about launch templates,\n see Launch an instance from a launch template in the\n Amazon EC2 User Guide for Linux Instances.
" } }, "com.amazonaws.ec2#FleetLaunchTemplateSpecificationRequest": { @@ -36902,13 +36911,13 @@ "LaunchTemplateId": { "target": "com.amazonaws.ec2#LaunchTemplateId", "traits": { - "smithy.api#documentation": "The ID of the launch template. If you specify the template ID, you can't specify the template name.
" + "smithy.api#documentation": "The ID of the launch template.
\nYou must specify the LaunchTemplateId
or the LaunchTemplateName
, but not both.
The name of the launch template. If you specify the template name, you can't specify the template ID.
" + "smithy.api#documentation": "The name of the launch template.
\nYou must specify the LaunchTemplateName
or the LaunchTemplateId
, but not both.
Describes the Amazon EC2 launch template and the launch template version that can be used by\n an EC2 Fleet to configure Amazon EC2 instances. For information about launch templates, see Launching\n an instance from a launch template in the\n Amazon EC2 User Guide.
" + "smithy.api#documentation": "The Amazon EC2 launch template that can be used by\n an EC2 Fleet to configure Amazon EC2 instances. You must specify either the ID or name of the launch template in the request, but not both.
\nFor information about launch templates, see Launch\n an instance from a launch template in the\n Amazon EC2 User Guide.
" } }, "com.amazonaws.ec2#FleetOnDemandAllocationStrategy": { @@ -38591,7 +38600,7 @@ "target": "com.amazonaws.ec2#GetIpamAddressHistoryResult" }, "traits": { - "smithy.api#documentation": "Retrieve historical information about a CIDR within an IPAM scope. For more information, see View the history of IP addresses in the Amazon VPC IPAM User Guide.
", + "smithy.api#documentation": "Retrieve historical information about a CIDR within an IPAM scope. For more information, see View the history of IP addresses in the Amazon VPC IPAM User Guide.
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -38955,7 +38964,7 @@ "target": "com.amazonaws.ec2#GetLaunchTemplateDataResult" }, "traits": { - "smithy.api#documentation": "Retrieves the configuration data of the specified instance. You can use this data to\n create a launch template.
\nThis action calls on other describe actions to get instance information. Depending on\n your instance configuration, you may need to allow the following actions in your IAM\n policy: DescribeSpotInstanceRequests, DescribeInstanceCreditSpecifications,\n DescribeVolumes, DescribeInstanceAttribute, and DescribeElasticGpus. Or, you can allow\n describe*
depending on your instance requirements.
Retrieves the configuration data of the specified instance. You can use this data to\n create a launch template.
\nThis action calls on other describe actions to get instance information. Depending on\n your instance configuration, you may need to allow the following actions in your IAM\n policy: DescribeSpotInstanceRequests
, DescribeInstanceCreditSpecifications
,\n DescribeVolumes
, DescribeInstanceAttribute
, and DescribeElasticGpus
. Or, you can allow\n describe*
depending on your instance requirements.
One or more filters. The possible values are:
\n\n resource-id
- The ID of the resource.
\n resource-type
- The type of resource. The valid value is: vpc
.
\n state
- The state of the subnet association. Valid values are\n associated
|\n associating
\n | disassociated
| disassociating
.
\n subnet-id
- The ID of the subnet.
\n transit-gateway-attachment-id
- The id of the transit gateway attachment.
One or more filters. The possible values are:
\n\n resource-id
- The ID of the resource.
\n resource-type
- The type of resource. The valid value is: vpc
.
\n state
- The state of the subnet association. Valid values are\n associated
| associating
|\n disassociated
| disassociating
.
\n subnet-id
- The ID of the subnet.
\n transit-gateway-attachment-id
- The id of the transit gateway attachment.
The price protection threshold for Spot Instances. This is the maximum you’ll pay for a Spot Instance,\n expressed as a percentage above the cheapest M, C, or R instance type with your specified\n attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance\n types priced above your threshold.
\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.
\nTo turn off price protection, specify a high value, such as 999999
.
This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.
\nIf you set TargetCapacityUnitType
to vcpu
or\n memory-mib
, the price protection threshold is applied based on the\n per-vCPU or per-memory price instead of the per-instance price.
Default: 100
\n
The price protection threshold for Spot Instances. This is the maximum you’ll pay for a Spot Instance,\n expressed as a percentage above the least expensive current generation M, C, or R instance type with your specified\n attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance\n types priced above your threshold.
\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.
\nTo turn off price protection, specify a high value, such as 999999
.
This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.
\nIf you set TargetCapacityUnitType
to vcpu
or\n memory-mib
, the price protection threshold is applied based on the\n per-vCPU or per-memory price instead of the per-instance price.
Default: 100
\n
The price protection threshold for On-Demand Instances. This is the maximum you’ll pay for an On-Demand Instance,\n expressed as a percentage above the cheapest M, C, or R instance type with your specified\n attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance\n types priced above your threshold.
\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.
\nTo turn off price protection, specify a high value, such as 999999
.
This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.
\nIf you set TargetCapacityUnitType
to vcpu
or\n memory-mib
, the price protection threshold is applied based on the\n per-vCPU or per-memory price instead of the per-instance price.
Default: 20
\n
The price protection threshold for On-Demand Instances. This is the maximum you’ll pay for an On-Demand Instance,\n expressed as a percentage above the least expensive current generation M, C, or R instance type with your specified\n attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance\n types priced above your threshold.
\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.
\nTo turn off price protection, specify a high value, such as 999999
.
This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.
\nIf you set TargetCapacityUnitType
to vcpu
or\n memory-mib
, the price protection threshold is applied based on the\n per-vCPU or per-memory price instead of the per-instance price.
Default: 20
\n
The price protection threshold for Spot Instance. This is the maximum you’ll pay for an Spot Instance,\n expressed as a percentage above the cheapest M, C, or R instance type with your specified\n attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance\n types priced above your threshold.
\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.
\nTo turn off price protection, specify a high value, such as 999999
.
This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.
\nIf you set TargetCapacityUnitType
to vcpu
or\n memory-mib
, the price protection threshold is applied based on the\n per-vCPU or per-memory price instead of the per-instance price.
Default: 100
\n
The price protection threshold for Spot Instance. This is the maximum you’ll pay for an Spot Instance,\n expressed as a percentage above the least expensive current generation M, C, or R instance type with your specified\n attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance\n types priced above your threshold.
\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.
\nTo turn off price protection, specify a high value, such as 999999
.
This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.
\nIf you set TargetCapacityUnitType
to vcpu
or\n memory-mib
, the price protection threshold is applied based on the\n per-vCPU or per-memory price instead of the per-instance price.
Default: 100
\n
The price protection threshold for On-Demand Instances. This is the maximum you’ll pay for an On-Demand Instance,\n expressed as a percentage above the cheapest M, C, or R instance type with your specified\n attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance\n types priced above your threshold.
\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.
\nTo turn off price protection, specify a high value, such as 999999
.
This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.
\nIf you set TargetCapacityUnitType
to vcpu
or\n memory-mib
, the price protection threshold is applied based on the\n per-vCPU or per-memory price instead of the per-instance price.
Default: 20
\n
The price protection threshold for On-Demand Instances. This is the maximum you’ll pay for an On-Demand Instance,\n expressed as a percentage above the least expensive current generation M, C, or R instance type with your specified\n attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance\n types priced above your threshold.
\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.
\nTo turn off price protection, specify a high value, such as 999999
.
This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.
\nIf you set TargetCapacityUnitType
to vcpu
or\n memory-mib
, the price protection threshold is applied based on the\n per-vCPU or per-memory price instead of the per-instance price.
Default: 20
\n
The number of scopes in the IPAM. The scope quota is 5. For more information on quotas, see Quotas in IPAM in the Amazon VPC IPAM User Guide.\n
", + "smithy.api#documentation": "The number of scopes in the IPAM. The scope quota is 5. For more information on quotas, see Quotas in IPAM in the Amazon VPC IPAM User Guide.\n
", "smithy.api#xmlName": "scopeCount" } }, @@ -49063,7 +49072,7 @@ "target": "com.amazonaws.ec2#IpamOperatingRegionSet", "traits": { "aws.protocols#ec2QueryName": "OperatingRegionSet", - "smithy.api#documentation": "The operating Regions for an IPAM. Operating Regions are Amazon Web Services Regions where the IPAM is allowed to manage IP address CIDRs. IPAM only\n discovers and monitors resources in the Amazon Web Services Regions you select as operating Regions.
\nFor more information about operating Regions, see Create an IPAM in the Amazon VPC IPAM User Guide.
", + "smithy.api#documentation": "The operating Regions for an IPAM. Operating Regions are Amazon Web Services Regions where the IPAM is allowed to manage IP address CIDRs. IPAM only\n discovers and monitors resources in the Amazon Web Services Regions you select as operating Regions.
\nFor more information about operating Regions, see Create an IPAM in the Amazon VPC IPAM User Guide.
", "smithy.api#xmlName": "operatingRegionSet" } }, @@ -49085,7 +49094,7 @@ } }, "traits": { - "smithy.api#documentation": "IPAM is a VPC feature that you can use to automate your IP address management workflows including assigning, tracking, troubleshooting, and auditing IP addresses across Amazon Web Services Regions and accounts throughout your Amazon Web Services Organization. For more information, see What is IPAM? in the Amazon VPC IPAM User Guide.
" + "smithy.api#documentation": "IPAM is a VPC feature that you can use to automate your IP address management workflows including assigning, tracking, troubleshooting, and auditing IP addresses across Amazon Web Services Regions and accounts throughout your Amazon Web Services Organization. For more information, see What is IPAM? in the Amazon VPC IPAM User Guide.
" } }, "com.amazonaws.ec2#IpamAddressHistoryMaxResults": { @@ -49152,7 +49161,7 @@ "target": "com.amazonaws.ec2#IpamComplianceStatus", "traits": { "aws.protocols#ec2QueryName": "ResourceComplianceStatus", - "smithy.api#documentation": "The compliance status of a resource. For more information on compliance statuses, see Monitor CIDR usage by resource in the Amazon VPC IPAM User Guide.
", + "smithy.api#documentation": "The compliance status of a resource. For more information on compliance statuses, see Monitor CIDR usage by resource in the Amazon VPC IPAM User Guide.
", "smithy.api#xmlName": "resourceComplianceStatus" } }, @@ -49160,7 +49169,7 @@ "target": "com.amazonaws.ec2#IpamOverlapStatus", "traits": { "aws.protocols#ec2QueryName": "ResourceOverlapStatus", - "smithy.api#documentation": "The overlap status of an IPAM resource. The overlap status tells you if the CIDR for a resource overlaps with another CIDR in the scope. For more information on overlap statuses, see Monitor CIDR usage by resource in the Amazon VPC IPAM User Guide.
", + "smithy.api#documentation": "The overlap status of an IPAM resource. The overlap status tells you if the CIDR for a resource overlaps with another CIDR in the scope. For more information on overlap statuses, see Monitor CIDR usage by resource in the Amazon VPC IPAM User Guide.
", "smithy.api#xmlName": "resourceOverlapStatus" } }, @@ -49190,7 +49199,7 @@ } }, "traits": { - "smithy.api#documentation": "The historical record of a CIDR within an IPAM scope. For more information, see View the history of IP addresses in the Amazon VPC IPAM User Guide.\n
" + "smithy.api#documentation": "The historical record of a CIDR within an IPAM scope. For more information, see View the history of IP addresses in the Amazon VPC IPAM User Guide.\n
" } }, "com.amazonaws.ec2#IpamAddressHistoryRecordSet": { @@ -49325,7 +49334,7 @@ } }, "traits": { - "smithy.api#documentation": "The operating Regions for an IPAM. Operating Regions are Amazon Web Services Regions where the IPAM is allowed to manage IP address CIDRs. IPAM only\n discovers and monitors resources in the Amazon Web Services Regions you select as operating Regions.
\nFor more information about operating Regions, see Create an IPAM in the Amazon VPC IPAM User Guide.
" + "smithy.api#documentation": "The operating Regions for an IPAM. Operating Regions are Amazon Web Services Regions where the IPAM is allowed to manage IP address CIDRs. IPAM only\n discovers and monitors resources in the Amazon Web Services Regions you select as operating Regions.
\nFor more information about operating Regions, see Create an IPAM in the Amazon VPC IPAM User Guide.
" } }, "com.amazonaws.ec2#IpamOperatingRegionSet": { @@ -49435,7 +49444,7 @@ "target": "com.amazonaws.ec2#Integer", "traits": { "aws.protocols#ec2QueryName": "PoolDepth", - "smithy.api#documentation": "The depth of pools in your IPAM pool. The pool depth quota is 10. For more information, see Quotas in IPAM in the Amazon VPC IPAM User Guide.\n
", + "smithy.api#documentation": "The depth of pools in your IPAM pool. The pool depth quota is 10. For more information, see Quotas in IPAM in the Amazon VPC IPAM User Guide.\n
", "smithy.api#xmlName": "poolDepth" } }, @@ -49928,7 +49937,7 @@ "target": "com.amazonaws.ec2#BoxedDouble", "traits": { "aws.protocols#ec2QueryName": "IpUsage", - "smithy.api#documentation": "The IP address space in the IPAM pool that is allocated to this resource. To convert the decimal to a percentage, multiply the decimal by 100.
", + "smithy.api#documentation": "The percentage of IP address space in use. To convert the decimal to a percentage, multiply the decimal by 100. Note the following:
\nFor a resources that are VPCs, this is the percentage of IP address space in the VPC that's taken up by subnet CIDRs.\n
\nFor resources that are subnets, if the subnet has an IPv4 CIDR provisioned to it, this is the percentage of IPv4 address space in the subnet that's in use. If the subnet has an IPv6 CIDR provisioned to it, the percentage of IPv6 address space in use is not represented. The percentage of IPv6 address space in use cannot currently be calculated.\n
\nFor resources that are public IPv4 pools, this is the percentage of IP address space in the pool that's been allocated to Elastic IP addresses (EIPs).\n
\nThe compliance status of the IPAM resource. For more information on compliance statuses, see Monitor CIDR usage by resource in the Amazon VPC IPAM User Guide.
", + "smithy.api#documentation": "The compliance status of the IPAM resource. For more information on compliance statuses, see Monitor CIDR usage by resource in the Amazon VPC IPAM User Guide.
", "smithy.api#xmlName": "complianceStatus" } }, @@ -49944,7 +49953,7 @@ "target": "com.amazonaws.ec2#IpamManagementState", "traits": { "aws.protocols#ec2QueryName": "ManagementState", - "smithy.api#documentation": "The management state of the resource. For more information about management states, see Monitor CIDR usage by resource in the Amazon VPC IPAM User Guide.
", + "smithy.api#documentation": "The management state of the resource. For more information about management states, see Monitor CIDR usage by resource in the Amazon VPC IPAM User Guide.
", "smithy.api#xmlName": "managementState" } }, @@ -49952,7 +49961,7 @@ "target": "com.amazonaws.ec2#IpamOverlapStatus", "traits": { "aws.protocols#ec2QueryName": "OverlapStatus", - "smithy.api#documentation": "The overlap status of an IPAM resource. The overlap status tells you if the CIDR for a resource overlaps with another CIDR in the scope. For more information on overlap statuses, see Monitor CIDR usage by resource in the Amazon VPC IPAM User Guide.
", + "smithy.api#documentation": "The overlap status of an IPAM resource. The overlap status tells you if the CIDR for a resource overlaps with another CIDR in the scope. For more information on overlap statuses, see Monitor CIDR usage by resource in the Amazon VPC IPAM User Guide.
", "smithy.api#xmlName": "overlapStatus" } }, @@ -50131,7 +50140,7 @@ } }, "traits": { - "smithy.api#documentation": "In IPAM, a scope is the highest-level container within IPAM. An IPAM contains two default scopes. Each scope represents the IP space for a single network. The private scope is intended for all private IP address space. The public scope is intended for all public IP address space. Scopes enable you to reuse IP addresses across multiple unconnected networks without causing IP address overlap or conflict.
\nFor more information, see How IPAM works in the Amazon VPC IPAM User Guide.
" + "smithy.api#documentation": "In IPAM, a scope is the highest-level container within IPAM. An IPAM contains two default scopes. Each scope represents the IP space for a single network. The private scope is intended for all private IP address space. The public scope is intended for all public IP address space. Scopes enable you to reuse IP addresses across multiple unconnected networks without causing IP address overlap or conflict.
\nFor more information, see How IPAM works in the Amazon VPC IPAM User Guide.
" } }, "com.amazonaws.ec2#IpamScopeId": { @@ -51342,7 +51351,7 @@ "ThreadsPerCore": { "target": "com.amazonaws.ec2#Integer", "traits": { - "smithy.api#documentation": "The number of threads per CPU core. To disable multithreading for the instance,\n specify a value of 1. Otherwise, specify the default value of 2.
" + "smithy.api#documentation": "The number of threads per CPU core. To disable multithreading for the instance,\n specify a value of 1
. Otherwise, specify the default value of 2
.
The metadata options for the instance. For more information, see Instance Metadata and User Data in the\n Amazon Elastic Compute Cloud User Guide.
" + "smithy.api#documentation": "The metadata options for the instance. For more information, see Instance metadata and user data in the\n Amazon Elastic Compute Cloud User Guide.
" } }, "com.amazonaws.ec2#LaunchTemplateInstanceMetadataOptionsRequest": { @@ -51868,7 +51877,7 @@ "HttpPutResponseHopLimit": { "target": "com.amazonaws.ec2#Integer", "traits": { - "smithy.api#documentation": "The desired HTTP PUT response hop limit for instance metadata requests. The larger the\n number, the further instance metadata requests can travel.
\nDefault: 1
\nPossible values: Integers from 1 to 64
" + "smithy.api#documentation": "The desired HTTP PUT response hop limit for instance metadata requests. The larger the\n number, the further instance metadata requests can travel.
\nDefault: 1
\n
Possible values: Integers from 1 to 64
" } }, "HttpEndpoint": { @@ -51891,7 +51900,7 @@ } }, "traits": { - "smithy.api#documentation": "The metadata options for the instance. For more information, see Instance Metadata and User Data in the\n Amazon Elastic Compute Cloud User Guide.
" + "smithy.api#documentation": "The metadata options for the instance. For more information, see Instance metadata and user data in the\n Amazon Elastic Compute Cloud User Guide.
" } }, "com.amazonaws.ec2#LaunchTemplateInstanceMetadataOptionsState": { @@ -52324,7 +52333,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "SpotPrice", - "smithy.api#documentation": "The maximum price per unit hour that you are willing to pay for a Spot\n Instance.
", + "smithy.api#documentation": "The maximum price per unit hour that you are willing to pay for a Spot Instance. We do not recommend using this parameter because it can lead to \n increased interruptions. If you do not specify this parameter, you will pay the current Spot price.
\nIf you specify a maximum price, your instances will be interrupted more frequently than if you do not specify this parameter.
\nThe ID of the launch template.
" + "smithy.api#documentation": "The ID of the launch template.
\nYou must specify the LaunchTemplateId
or the LaunchTemplateName
, but not both.
The name of the launch template.
" + "smithy.api#documentation": "The name of the launch template.
\nYou must specify the LaunchTemplateName
or the LaunchTemplateId
, but not both.
The maximum hourly price you're willing to pay for the Spot Instances.
", + "smithy.api#documentation": "The maximum hourly price you're willing to pay for the Spot Instances. We do not recommend \n using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price.
\nIf you specify a maximum price, your Spot Instances will be interrupted more frequently than if you do not specify this parameter.
\nThe maximum hourly price you're willing to pay for the Spot Instances.
" + "smithy.api#documentation": "The maximum hourly price you're willing to pay for the Spot Instances. We do not recommend \n using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price.
\nIf you specify a maximum price, your Spot Instances will be interrupted more frequently than if you do not specify this parameter.
\nThe required duration for the Spot Instances (also known as Spot blocks), in minutes.\n This value must be a multiple of 60 (60, 120, 180, 240, 300, or 360).
" + "smithy.api#documentation": "Deprecated.
" } }, "ValidUntil": { "target": "com.amazonaws.ec2#DateTime", "traits": { - "smithy.api#documentation": "The end date of the request. For a one-time request, the request remains active until\n all instances launch, the request is canceled, or this date is reached. If the request\n is persistent, it remains active until it is canceled or this date and time is reached.\n The default end date is 7 days from the current date.
" + "smithy.api#documentation": "The end date of the request, in UTC format\n (YYYY-MM-DDTHH:MM:SSZ). Supported only for\n persistent requests.
\nFor a persistent request, the request remains active until the ValidUntil
\n date and time is reached. Otherwise, the request remains active until you cancel it.
For a one-time request, ValidUntil
is not supported. The request remains active until \n all instances launch or you cancel the request.
Default: 7 days from the current date
" } }, "InstanceInterruptionBehavior": { @@ -55711,7 +55720,7 @@ "target": "com.amazonaws.ec2#ModifyIpamPoolResult" }, "traits": { - "smithy.api#documentation": "Modify the configurations of an IPAM pool.
\nFor more information, see Modify a pool in the Amazon VPC IPAM User Guide.\n
" + "smithy.api#documentation": "Modify the configurations of an IPAM pool.
\nFor more information, see Modify a pool in the Amazon VPC IPAM User Guide.\n
" } }, "com.amazonaws.ec2#ModifyIpamPoolRequest": { @@ -55769,7 +55778,7 @@ "AddAllocationResourceTags": { "target": "com.amazonaws.ec2#RequestIpamResourceTagList", "traits": { - "smithy.api#documentation": "Add tag allocation rules to a pool. For more information about allocation rules, see Create a top-level pool in the Amazon VPC IPAM User Guide.
", + "smithy.api#documentation": "Add tag allocation rules to a pool. For more information about allocation rules, see Create a top-level pool in the Amazon VPC IPAM User Guide.
", "smithy.api#xmlName": "AddAllocationResourceTag" } }, @@ -55820,7 +55829,7 @@ "AddOperatingRegions": { "target": "com.amazonaws.ec2#AddIpamOperatingRegionSet", "traits": { - "smithy.api#documentation": "Choose the operating Regions for the IPAM. Operating Regions are Amazon Web Services Regions where the IPAM is allowed to manage IP address CIDRs. IPAM only\n discovers and monitors resources in the Amazon Web Services Regions you select as operating Regions.
\nFor more information about operating Regions, see Create an IPAM in the Amazon VPC IPAM User Guide.
", + "smithy.api#documentation": "Choose the operating Regions for the IPAM. Operating Regions are Amazon Web Services Regions where the IPAM is allowed to manage IP address CIDRs. IPAM only\n discovers and monitors resources in the Amazon Web Services Regions you select as operating Regions.
\nFor more information about operating Regions, see Create an IPAM in the Amazon VPC IPAM User Guide.
", "smithy.api#xmlName": "AddOperatingRegion" } }, @@ -55842,7 +55851,7 @@ "target": "com.amazonaws.ec2#ModifyIpamResourceCidrResult" }, "traits": { - "smithy.api#documentation": "Modify a resource CIDR. You can use this action to transfer resource CIDRs between scopes and ignore resource CIDRs that you do not want to manage. If set to false, the resource will not be tracked for overlap, it cannot be auto-imported into a pool, and it will be removed from any pool it has an allocation in.
\nFor more information, see Move resource CIDRs between scopes and Change the monitoring state of resource CIDRs in the Amazon VPC IPAM User Guide.
" + "smithy.api#documentation": "Modify a resource CIDR. You can use this action to transfer resource CIDRs between scopes and ignore resource CIDRs that you do not want to manage. If set to false, the resource will not be tracked for overlap, it cannot be auto-imported into a pool, and it will be removed from any pool it has an allocation in.
\nFor more information, see Move resource CIDRs between scopes and Change the monitoring state of resource CIDRs in the Amazon VPC IPAM User Guide.
" } }, "com.amazonaws.ec2#ModifyIpamResourceCidrRequest": { @@ -56001,13 +56010,13 @@ "LaunchTemplateId": { "target": "com.amazonaws.ec2#LaunchTemplateId", "traits": { - "smithy.api#documentation": "The ID of the launch template. You must specify either the launch template ID or\n launch template name in the request.
" + "smithy.api#documentation": "The ID of the launch template.
\nYou must specify either the LaunchTemplateId
or the LaunchTemplateName
, but not both.
The name of the launch template. You must specify either the launch template ID or\n launch template name in the request.
" + "smithy.api#documentation": "The name of the launch template.
\nYou must specify either the LaunchTemplateName
or the LaunchTemplateId
, but not both.
Move an BYOIP IPv4 CIDR to IPAM from a public IPv4 pool.
\nIf you already have an IPv4 BYOIP CIDR with Amazon Web Services, you can move the CIDR to IPAM from a public IPv4 pool. You cannot move an IPv6 CIDR to IPAM. If you are bringing a new IP address to Amazon Web Services for the first time, complete the steps in Tutorial: BYOIP address CIDRs to IPAM.
" + "smithy.api#documentation": "Move an BYOIP IPv4 CIDR to IPAM from a public IPv4 pool.
\nIf you already have an IPv4 BYOIP CIDR with Amazon Web Services, you can move the CIDR to IPAM from a public IPv4 pool. You cannot move an IPv6 CIDR to IPAM. If you are bringing a new IP address to Amazon Web Services for the first time, complete the steps in Tutorial: BYOIP address CIDRs to IPAM.
" } }, "com.amazonaws.ec2#MoveByoipCidrToIpamRequest": { @@ -60990,6 +60999,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "CoreNetworkId", + "smithy.api#documentation": "The ID of the core network where the transit gateway peer is located.
", "smithy.api#xmlName": "coreNetworkId" } }, @@ -62536,7 +62546,7 @@ "target": "com.amazonaws.ec2#ProvisionIpamPoolCidrResult" }, "traits": { - "smithy.api#documentation": "Provision a CIDR to an IPAM pool. You can use this action to provision new CIDRs to a top-level pool or to transfer a CIDR from a top-level pool to a pool within it.
\nFor more information, see Provision CIDRs to pools in the Amazon VPC IPAM User Guide.\n
" + "smithy.api#documentation": "Provision a CIDR to an IPAM pool. You can use this action to provision new CIDRs to a top-level pool or to transfer a CIDR from a top-level pool to a pool within it.
\nFor more information, see Provision CIDRs to pools in the Amazon VPC IPAM User Guide.\n
" } }, "com.amazonaws.ec2#ProvisionIpamPoolCidrRequest": { @@ -62591,7 +62601,7 @@ "target": "com.amazonaws.ec2#ProvisionPublicIpv4PoolCidrResult" }, "traits": { - "smithy.api#documentation": "Provision a CIDR to a public IPv4 pool.
\nFor more information about IPAM, see What is IPAM? in the Amazon VPC IPAM User Guide.
" + "smithy.api#documentation": "Provision a CIDR to a public IPv4 pool.
\nFor more information about IPAM, see What is IPAM? in the Amazon VPC IPAM User Guide.
" } }, "com.amazonaws.ec2#ProvisionPublicIpv4PoolCidrRequest": { @@ -63452,7 +63462,7 @@ "target": "com.amazonaws.ec2#RegisterImageResult" }, "traits": { - "smithy.api#documentation": "Registers an AMI. When you're creating an AMI, this is the final step you must complete\n before you can launch an instance from the AMI. For more information about creating AMIs, see\n Creating your\n own AMIs in the Amazon Elastic Compute Cloud User Guide.
\nFor Amazon EBS-backed instances, CreateImage creates and registers \n \tthe AMI in a single request, so you don't have to register the AMI yourself.
\nIf needed, you can deregister an AMI at any time. Any modifications you make to an AMI backed by an instance store volume invalidates its registration. \n If you make changes to an image, deregister the previous image and register the new image.
\n\n\n Register a snapshot of a root device volume\n
\n \tYou can use RegisterImage
to create an Amazon EBS-backed Linux AMI from\n a snapshot of a root device volume. You specify the snapshot using a block device mapping.\n You can't set the encryption state of the volume using the block device mapping. If the \n snapshot is encrypted, or encryption by default is enabled, the root volume of an instance \n launched from the AMI is encrypted.
For more information, see Create a Linux AMI from a snapshot and Use encryption with Amazon EBS-backed AMIs\n in the Amazon Elastic Compute Cloud User Guide.
\n \n \t\n Amazon Web Services Marketplace product codes\n
\n \tIf any snapshots have Amazon Web Services Marketplace product codes, they are copied to the new\n AMI.
\nWindows and some Linux distributions, such as Red Hat Enterprise Linux (RHEL) and SUSE\n Linux Enterprise Server (SLES), use the Amazon EC2 billing product code associated with an AMI to\n verify the subscription status for package updates. To create a new AMI for operating systems\n that require a billing product code, instead of registering the AMI, do the following to\n preserve the billing product code association:
\nLaunch an instance from an existing AMI with that billing product code.
\nCustomize the instance.
\nCreate an AMI from the instance using CreateImage.
\nIf you purchase a Reserved Instance to apply to an On-Demand Instance that was launched\n from an AMI with a billing product code, make sure that the Reserved Instance has the matching\n billing product code. If you purchase a Reserved Instance without the matching billing product\n code, the Reserved Instance will not be applied to the On-Demand Instance. For information\n about how to obtain the platform details and billing information of an AMI, see Understanding AMI \n \tbilling in the Amazon Elastic Compute Cloud User Guide.
" + "smithy.api#documentation": "Registers an AMI. When you're creating an AMI, this is the final step you must complete\n before you can launch an instance from the AMI. For more information about creating AMIs, see\n Creating your\n own AMIs in the Amazon Elastic Compute Cloud User Guide.
\nFor Amazon EBS-backed instances, CreateImage creates and registers the AMI\n in a single request, so you don't have to register the AMI yourself. We recommend that you\n always use CreateImage unless you have a specific reason to use\n RegisterImage.
\nIf needed, you can deregister an AMI at any time. Any modifications you make to an AMI backed by an instance store volume invalidates its registration. \n If you make changes to an image, deregister the previous image and register the new image.
\n\n\n Register a snapshot of a root device volume\n
\n \tYou can use RegisterImage
to create an Amazon EBS-backed Linux AMI from\n a snapshot of a root device volume. You specify the snapshot using a block device mapping.\n You can't set the encryption state of the volume using the block device mapping. If the \n snapshot is encrypted, or encryption by default is enabled, the root volume of an instance \n launched from the AMI is encrypted.
For more information, see Create a Linux AMI from a snapshot and Use encryption with Amazon EBS-backed AMIs\n in the Amazon Elastic Compute Cloud User Guide.
\n \n \t\n Amazon Web Services Marketplace product codes\n
\n \tIf any snapshots have Amazon Web Services Marketplace product codes, they are copied to the new\n AMI.
\nWindows and some Linux distributions, such as Red Hat Enterprise Linux (RHEL) and SUSE\n Linux Enterprise Server (SLES), use the Amazon EC2 billing product code associated with an AMI to\n verify the subscription status for package updates. To create a new AMI for operating systems\n that require a billing product code, instead of registering the AMI, do the following to\n preserve the billing product code association:
\nLaunch an instance from an existing AMI with that billing product code.
\nCustomize the instance.
\nCreate an AMI from the instance using CreateImage.
\nIf you purchase a Reserved Instance to apply to an On-Demand Instance that was launched\n from an AMI with a billing product code, make sure that the Reserved Instance has the matching\n billing product code. If you purchase a Reserved Instance without the matching billing product\n code, the Reserved Instance will not be applied to the On-Demand Instance. For information\n about how to obtain the platform details and billing information of an AMI, see Understanding AMI \n \tbilling in the Amazon Elastic Compute Cloud User Guide.
" } }, "com.amazonaws.ec2#RegisterImageRequest": { @@ -64105,7 +64115,7 @@ "target": "com.amazonaws.ec2#ReleaseIpamPoolAllocationResult" }, "traits": { - "smithy.api#documentation": "Release an allocation within an IPAM pool. You can only use this action to release manual allocations. To remove an allocation for a resource without deleting the resource, set its monitored state to false using ModifyIpamResourceCidr. For more information, see Release an allocation in the Amazon VPC IPAM User Guide.\n
" + "smithy.api#documentation": "Release an allocation within an IPAM pool. You can only use this action to release manual allocations. To remove an allocation for a resource without deleting the resource, set its monitored state to false using ModifyIpamResourceCidr. For more information, see Release an allocation in the Amazon VPC IPAM User Guide.\n
" } }, "com.amazonaws.ec2#ReleaseIpamPoolAllocationRequest": { @@ -64164,7 +64174,7 @@ } }, "traits": { - "smithy.api#documentation": "Remove an operating Region from an IPAM. Operating Regions are Amazon Web Services Regions where the IPAM is allowed to manage IP address CIDRs. IPAM only\n discovers and monitors resources in the Amazon Web Services Regions you select as operating Regions.
\nFor more information about operating Regions, see Create an IPAM in the Amazon VPC IPAM User Guide\n
" + "smithy.api#documentation": "Remove an operating Region from an IPAM. Operating Regions are Amazon Web Services Regions where the IPAM is allowed to manage IP address CIDRs. IPAM only\n discovers and monitors resources in the Amazon Web Services Regions you select as operating Regions.
\nFor more information about operating Regions, see Create an IPAM in the Amazon VPC IPAM User Guide\n
" } }, "com.amazonaws.ec2#RemoveIpamOperatingRegionSet": { @@ -65317,7 +65327,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "SpotPrice", - "smithy.api#documentation": "The maximum price per hour that you are willing to pay for a Spot Instance. The\n default is the On-Demand price.
", + "smithy.api#documentation": "The maximum price per unit hour that you are willing to pay for a Spot Instance. We do not recommend \n using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price.
\nIf you specify a maximum price, your instances will be interrupted more frequently than if you do not specify this parameter.
\nThe tags to apply to the resources during launch. You can only tag instances and\n volumes on launch. The specified tags are applied to all instances or volumes that are\n created during launch. To tag a resource after it has been created, see CreateTags.
", + "smithy.api#documentation": "The tags to apply to the resources that are created during instance launch.
\nYou can specify tags for the following resources only:
\nInstances
\nVolumes
\nElastic graphics
\nSpot Instance requests
\nNetwork interfaces
\nTo tag a resource after it has been created, see CreateTags.
", "smithy.api#xmlName": "TagSpecification" } }, "LaunchTemplate": { "target": "com.amazonaws.ec2#LaunchTemplateSpecification", "traits": { - "smithy.api#documentation": "The launch template to use to launch the instances. Any parameters that you specify in\n RunInstances override the same parameters in the launch template.\n You can specify either the name or ID of a launch template, but not both.
" + "smithy.api#documentation": "The launch template to use to launch the instances. Any parameters that you specify in\n RunInstances override the same parameters in the launch template.\n You can specify either the name or ID of a launch template, but not both.
" } }, "InstanceMarketOptions": { @@ -68735,7 +68745,7 @@ "CreditSpecification": { "target": "com.amazonaws.ec2#CreditSpecificationRequest", "traits": { - "smithy.api#documentation": "The credit option for CPU usage of the burstable performance instance. Valid values\n are standard
and unlimited
. To change this attribute after\n launch, use \n ModifyInstanceCreditSpecification. For more information, see Burstable\n performance instances in the Amazon EC2 User Guide.
Default: standard
(T2 instances) or unlimited
(T3/T3a\n instances)
For T3 instances with host
tenancy, only standard
is\n supported.
The credit option for CPU usage of the burstable performance instance. Valid values\n are standard
and unlimited
. To change this attribute after\n launch, use \n ModifyInstanceCreditSpecification. For more information, see Burstable\n performance instances in the Amazon EC2 User Guide.
Default: standard
(T2 instances) or unlimited
(T3/T3a\n instances)
For T3 instances with host
tenancy, only standard
is\n supported.
Indicates whether an instance is enabled for hibernation. For more information, see\n Hibernate\n your instance in the Amazon EC2 User Guide.
\nYou can't enable hibernation and Amazon Web Services Nitro Enclaves on the same\n instance.
" + "smithy.api#documentation": "Indicates whether an instance is enabled for hibernation. For more information, see\n Hibernate\n your instance in the Amazon EC2 User Guide.
\nYou can't enable hibernation and Amazon Web Services Nitro Enclaves on the same\n instance.
" } }, "LicenseSpecifications": { @@ -68772,7 +68782,7 @@ "EnclaveOptions": { "target": "com.amazonaws.ec2#EnclaveOptionsRequest", "traits": { - "smithy.api#documentation": "Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. For\n more information, see What is Amazon Web Services Nitro\n Enclaves? in the Amazon Web Services Nitro Enclaves User\n Guide.
\nYou can't enable Amazon Web Services Nitro Enclaves and hibernation on the same\n instance.
" + "smithy.api#documentation": "Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. For\n more information, see What is Amazon Web Services Nitro\n Enclaves? in the Amazon Web Services Nitro Enclaves User\n Guide.
\nYou can't enable Amazon Web Services Nitro Enclaves and hibernation on the same\n instance.
" } }, "PrivateDnsNameOptions": { @@ -68790,7 +68800,7 @@ "DisableApiStop": { "target": "com.amazonaws.ec2#Boolean", "traits": { - "smithy.api#documentation": "Indicates whether an instance is enabled for stop protection. For more information,\n see Stop\n Protection.\n
" + "smithy.api#documentation": "Indicates whether an instance is enabled for stop protection. For more information,\n see Stop\n protection.\n
" } } } @@ -70524,7 +70534,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "ServiceName", - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the service.
", + "smithy.api#documentation": "The name of the service.
", "smithy.api#xmlName": "serviceName" } }, @@ -71686,7 +71696,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "SpotPrice", - "smithy.api#documentation": "The maximum price per unit hour that you are willing to pay for a Spot Instance. \n If this value is not specified, the default is the Spot price specified for the fleet.\n To determine the Spot price per unit hour, divide the Spot price by the\n value of WeightedCapacity
.
The maximum price per unit hour that you are willing to pay for a Spot Instance. We do not recommend using this parameter because it can lead to \n increased interruptions. If you do not specify this parameter, you will pay the current Spot price.
\nIf you specify a maximum price, your instances will be interrupted more frequently than if you do not specify this parameter.
\nThe maximum price per unit hour that you are willing to pay for a Spot Instance. The\n default is the On-Demand price.
", + "smithy.api#documentation": "The maximum price per unit hour that you are willing to pay for a Spot Instance. We do not recommend \n using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price.
\nIf you specify a maximum price, your instances will be interrupted more frequently than if you do not specify this parameter.
\nThe maximum price per hour that you are willing to pay for a Spot Instance.
", + "smithy.api#documentation": "The maximum price per unit hour that you are willing to pay for a Spot Instance. We do not recommend \n using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price.
\nIf you specify a maximum price, your instances will be interrupted more frequently than if you do not specify this parameter.
\nThe maximum hourly price you're willing to pay for the Spot Instances. The default is\n the On-Demand price.
" + "smithy.api#documentation": "The maximum hourly price that you're willing to pay for a Spot Instance. We do not recommend \n using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price.
\nIf you specify a maximum price, your Spot Instances will be interrupted more frequently than if you do not specify this parameter.
\nThe maximum amount per hour for Spot Instances that you're willing to pay.
", + "smithy.api#documentation": "The maximum amount per hour for Spot Instances that you're willing to pay. We do not recommend\n using this parameter because it can lead to increased interruptions. If you do not specify\n this parameter, you will pay the current Spot price.
\nIf you specify a maximum price, your Spot Instances will be interrupted more frequently than if you do not specify this parameter.
\nThe maximum amount per hour for Spot Instances that you're willing to pay.
" + "smithy.api#documentation": "The maximum amount per hour for Spot Instances that you're willing to pay. We do not recommend\n using this parameter because it can lead to increased interruptions. If you do not specify\n this parameter, you will pay the current Spot price.
\nIf you specify a maximum price, your Spot Instances will be interrupted more frequently than if you do not specify this parameter.
\nThe maximum price per hour that you are willing to pay for a Spot Instance.
", + "smithy.api#documentation": "The maximum price per unit hour that you are willing to pay for a Spot Instance. We do not recommend \n using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price.
\nIf you specify a maximum price, your instances will be interrupted more frequently than if you do not specify this parameter.
\nDescribes the maximum price per hour that you are willing to pay for a Spot\n Instance.
" + "smithy.api#documentation": "The maximum price per unit hour that you are willing to pay for a Spot Instance. We do not recommend \n using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price.
\nIf you specify a maximum price, your instances will be interrupted more frequently than if you do not specify this parameter.
\nThe tags to apply to a resource when the resource is being created.
" + "smithy.api#documentation": "The tags to apply to a resource when the resource is being created.
\nThe Valid Values
lists all the resource types that can be tagged.\n However, the action you're using might not support tagging all of these resource types.\n If you try to tag a resource type that is unsupported for the action you're using,\n you'll get an error.
Details about the transit gateway peering attachment.
", "smithy.api#xmlName": "options" } }, @@ -76768,9 +76779,13 @@ "target": "com.amazonaws.ec2#DynamicRoutingValue", "traits": { "aws.protocols#ec2QueryName": "DynamicRouting", + "smithy.api#documentation": "Describes whether dynamic routing is enabled or disabled for the transit gateway peering attachment.
", "smithy.api#xmlName": "dynamicRouting" } } + }, + "traits": { + "smithy.api#documentation": "Describes dynamic routing for the transit gateway peering attachment.
" } }, "com.amazonaws.ec2#TransitGatewayPolicyRule": { @@ -77321,6 +77336,7 @@ "target": "com.amazonaws.ec2#TransitGatewayRouteTableAnnouncementId", "traits": { "aws.protocols#ec2QueryName": "TransitGatewayRouteTableAnnouncementId", + "smithy.api#documentation": "The ID of the transit gateway route table announcement.
", "smithy.api#xmlName": "transitGatewayRouteTableAnnouncementId" } }, @@ -77517,6 +77533,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "CoreNetworkId", + "smithy.api#documentation": "The ID of the core network for the transit gateway route table announcement.
", "smithy.api#xmlName": "coreNetworkId" } }, @@ -77532,6 +77549,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "PeerCoreNetworkId", + "smithy.api#documentation": "The ID of the core network ID for the peer.
", "smithy.api#xmlName": "peerCoreNetworkId" } }, From d790bda6e7dffcb5a74c12550d4bbd14bdb4051c Mon Sep 17 00:00:00 2001 From: awstoolsInventory data for installed discovery agents.
*/ @@ -1721,6 +1740,8 @@ export interface GetDiscoverySummaryResponse { * */ meCollectorSummary?: CustomerMeCollectorInfo; + + agentlessCollectorSummary?: CustomerAgentlessCollectorInfo; } export namespace GetDiscoverySummaryResponse { diff --git a/clients/client-application-discovery-service/src/protocols/Aws_json1_1.ts b/clients/client-application-discovery-service/src/protocols/Aws_json1_1.ts index 1c69e7f4df446..61654dfcff551 100644 --- a/clients/client-application-discovery-service/src/protocols/Aws_json1_1.ts +++ b/clients/client-application-discovery-service/src/protocols/Aws_json1_1.ts @@ -107,6 +107,7 @@ import { CreateTagsRequest, CreateTagsResponse, CustomerAgentInfo, + CustomerAgentlessCollectorInfo, CustomerConnectorInfo, CustomerMeCollectorInfo, DeleteApplicationsRequest, @@ -2758,6 +2759,21 @@ const deserializeAws_json1_1CustomerAgentInfo = (output: any, context: __SerdeCo } as any; }; +const deserializeAws_json1_1CustomerAgentlessCollectorInfo = ( + output: any, + context: __SerdeContext +): CustomerAgentlessCollectorInfo => { + return { + activeAgentlessCollectors: __expectInt32(output.activeAgentlessCollectors), + denyListedAgentlessCollectors: __expectInt32(output.denyListedAgentlessCollectors), + healthyAgentlessCollectors: __expectInt32(output.healthyAgentlessCollectors), + shutdownAgentlessCollectors: __expectInt32(output.shutdownAgentlessCollectors), + totalAgentlessCollectors: __expectInt32(output.totalAgentlessCollectors), + unhealthyAgentlessCollectors: __expectInt32(output.unhealthyAgentlessCollectors), + unknownAgentlessCollectors: __expectInt32(output.unknownAgentlessCollectors), + } as any; +}; + const deserializeAws_json1_1CustomerConnectorInfo = (output: any, context: __SerdeContext): CustomerConnectorInfo => { return { activeConnectors: __expectInt32(output.activeConnectors), @@ -2954,6 +2970,10 @@ const deserializeAws_json1_1GetDiscoverySummaryResponse = ( return { agentSummary: output.agentSummary != null ? deserializeAws_json1_1CustomerAgentInfo(output.agentSummary, context) : undefined, + agentlessCollectorSummary: + output.agentlessCollectorSummary != null + ? deserializeAws_json1_1CustomerAgentlessCollectorInfo(output.agentlessCollectorSummary, context) + : undefined, applications: __expectLong(output.applications), connectorSummary: output.connectorSummary != null diff --git a/codegen/sdk-codegen/aws-models/application-discovery-service.json b/codegen/sdk-codegen/aws-models/application-discovery-service.json index baf49bd6d30ab..a9250f7065823 100644 --- a/codegen/sdk-codegen/aws-models/application-discovery-service.json +++ b/codegen/sdk-codegen/aws-models/application-discovery-service.json @@ -942,6 +942,53 @@ "smithy.api#documentation": "Inventory data for installed discovery agents.
" } }, + "com.amazonaws.applicationdiscoveryservice#CustomerAgentlessCollectorInfo": { + "type": "structure", + "members": { + "activeAgentlessCollectors": { + "target": "com.amazonaws.applicationdiscoveryservice#Integer", + "traits": { + "smithy.api#required": {} + } + }, + "healthyAgentlessCollectors": { + "target": "com.amazonaws.applicationdiscoveryservice#Integer", + "traits": { + "smithy.api#required": {} + } + }, + "denyListedAgentlessCollectors": { + "target": "com.amazonaws.applicationdiscoveryservice#Integer", + "traits": { + "smithy.api#required": {} + } + }, + "shutdownAgentlessCollectors": { + "target": "com.amazonaws.applicationdiscoveryservice#Integer", + "traits": { + "smithy.api#required": {} + } + }, + "unhealthyAgentlessCollectors": { + "target": "com.amazonaws.applicationdiscoveryservice#Integer", + "traits": { + "smithy.api#required": {} + } + }, + "totalAgentlessCollectors": { + "target": "com.amazonaws.applicationdiscoveryservice#Integer", + "traits": { + "smithy.api#required": {} + } + }, + "unknownAgentlessCollectors": { + "target": "com.amazonaws.applicationdiscoveryservice#Integer", + "traits": { + "smithy.api#required": {} + } + } + } + }, "com.amazonaws.applicationdiscoveryservice#CustomerConnectorInfo": { "type": "structure", "members": { @@ -1771,6 +1818,9 @@ }, "com.amazonaws.applicationdiscoveryservice#ExportConfigurations": { "type": "operation", + "input": { + "target": "smithy.api#Unit" + }, "output": { "target": "com.amazonaws.applicationdiscoveryservice#ExportConfigurationsResponse" }, @@ -2104,6 +2154,9 @@ "traits": { "smithy.api#documentation": "\n Details about Migration Evaluator collectors, including collector status and health.\n
" } + }, + "agentlessCollectorSummary": { + "target": "com.amazonaws.applicationdiscoveryservice#CustomerAgentlessCollectorInfo" } } }, @@ -2350,7 +2403,7 @@ "min": 1, "max": 4000 }, - "smithy.api#pattern": "^\\S+:\\/\\/\\S+\\/[\\s\\S]*\\S[\\s\\S]*$" + "smithy.api#pattern": "^\\S+://\\S+/[\\s\\S]*\\S[\\s\\S]*$" } }, "com.amazonaws.applicationdiscoveryservice#Integer": { From 250fd19d65a83b3d183ced9863bf6aba8f1fb698 Mon Sep 17 00:00:00 2001 From: awstoolsCreates a custom key store that is associated with an CloudHSM cluster that you own and * manage.
- *This operation is part of the Custom Key Store feature feature in KMS, which + *
This operation is part of the custom key store feature feature in KMS, which * combines the convenience and extensive integration of KMS with the isolation and control of a * single-tenant key store.
*Before you create the custom key store, you must assemble @@ -671,7 +671,9 @@ export class KMS extends KMSClient { *
To create a symmetric encryption KMS key, you aren't required to specify any parameters. The default value for
* KeySpec
, SYMMETRIC_DEFAULT
, and the default value for
- * KeyUsage
, ENCRYPT_DECRYPT
, create a symmetric encryption KMS key.
KeyUsage
, ENCRYPT_DECRYPT
, create a symmetric encryption KMS key. For technical details, see
+ *
+ * SYMMETRIC_DEFAULT key spec in the Key Management Service Developer Guide.
* If you need a key for basic encryption and decryption or you * are creating a KMS key to protect your resources in an Amazon Web Services service, create a symmetric encryption KMS key. The key material in a symmetric encryption key never leaves KMS unencrypted. You can use a symmetric encryption KMS key to encrypt and decrypt data up to 4,096 bytes, but they are typically used to generate data keys and data keys pairs. For details, see GenerateDataKey and GenerateDataKeyPair.
*@@ -682,9 +684,9 @@ export class KMS extends KMSClient { * the type of key material in the KMS key. Then, use the
KeyUsage
parameter
* to determine whether the KMS key will be used to encrypt and decrypt or sign and verify.
* You can't change these properties after the KMS key is created.
- * Asymmetric KMS keys contain an RSA key pair or an Elliptic Curve (ECC) key pair. The private key in an asymmetric + *
Asymmetric KMS keys contain an RSA key pair, Elliptic Curve (ECC) key pair, or an SM2 key pair (China Regions only). The private key in an asymmetric * KMS key never leaves KMS unencrypted. However, you can use the GetPublicKey operation to download the public key - * so it can be used outside of KMS. KMS keys with RSA key pairs can be used to encrypt or decrypt data or sign and verify messages (but not both). + * so it can be used outside of KMS. KMS keys with RSA or SM2 key pairs can be used to encrypt or decrypt data or sign and verify messages (but not both). * KMS keys with ECC key pairs can be used only to sign and verify messages. * For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.
*@@ -999,7 +1001,7 @@ export class KMS extends KMSClient { /** *
Deletes a custom key store. This operation does not delete the CloudHSM cluster that is * associated with the custom key store, or affect any users or keys in the cluster.
- *The custom key store that you delete cannot contain any KMS KMS keys. Before deleting the key store, + *
The custom key store that you delete cannot contain any KMS keys. Before deleting the key store,
* verify that you will never need to use any of the KMS keys in the key store for any
* cryptographic operations. Then, use ScheduleKeyDeletion to delete the KMS keys from the
* key store. When the scheduled waiting period expires, the ScheduleKeyDeletion
@@ -1013,7 +1015,7 @@ export class KMS extends KMSClient {
* delete KMS keys and you can reconnect a disconnected custom key store at any time.
If the operation succeeds, it returns a JSON object with no * properties.
- *This operation is part of the Custom Key Store feature feature in KMS, which + *
This operation is part of the custom key store feature feature in KMS, which * combines the convenience and extensive integration of KMS with the isolation and control of a * single-tenant key store.
*@@ -1145,7 +1147,7 @@ export class KMS extends KMSClient { /** *
Gets information about custom key stores in the account and Region.
- *This operation is part of the Custom Key Store feature feature in KMS, which + *
This operation is part of the custom key store feature feature in KMS, which * combines the convenience and extensive integration of KMS with the isolation and control of a * single-tenant key store.
*By default, this operation returns information about all custom key @@ -1247,7 +1249,7 @@ export class KMS extends KMSClient { *
Whether automatic key rotation is enabled on the KMS key. To get this information, use * GetKeyRotationStatus. Also, some key states prevent a KMS key from * being automatically rotated. For details, see How Automatic Key Rotation - * Works in Key Management Service Developer Guide.
+ * Works in the Key Management Service Developer Guide. *Tags on the KMS key. To get this information, use ListResourceTags.
@@ -1452,7 +1454,7 @@ export class KMS extends KMSClient { * ConnectCustomKeyStore operation. *If the operation succeeds, it returns a JSON object with no * properties.
- *This operation is part of the Custom Key Store feature feature in KMS, which + *
This operation is part of the custom key store feature feature in KMS, which * combines the convenience and extensive integration of KMS with the isolation and control of a * single-tenant key store.
* @@ -1649,7 +1651,7 @@ export class KMS extends KMSClient { *InvalidCiphertextException
. For more information, see Encryption
* Context in the Key Management Service Developer Guide.
* If you specify an asymmetric KMS key, you must also specify the encryption algorithm. The - * algorithm must be compatible with the KMS key type.
+ * algorithm must be compatible with the KMS key spec. *When you use an asymmetric KMS key to encrypt or reencrypt data, be sure to record the KMS key and encryption algorithm that you choose. You will be required to provide the same KMS key and encryption algorithm when you decrypt the data. If the KMS key and algorithm do not match the values used to encrypt the data, the decrypt operation fails.
*You are not required to supply the key ID and encryption algorithm when you decrypt with symmetric encryption KMS keys because KMS stores this information in the ciphertext blob. KMS cannot store metadata in ciphertext generated with asymmetric keys. The standard format for asymmetric key ciphertext does not include configurable fields.
@@ -1713,6 +1715,10 @@ export class KMS extends KMSClient { *
+ * SM2PKE
: 1024 bytes (China Regions only)
The KMS key that you use for this operation must be in a compatible key state. For * details, see Key states of KMS keys in the Key Management Service Developer Guide.
@@ -1770,15 +1776,21 @@ export class KMS extends KMSClient { /** *Returns a unique symmetric data key for use outside of KMS. This operation returns a * plaintext copy of the data key and a copy that is encrypted under a symmetric encryption KMS - * key that you specify. The bytes in the plaintext key are random; they are not related to the caller or the KMS - * key. You can use the plaintext key to encrypt your data outside of KMS and store the encrypted - * data key with the encrypted data.
+ * key that you specify. The bytes in the plaintext key are random; they are not related + * to the caller or the KMS key. You can use the plaintext key to encrypt your data outside of KMS + * and store the encrypted data key with the encrypted data. * *To generate a data key, specify the symmetric encryption KMS key that will be used to
* encrypt the data key. You cannot use an asymmetric KMS key to encrypt data keys. To get the
- * type of your KMS key, use the DescribeKey operation. You must also specify the length of
- * the data key. Use either the KeySpec
or NumberOfBytes
parameters
- * (but not both). For 128-bit and 256-bit data keys, use the KeySpec
parameter.
You must also specify the length of the data key. Use either the KeySpec
or
+ * NumberOfBytes
parameters (but not both). For 128-bit and 256-bit data keys, use
+ * the KeySpec
parameter.
To generate an SM4 data key (China Regions only), specify a KeySpec
value of
+ * AES_128
or NumberOfBytes
value of 128
. The symmetric
+ * encryption key used in China Regions to encrypt your data key is an SM4 encryption key.
To get only an encrypted copy of the data key, use GenerateDataKeyWithoutPlaintext. To generate an asymmetric data key pair, use * the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext operation. To get a cryptographically secure @@ -1910,9 +1922,9 @@ export class KMS extends KMSClient { * custom key store. To get the type and origin of your KMS key, use the DescribeKey * operation.
*Use the KeyPairSpec
parameter to choose an RSA or Elliptic Curve (ECC) data
- * key pair. KMS recommends that your use ECC key pairs for signing, and use RSA key pairs for
- * either encryption or signing, but not both. However, KMS cannot enforce any restrictions on
- * the use of data key pairs outside of KMS.
If you are using the data key pair to encrypt data, or for any operation where you don't * immediately need a private key, consider using the GenerateDataKeyPairWithoutPlaintext operation. @@ -2017,9 +2029,9 @@ export class KMS extends KMSClient { * custom key store. To get the type and origin of your KMS key, use the DescribeKey * operation.
*Use the KeyPairSpec
parameter to choose an RSA or Elliptic Curve (ECC) data
- * key pair. KMS recommends that your use ECC key pairs for signing, and use RSA key pairs for
- * either encryption or signing, but not both. However, KMS cannot enforce any restrictions on
- * the use of data key pairs outside of KMS.
* GenerateDataKeyPairWithoutPlaintext
returns a unique data key pair for each
* request. The bytes in the key are not related to the caller or KMS key that is used to encrypt
@@ -2258,13 +2270,16 @@ export class KMS extends KMSClient {
/**
*
Returns a random byte string that is cryptographically secure.
+ *You must use the NumberOfBytes
parameter to specify the length of the random
+ * byte string. There is no default value for string length.
By default, the random byte string is generated in KMS. To generate the byte string in * the CloudHSM cluster that is associated with a custom key store, specify the custom key store * ID.
*Applications in Amazon Web Services Nitro Enclaves can call this operation by using the Amazon Web Services Nitro Enclaves Development Kit. For information about the supporting parameters, see How Amazon Web Services Nitro Enclaves use KMS in the Key Management Service Developer Guide.
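A one-call sketch reflecting the note above that NumberOfBytes has no default value; the length shown is illustrative.

```ts
import { KMSClient, GenerateRandomCommand } from "@aws-sdk/client-kms";

const kms = new KMSClient({});

// NumberOfBytes is required; here we ask for 32 cryptographically secure bytes.
const { Plaintext } = await kms.send(
  new GenerateRandomCommand({ NumberOfBytes: 32 })
);

console.log(Buffer.from(Plaintext ?? []).toString("hex"));
```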
*For more information about entropy and random number generation, see * Key Management Service Cryptographic Details.
- * + *
+ * Cross-account use: Not applicable. GenerateRandom
does not use any account-specific resources, such as KMS keys.
* Required permissions: kms:GenerateRandom (IAM policy)
*/ @@ -2495,8 +2510,11 @@ export class KMS extends KMSClient { * KMS by calling the Encrypt, ReEncrypt, or Verify operations with the identifier of an asymmetric KMS key. When you use the * public key within KMS, you benefit from the authentication, authorization, and logging that * are part of every KMS operation. You also reduce of risk of encrypting data that cannot be - * decrypted. These features are not effective outside of KMS. For details, see Special - * Considerations for Downloading Public Keys. + * decrypted. These features are not effective outside of KMS. + *To verify a signature outside of KMS with an SM2 public key (China Regions only), you must
+ * specify the distinguishing ID. By default, KMS uses 1234567812345678
as the
+ * distinguishing ID. For more information, see Offline verification
+ * with SM2 key pairs.
To help you use the public key safely outside of KMS, GetPublicKey
returns
* important information about the public key in the response, including:
If the operation succeeds, it returns a JSON object with no * properties.
- *This operation is part of the Custom Key Store feature feature in KMS, which + *
This operation is part of the custom key store feature feature in KMS, which * combines the convenience and extensive integration of KMS with the isolation and control of a * single-tenant key store.
*@@ -4130,7 +4148,11 @@ export class KMS extends KMSClient { * signature.
*You can also verify the digital signature by using the public key of the KMS key outside
* of KMS. Use the GetPublicKey operation to download the public key in the
- * asymmetric KMS key and then use the public key to verify the signature outside of KMS. The
+ * asymmetric KMS key and then use the public key to verify the signature outside of KMS. To
+ * verify a signature outside of KMS with an SM2 public key, you must specify the distinguishing
+ * ID. By default, KMS uses 1234567812345678
as the distinguishing ID. For more
+ * information, see Offline
+ * verification with SM2 key pairs in Key Management Service Developer Guide. The
* advantage of using the Verify
operation is that it is performed within KMS. As
* a result, it's easy to call, the operation is performed within the FIPS boundary, it is logged
* in CloudTrail, and you can use key policy and IAM policy to determine who is authorized to use
diff --git a/clients/client-kms/src/commands/CreateCustomKeyStoreCommand.ts b/clients/client-kms/src/commands/CreateCustomKeyStoreCommand.ts
index ca08cd4de3467..7d6c110b69db4 100644
--- a/clients/client-kms/src/commands/CreateCustomKeyStoreCommand.ts
+++ b/clients/client-kms/src/commands/CreateCustomKeyStoreCommand.ts
@@ -25,7 +25,7 @@ export interface CreateCustomKeyStoreCommandOutput extends CreateCustomKeyStoreR
/**
*
Creates a custom key store that is associated with an CloudHSM cluster that you own and * manage.
- *This operation is part of the Custom Key Store feature feature in KMS, which + *
This operation is part of the custom key store feature feature in KMS, which * combines the convenience and extensive integration of KMS with the isolation and control of a * single-tenant key store.
*Before you create the custom key store, you must assemble diff --git a/clients/client-kms/src/commands/CreateKeyCommand.ts b/clients/client-kms/src/commands/CreateKeyCommand.ts index e788a85ff14a6..5f739f6930f85 100644 --- a/clients/client-kms/src/commands/CreateKeyCommand.ts +++ b/clients/client-kms/src/commands/CreateKeyCommand.ts @@ -34,7 +34,9 @@ export interface CreateKeyCommandOutput extends CreateKeyResponse, __MetadataBea *
To create a symmetric encryption KMS key, you aren't required to specify any parameters. The default value for
* KeySpec
, SYMMETRIC_DEFAULT
, and the default value for
- * KeyUsage
, ENCRYPT_DECRYPT
, create a symmetric encryption KMS key.
KeyUsage
, ENCRYPT_DECRYPT
, create a symmetric encryption KMS key. For technical details, see
+ *
+ * SYMMETRIC_DEFAULT key spec in the Key Management Service Developer Guide.
* If you need a key for basic encryption and decryption or you * are creating a KMS key to protect your resources in an Amazon Web Services service, create a symmetric encryption KMS key. The key material in a symmetric encryption key never leaves KMS unencrypted. You can use a symmetric encryption KMS key to encrypt and decrypt data up to 4,096 bytes, but they are typically used to generate data keys and data keys pairs. For details, see GenerateDataKey and GenerateDataKeyPair.
*@@ -45,9 +47,9 @@ export interface CreateKeyCommandOutput extends CreateKeyResponse, __MetadataBea * the type of key material in the KMS key. Then, use the
KeyUsage
parameter
* to determine whether the KMS key will be used to encrypt and decrypt or sign and verify.
* You can't change these properties after the KMS key is created.
- * Asymmetric KMS keys contain an RSA key pair or an Elliptic Curve (ECC) key pair. The private key in an asymmetric + *
Asymmetric KMS keys contain an RSA key pair, Elliptic Curve (ECC) key pair, or an SM2 key pair (China Regions only). The private key in an asymmetric * KMS key never leaves KMS unencrypted. However, you can use the GetPublicKey operation to download the public key - * so it can be used outside of KMS. KMS keys with RSA key pairs can be used to encrypt or decrypt data or sign and verify messages (but not both). + * so it can be used outside of KMS. KMS keys with RSA or SM2 key pairs can be used to encrypt or decrypt data or sign and verify messages (but not both). * KMS keys with ECC key pairs can be used only to sign and verify messages. * For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.
*diff --git a/clients/client-kms/src/commands/DeleteCustomKeyStoreCommand.ts b/clients/client-kms/src/commands/DeleteCustomKeyStoreCommand.ts index daca2d97e23ae..3bc3d125ed990 100644 --- a/clients/client-kms/src/commands/DeleteCustomKeyStoreCommand.ts +++ b/clients/client-kms/src/commands/DeleteCustomKeyStoreCommand.ts @@ -25,7 +25,7 @@ export interface DeleteCustomKeyStoreCommandOutput extends DeleteCustomKeyStoreR /** *
Deletes a custom key store. This operation does not delete the CloudHSM cluster that is * associated with the custom key store, or affect any users or keys in the cluster.
- *The custom key store that you delete cannot contain any KMS KMS keys. Before deleting the key store, + *
The custom key store that you delete cannot contain any KMS keys. Before deleting the key store,
* verify that you will never need to use any of the KMS keys in the key store for any
* cryptographic operations. Then, use ScheduleKeyDeletion to delete the KMS keys from the
* key store. When the scheduled waiting period expires, the ScheduleKeyDeletion
@@ -39,7 +39,7 @@ export interface DeleteCustomKeyStoreCommandOutput extends DeleteCustomKeyStoreR
* delete KMS keys and you can reconnect a disconnected custom key store at any time.
If the operation succeeds, it returns a JSON object with no * properties.
- *This operation is part of the Custom Key Store feature feature in KMS, which + *
This operation is part of the custom key store feature feature in KMS, which * combines the convenience and extensive integration of KMS with the isolation and control of a * single-tenant key store.
*diff --git a/clients/client-kms/src/commands/DescribeCustomKeyStoresCommand.ts b/clients/client-kms/src/commands/DescribeCustomKeyStoresCommand.ts index ca7902bc23843..c717d44127ca7 100644 --- a/clients/client-kms/src/commands/DescribeCustomKeyStoresCommand.ts +++ b/clients/client-kms/src/commands/DescribeCustomKeyStoresCommand.ts @@ -24,7 +24,7 @@ export interface DescribeCustomKeyStoresCommandOutput extends DescribeCustomKeyS /** *
Gets information about custom key stores in the account and Region.
- *This operation is part of the Custom Key Store feature feature in KMS, which + *
This operation is part of the custom key store feature feature in KMS, which * combines the convenience and extensive integration of KMS with the isolation and control of a * single-tenant key store.
*By default, this operation returns information about all custom key diff --git a/clients/client-kms/src/commands/DescribeKeyCommand.ts b/clients/client-kms/src/commands/DescribeKeyCommand.ts index ad303d0551df6..df77f6726f620 100644 --- a/clients/client-kms/src/commands/DescribeKeyCommand.ts +++ b/clients/client-kms/src/commands/DescribeKeyCommand.ts @@ -41,7 +41,7 @@ export interface DescribeKeyCommandOutput extends DescribeKeyResponse, __Metadat *
Whether automatic key rotation is enabled on the KMS key. To get this information, use * GetKeyRotationStatus. Also, some key states prevent a KMS key from * being automatically rotated. For details, see How Automatic Key Rotation - * Works in Key Management Service Developer Guide.
+ * Works in the Key Management Service Developer Guide. *Tags on the KMS key. To get this information, use ListResourceTags.
diff --git a/clients/client-kms/src/commands/DisconnectCustomKeyStoreCommand.ts b/clients/client-kms/src/commands/DisconnectCustomKeyStoreCommand.ts index 2e63ccbea51c3..9c0d6ec1e803d 100644 --- a/clients/client-kms/src/commands/DisconnectCustomKeyStoreCommand.ts +++ b/clients/client-kms/src/commands/DisconnectCustomKeyStoreCommand.ts @@ -36,7 +36,7 @@ export interface DisconnectCustomKeyStoreCommandOutput extends DisconnectCustomK * ConnectCustomKeyStore operation. *If the operation succeeds, it returns a JSON object with no * properties.
- *This operation is part of the Custom Key Store feature feature in KMS, which + *
This operation is part of the custom key store feature feature in KMS, which * combines the convenience and extensive integration of KMS with the isolation and control of a * single-tenant key store.
* diff --git a/clients/client-kms/src/commands/EncryptCommand.ts b/clients/client-kms/src/commands/EncryptCommand.ts index f79ffe8cc6331..6e7f374e1aa32 100644 --- a/clients/client-kms/src/commands/EncryptCommand.ts +++ b/clients/client-kms/src/commands/EncryptCommand.ts @@ -33,7 +33,7 @@ export interface EncryptCommandOutput extends EncryptResponse, __MetadataBearer *InvalidCiphertextException
. For more information, see Encryption
* Context in the Key Management Service Developer Guide.
* If you specify an asymmetric KMS key, you must also specify the encryption algorithm. The - * algorithm must be compatible with the KMS key type.
+ * algorithm must be compatible with the KMS key spec. *When you use an asymmetric KMS key to encrypt or reencrypt data, be sure to record the KMS key and encryption algorithm that you choose. You will be required to provide the same KMS key and encryption algorithm when you decrypt the data. If the KMS key and algorithm do not match the values used to encrypt the data, the decrypt operation fails.
*You are not required to supply the key ID and encryption algorithm when you decrypt with symmetric encryption KMS keys because KMS stores this information in the ciphertext blob. KMS cannot store metadata in ciphertext generated with asymmetric keys. The standard format for asymmetric key ciphertext does not include configurable fields.
@@ -97,6 +97,10 @@ export interface EncryptCommandOutput extends EncryptResponse, __MetadataBearer *
+ * SM2PKE
: 1024 bytes (China Regions only)
The KMS key that you use for this operation must be in a compatible key state. For * details, see Key states of KMS keys in the Key Management Service Developer Guide.
diff --git a/clients/client-kms/src/commands/GenerateDataKeyCommand.ts b/clients/client-kms/src/commands/GenerateDataKeyCommand.ts index 9f674a315610c..e0f2e9e56aeb7 100644 --- a/clients/client-kms/src/commands/GenerateDataKeyCommand.ts +++ b/clients/client-kms/src/commands/GenerateDataKeyCommand.ts @@ -25,15 +25,21 @@ export interface GenerateDataKeyCommandOutput extends GenerateDataKeyResponse, _ /** *Returns a unique symmetric data key for use outside of KMS. This operation returns a * plaintext copy of the data key and a copy that is encrypted under a symmetric encryption KMS - * key that you specify. The bytes in the plaintext key are random; they are not related to the caller or the KMS - * key. You can use the plaintext key to encrypt your data outside of KMS and store the encrypted - * data key with the encrypted data.
+ * key that you specify. The bytes in the plaintext key are random; they are not related + * to the caller or the KMS key. You can use the plaintext key to encrypt your data outside of KMS + * and store the encrypted data key with the encrypted data. * *To generate a data key, specify the symmetric encryption KMS key that will be used to
* encrypt the data key. You cannot use an asymmetric KMS key to encrypt data keys. To get the
- * type of your KMS key, use the DescribeKey operation. You must also specify the length of
- * the data key. Use either the KeySpec
or NumberOfBytes
parameters
- * (but not both). For 128-bit and 256-bit data keys, use the KeySpec
parameter.
You must also specify the length of the data key. Use either the KeySpec
or
+ * NumberOfBytes
parameters (but not both). For 128-bit and 256-bit data keys, use
+ * the KeySpec
parameter.
To generate an SM4 data key (China Regions only), specify a KeySpec
value of
+ * AES_128
or NumberOfBytes
value of 128
. The symmetric
+ * encryption key used in China Regions to encrypt your data key is an SM4 encryption key.
To get only an encrypted copy of the data key, use GenerateDataKeyWithoutPlaintext. To generate an asymmetric data key pair, use * the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext operation. To get a cryptographically secure diff --git a/clients/client-kms/src/commands/GenerateDataKeyPairCommand.ts b/clients/client-kms/src/commands/GenerateDataKeyPairCommand.ts index c2b16f35cdb2b..a671d1cb361eb 100644 --- a/clients/client-kms/src/commands/GenerateDataKeyPairCommand.ts +++ b/clients/client-kms/src/commands/GenerateDataKeyPairCommand.ts @@ -39,9 +39,9 @@ export interface GenerateDataKeyPairCommandOutput extends GenerateDataKeyPairRes * custom key store. To get the type and origin of your KMS key, use the DescribeKey * operation.
*Use the KeyPairSpec
parameter to choose an RSA or Elliptic Curve (ECC) data
- * key pair. KMS recommends that your use ECC key pairs for signing, and use RSA key pairs for
- * either encryption or signing, but not both. However, KMS cannot enforce any restrictions on
- * the use of data key pairs outside of KMS.
If you are using the data key pair to encrypt data, or for any operation where you don't * immediately need a private key, consider using the GenerateDataKeyPairWithoutPlaintext operation. diff --git a/clients/client-kms/src/commands/GenerateDataKeyPairWithoutPlaintextCommand.ts b/clients/client-kms/src/commands/GenerateDataKeyPairWithoutPlaintextCommand.ts index f8df32c2c444b..17dcf673805f7 100644 --- a/clients/client-kms/src/commands/GenerateDataKeyPairWithoutPlaintextCommand.ts +++ b/clients/client-kms/src/commands/GenerateDataKeyPairWithoutPlaintextCommand.ts @@ -41,9 +41,9 @@ export interface GenerateDataKeyPairWithoutPlaintextCommandOutput * custom key store. To get the type and origin of your KMS key, use the DescribeKey * operation.
*Use the KeyPairSpec
parameter to choose an RSA or Elliptic Curve (ECC) data
- * key pair. KMS recommends that your use ECC key pairs for signing, and use RSA key pairs for
- * either encryption or signing, but not both. However, KMS cannot enforce any restrictions on
- * the use of data key pairs outside of KMS.
* GenerateDataKeyPairWithoutPlaintext
returns a unique data key pair for each
* request. The bytes in the key are not related to the caller or KMS key that is used to encrypt
diff --git a/clients/client-kms/src/commands/GenerateRandomCommand.ts b/clients/client-kms/src/commands/GenerateRandomCommand.ts
index 5c1453a1682b4..6ddb12452e15e 100644
--- a/clients/client-kms/src/commands/GenerateRandomCommand.ts
+++ b/clients/client-kms/src/commands/GenerateRandomCommand.ts
@@ -24,13 +24,16 @@ export interface GenerateRandomCommandOutput extends GenerateRandomResponse, __M
/**
*
Returns a random byte string that is cryptographically secure.
+ *You must use the NumberOfBytes
parameter to specify the length of the random
+ * byte string. There is no default value for string length.
By default, the random byte string is generated in KMS. To generate the byte string in * the CloudHSM cluster that is associated with a custom key store, specify the custom key store * ID.
*Applications in Amazon Web Services Nitro Enclaves can call this operation by using the Amazon Web Services Nitro Enclaves Development Kit. For information about the supporting parameters, see How Amazon Web Services Nitro Enclaves use KMS in the Key Management Service Developer Guide.
*For more information about entropy and random number generation, see * Key Management Service Cryptographic Details.
- * + *
+ * Cross-account use: Not applicable. GenerateRandom
does not use any account-specific resources, such as KMS keys.
* Required permissions: kms:GenerateRandom (IAM policy)
* @example diff --git a/clients/client-kms/src/commands/GetPublicKeyCommand.ts b/clients/client-kms/src/commands/GetPublicKeyCommand.ts index eb588316d7d2f..6b8e236969a4b 100644 --- a/clients/client-kms/src/commands/GetPublicKeyCommand.ts +++ b/clients/client-kms/src/commands/GetPublicKeyCommand.ts @@ -32,8 +32,11 @@ export interface GetPublicKeyCommandOutput extends GetPublicKeyResponse, __Metad * KMS by calling the Encrypt, ReEncrypt, or Verify operations with the identifier of an asymmetric KMS key. When you use the * public key within KMS, you benefit from the authentication, authorization, and logging that * are part of every KMS operation. You also reduce of risk of encrypting data that cannot be - * decrypted. These features are not effective outside of KMS. For details, see Special - * Considerations for Downloading Public Keys. + * decrypted. These features are not effective outside of KMS. + *To verify a signature outside of KMS with an SM2 public key (China Regions only), you must
+ * specify the distinguishing ID. By default, KMS uses 1234567812345678
as the
+ * distinguishing ID. For more information, see Offline verification
+ * with SM2 key pairs.
To help you use the public key safely outside of KMS, GetPublicKey
returns
* important information about the public key in the response, including:
If the operation succeeds, it returns a JSON object with no * properties.
- *This operation is part of the Custom Key Store feature feature in KMS, which + *
This operation is part of the custom key store feature in KMS, which * combines the convenience and extensive integration of KMS with the isolation and control of a * single-tenant key store.</p>
*diff --git a/clients/client-kms/src/commands/VerifyCommand.ts b/clients/client-kms/src/commands/VerifyCommand.ts index dff46a267f423..52fccfba53d6e 100644 --- a/clients/client-kms/src/commands/VerifyCommand.ts +++ b/clients/client-kms/src/commands/VerifyCommand.ts @@ -35,7 +35,11 @@ export interface VerifyCommandOutput extends VerifyResponse, __MetadataBearer {} * signature.
*You can also verify the digital signature by using the public key of the KMS key outside
* of KMS. Use the GetPublicKey operation to download the public key in the
- * asymmetric KMS key and then use the public key to verify the signature outside of KMS. The
+ * asymmetric KMS key and then use the public key to verify the signature outside of KMS. To
+ * verify a signature outside of KMS with an SM2 public key, you must specify the distinguishing
+ * ID. By default, KMS uses 1234567812345678
as the distinguishing ID. For more
+ * information, see Offline
+ * verification with SM2 key pairs in Key Management Service Developer Guide. The
* advantage of using the Verify
operation is that it is performed within KMS. As
* a result, it's easy to call, the operation is performed within the FIPS boundary, it is logged
* in CloudTrail, and you can use key policy and IAM policy to determine who is authorized to use
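To illustrate the in-KMS side of that trade-off, a hedged sketch of calling Verify follows; the key ARN, message, and signing algorithm are placeholders. Verifying outside of KMS with the downloaded public key (including the SM2 distinguishing-ID case described above) would instead use a local cryptography library.

import { KMSClient, VerifyCommand } from "@aws-sdk/client-kms";

async function verifyInKms(signature: Uint8Array): Promise<boolean> {
  const client = new KMSClient({ region: "us-east-1" });
  const response = await client.send(
    new VerifyCommand({
      KeyId: "arn:aws:kms:us-east-1:111122223333:key/EXAMPLE-KEY-ID", // hypothetical asymmetric signing key
      Message: new TextEncoder().encode("message to verify"),
      MessageType: "RAW",
      Signature: signature,
      SigningAlgorithm: "ECDSA_SHA_256", // must match the algorithm used when the message was signed
    })
  );
  // KMS throws KMSInvalidSignatureException when verification fails, so
  // SignatureValid is true whenever the call returns normally.
  return response.SignatureValid === true;
}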
diff --git a/clients/client-kms/src/models/models_0.ts b/clients/client-kms/src/models/models_0.ts
index b00f17b244947..565a884c467b8 100644
--- a/clients/client-kms/src/models/models_0.ts
+++ b/clients/client-kms/src/models/models_0.ts
@@ -450,6 +450,7 @@ export class CustomKeyStoreNotFoundException extends __BaseException {
export enum ConnectionErrorCodeType {
CLUSTER_NOT_FOUND = "CLUSTER_NOT_FOUND",
INSUFFICIENT_CLOUDHSM_HSMS = "INSUFFICIENT_CLOUDHSM_HSMS",
+ INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET = "INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET",
INTERNAL_ERROR = "INTERNAL_ERROR",
INVALID_CREDENTIALS = "INVALID_CREDENTIALS",
NETWORK_ERRORS = "NETWORK_ERRORS",
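A brief, hedged sketch of how the new error code can surface to a caller: DescribeCustomKeyStores reports the connection state of each key store and, on failure, a ConnectionErrorCode such as the INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET value added here. The key store name and Region are placeholders.

import { KMSClient, DescribeCustomKeyStoresCommand } from "@aws-sdk/client-kms";

async function reportKeyStoreConnection(): Promise<void> {
  const client = new KMSClient({ region: "us-east-1" });
  const { CustomKeyStores = [] } = await client.send(
    new DescribeCustomKeyStoresCommand({ CustomKeyStoreName: "ExampleKeyStore" })
  );
  for (const store of CustomKeyStores) {
    if (store.ConnectionState === "FAILED") {
      // ConnectionErrorCode explains the failure, e.g. CLUSTER_NOT_FOUND or
      // the newly added INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET.
      console.warn(`${store.CustomKeyStoreName}: ${store.ConnectionErrorCode}`);
    }
  }
}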
@@ -565,13 +566,13 @@ export interface CreateCustomKeyStoreRequest {
* CloudHSM cluster that is not already associated with a custom key store. To find the cluster ID,
* use the DescribeClusters operation.
Enter the content of the trust anchor certificate for the cluster. This is the content of
* the customerCA.crt
file that you created when you initialized the cluster.
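Putting the request fields above together, a hedged sketch of creating a custom key store might look like the following; the cluster ID, certificate path, and password value are illustrative placeholders only.

import { readFileSync } from "node:fs";
import { KMSClient, CreateCustomKeyStoreCommand } from "@aws-sdk/client-kms";

async function createKeyStore(): Promise<string | undefined> {
  const client = new KMSClient({ region: "us-east-1" });
  const response = await client.send(
    new CreateCustomKeyStoreCommand({
      CustomKeyStoreName: "ExampleKeyStore",
      CloudHsmClusterId: "cluster-EXAMPLE", // an active cluster not already associated with a key store
      // Content of the customerCA.crt trust anchor created when the cluster was initialized.
      TrustAnchorCertificate: readFileSync("./customerCA.crt", "utf8"),
      KeyStorePassword: "kmsuser-password-placeholder", // kmsuser CU password, 7 to 32 characters
    })
  );
  return response.CustomKeyStoreId;
}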
A key policy document must conform to the following rules.
+ *A key policy document can include only the following characters:
*Up to 32 kilobytes (32768 bytes)
+ *Printable ASCII characters from the space character (\u0020
) through the end of the ASCII character range.
Must be UTF-8 encoded
+ *Printable characters in the Basic Latin and Latin-1 Supplement character set (through \u00FF
).
The only Unicode characters that are permitted in a key policy document are the horizontal tab (U+0009), linefeed (U+000A), carriage return (U+000D), and characters in the range U+0020 to U+00FF.
- *The Sid
element in a key policy statement can include spaces. (Spaces are
- * prohibited in the Sid
element of an IAM policy document.)
The tab (\u0009
), line feed (\u000A
), and carriage return (\u000D
) special characters
For help writing and formatting a JSON policy document, see the IAM JSON Policy Reference in the + *
For information about key policies, see Key policies in KMS in the + * Key Management Service Developer Guide. For help writing and formatting a JSON policy document, see the IAM JSON Policy Reference in the * Identity and Access Management User Guide * .
*/ @@ -1045,6 +1045,10 @@ export interface CreateKeyRequest { *For asymmetric KMS keys with ECC key material, specify
* SIGN_VERIFY
.
For asymmetric KMS keys with SM2 key material (China Regions only), specify ENCRYPT_DECRYPT
or
+ * SIGN_VERIFY
.
Specifies the type of KMS key to create. The default value,
- * SYMMETRIC_DEFAULT
, creates a KMS key with a 256-bit symmetric key for encryption
- * and decryption. For help choosing a key spec for your KMS key, see Choosing a KMS key type in the
+ * SYMMETRIC_DEFAULT
, creates a KMS key with a 256-bit AES-GCM key that is used for encryption and decryption, except in China Regions,
+ * where it creates a 128-bit symmetric key that uses SM4 encryption. For help choosing a key spec for your KMS key, see Choosing a KMS key type in the
* Key Management Service Developer Guide
* .
The KeySpec
determines whether the KMS key contains a symmetric key or an
@@ -1085,7 +1089,8 @@ export interface CreateKeyRequest {
*
- * SYMMETRIC_DEFAULT
(AES-256-GCM)
SYMMETRIC_DEFAULT
+ *
* SM2 key pairs (China Regions only)
+ *
+ * SM2
+ *
To find the ID of a custom key store, use the DescribeCustomKeyStores operation.
*The response includes the custom key store ID and the ID of the CloudHSM cluster.
- *This operation is part of the Custom Key Store feature feature in KMS, which + *
This operation is part of the custom key store feature in KMS, which * combines the convenience and extensive integration of KMS with the isolation and control of a * single-tenant key store.</p>
*/ @@ -1259,6 +1274,7 @@ export namespace CreateKeyRequest { export enum EncryptionAlgorithmSpec { RSAES_OAEP_SHA_1 = "RSAES_OAEP_SHA_1", RSAES_OAEP_SHA_256 = "RSAES_OAEP_SHA_256", + SM2PKE = "SM2PKE", SYMMETRIC_DEFAULT = "SYMMETRIC_DEFAULT", } @@ -1364,6 +1380,7 @@ export enum SigningAlgorithmSpec { RSASSA_PSS_SHA_256 = "RSASSA_PSS_SHA_256", RSASSA_PSS_SHA_384 = "RSASSA_PSS_SHA_384", RSASSA_PSS_SHA_512 = "RSASSA_PSS_SHA_512", + SM2DSA = "SM2DSA", } /** @@ -1801,6 +1818,7 @@ export enum DataKeyPairSpec { RSA_2048 = "RSA_2048", RSA_3072 = "RSA_3072", RSA_4096 = "RSA_4096", + SM2 = "SM2", } export enum DataKeySpec { @@ -2659,7 +2677,7 @@ export interface GenerateDataKeyPairRequest { /** *Determines the type of data key pair that is generated.
- *The KMS rule that restricts the use of asymmetric RSA KMS keys to encrypt and decrypt or to sign and verify (but not both), and the rule that permits you to use ECC KMS keys only to sign and verify, are not effective on data key pairs, which are used outside of KMS.
+ *The KMS rule that restricts the use of asymmetric RSA and SM2 KMS keys to encrypt and decrypt or to sign and verify (but not both), and the rule that permits you to use ECC KMS keys only to sign and verify, are not effective on data key pairs, which are used outside of KMS. The SM2 key spec is only available in China Regions. RSA and ECC asymmetric key pairs are also available in China Regions.
*/ KeyPairSpec: DataKeyPairSpec | string | undefined; @@ -2760,7 +2778,7 @@ export interface GenerateDataKeyPairWithoutPlaintextRequest { /** *Determines the type of data key pair that is generated.
- *The KMS rule that restricts the use of asymmetric RSA KMS keys to encrypt and decrypt or to sign and verify (but not both), and the rule that permits you to use ECC KMS keys only to sign and verify, are not effective on data key pairs, which are used outside of KMS.
+ *The KMS rule that restricts the use of asymmetric RSA and SM2 KMS keys to encrypt and decrypt or to sign and verify (but not both), and the rule that permits you to use ECC KMS keys only to sign and verify, are not effective on data key pairs, which are used outside of KMS. The SM2 key spec is only available in China Regions. RSA and ECC asymmetric key pairs are also available in China Regions.
*/ KeyPairSpec: DataKeyPairSpec | string | undefined; @@ -2975,7 +2993,7 @@ export namespace GenerateMacResponse { export interface GenerateRandomRequest { /** - *The length of the byte string.
+ *The length of the random byte string. This parameter is required.
*/ NumberOfBytes?: number; @@ -4034,22 +4052,22 @@ export interface PutKeyPolicyRequest { *A key policy document must conform to the following rules.
+ *A key policy document can include only the following characters:
*Up to 32 kilobytes (32768 bytes)
- *Must be UTF-8 encoded
+ *Printable ASCII characters from the space character (\u0020
) through the end of the ASCII character range.
The only Unicode characters that are permitted in a key policy document are the horizontal tab (U+0009), linefeed (U+000A), carriage return (U+000D), and characters in the range U+0020 to U+00FF.
+ *Printable characters in the Basic Latin and Latin-1 Supplement character set (through \u00FF
).
The Sid
element in a key policy statement can include spaces. (Spaces are
- * prohibited in the Sid
element of an IAM policy document.)
The tab (\u0009
), line feed (\u000A
), and carriage return (\u000D
) special characters
For information about key policies, see Key policies in KMS in the + * Key Management Service Developer Guide. For help writing and formatting a JSON policy document, see the IAM JSON Policy Reference in the + * Identity and Access Management User Guide + * .
*/ Policy: string | undefined; @@ -4314,22 +4332,22 @@ export interface ReplicateKeyRequest { * * * - *A key policy document must conform to the following rules.
+ *A key policy document can include only the following characters:
*Up to 32 kilobytes (32768 bytes)
- *Must be UTF-8 encoded
+ *Printable ASCII characters from the space character (\u0020
) through the end of the ASCII character range.
The only Unicode characters that are permitted in a key policy document are the horizontal tab (U+0009), linefeed (U+000A), carriage return (U+000D), and characters in the range U+0020 to U+00FF.
+ *Printable characters in the Basic Latin and Latin-1 Supplement character set (through \u00FF
).
The Sid
element in a key policy statement can include spaces. (Spaces are
- * prohibited in the Sid
element of an IAM policy document.)
The tab (\u0009
), line feed (\u000A
), and carriage return (\u000D
) special characters
For information about key policies, see Key policies in KMS in the + * Key Management Service Developer Guide. For help writing and formatting a JSON policy document, see the IAM JSON Policy Reference in the + * Identity and Access Management User Guide + * .
*/ Policy?: string; @@ -4760,7 +4778,7 @@ export interface UpdateAliasRequest { /** *Identifies the alias that is changing its KMS key. This value must begin with
* alias/
followed by the alias name, such as alias/ExampleAlias
. You
- * cannot use UpdateAlias to change the alias name.
UpdateAlias
to change the alias name.
*/
AliasName: string | undefined;
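For completeness, a hedged sketch of the operation this field belongs to: UpdateAlias re-points an existing alias at a different KMS key. The alias and target key ID below are placeholders, and, as noted above, the alias name itself cannot be changed by this call.

import { KMSClient, UpdateAliasCommand } from "@aws-sdk/client-kms";

async function repointAlias(): Promise<void> {
  const client = new KMSClient({ region: "us-east-1" });
  await client.send(
    new UpdateAliasCommand({
      AliasName: "alias/ExampleAlias", // must already exist; the name cannot be changed here
      TargetKeyId: "1234abcd-12ab-34cd-56ef-1234567890ab", // KMS key the alias should point to
    })
  );
}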
diff --git a/clients/client-kms/src/pagination/DescribeCustomKeyStoresPaginator.ts b/clients/client-kms/src/pagination/DescribeCustomKeyStoresPaginator.ts
new file mode 100644
index 0000000000000..95d9ac5d6054f
--- /dev/null
+++ b/clients/client-kms/src/pagination/DescribeCustomKeyStoresPaginator.ts
@@ -0,0 +1,61 @@
+// smithy-typescript generated code
+import { Paginator } from "@aws-sdk/types";
+
+import {
+ DescribeCustomKeyStoresCommand,
+ DescribeCustomKeyStoresCommandInput,
+ DescribeCustomKeyStoresCommandOutput,
+} from "../commands/DescribeCustomKeyStoresCommand";
+import { KMS } from "../KMS";
+import { KMSClient } from "../KMSClient";
+import { KMSPaginationConfiguration } from "./Interfaces";
+
+/**
+ * @private
+ */
+const makePagedClientRequest = async (
+ client: KMSClient,
+ input: DescribeCustomKeyStoresCommandInput,
+ ...args: any
+): PromiseCreates a custom key store that is associated with an CloudHSM cluster that you own and\n manage.
\nThis operation is part of the Custom Key Store feature feature in KMS, which\ncombines the convenience and extensive integration of KMS with the isolation and control of a\nsingle-tenant key store.
\nBefore you create the custom key store, you must assemble\n the required elements, including an CloudHSM cluster that fulfills the requirements for a custom\n key store. For details about the required elements, see Assemble the Prerequisites\n in the Key Management Service Developer Guide.
\nWhen the operation completes successfully, it returns the ID of the new custom key store.\n Before you can use your new custom key store, you need to use the ConnectCustomKeyStore operation to connect the new key store to its CloudHSM\n cluster. Even if you are not going to use your custom key store immediately, you might want to\n connect it to verify that all settings are correct and then disconnect it until you are ready\n to use it.
\nFor help with failures, see Troubleshooting a Custom Key Store in the\n Key Management Service Developer Guide.
\n\n Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.
\n\n Required permissions: kms:CreateCustomKeyStore (IAM policy).
\n\n Related operations:\n
\n\n DeleteCustomKeyStore\n
\n\n UpdateCustomKeyStore\n
\nCreates a custom key store that is associated with an CloudHSM cluster that you own and\n manage.
\nThis operation is part of the custom key store feature feature in KMS, which\ncombines the convenience and extensive integration of KMS with the isolation and control of a\nsingle-tenant key store.
\nBefore you create the custom key store, you must assemble\n the required elements, including an CloudHSM cluster that fulfills the requirements for a custom\n key store. For details about the required elements, see Assemble the Prerequisites\n in the Key Management Service Developer Guide.
\nWhen the operation completes successfully, it returns the ID of the new custom key store.\n Before you can use your new custom key store, you need to use the ConnectCustomKeyStore operation to connect the new key store to its CloudHSM\n cluster. Even if you are not going to use your custom key store immediately, you might want to\n connect it to verify that all settings are correct and then disconnect it until you are ready\n to use it.
\nFor help with failures, see Troubleshooting a Custom Key Store in the\n Key Management Service Developer Guide.
\n\n Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.
\n\n Required permissions: kms:CreateCustomKeyStore (IAM policy).
\n\n Related operations:\n
\n\n DeleteCustomKeyStore\n
\n\n UpdateCustomKeyStore\n
\nIdentifies the CloudHSM cluster for the custom key store. Enter the cluster ID of any active\n CloudHSM cluster that is not already associated with a custom key store. To find the cluster ID,\n use the DescribeClusters operation.
", - "smithy.api#required": {} + "smithy.api#documentation": "Identifies the CloudHSM cluster for the custom key store. Enter the cluster ID of any active\n CloudHSM cluster that is not already associated with a custom key store. To find the cluster ID,\n use the DescribeClusters operation.
" } }, "TrustAnchorCertificate": { "target": "com.amazonaws.kms#TrustAnchorCertificateType", "traits": { - "smithy.api#documentation": "Enter the content of the trust anchor certificate for the cluster. This is the content of\n the customerCA.crt
file that you created when you initialized the cluster.
Enter the content of the trust anchor certificate for the cluster. This is the content of\n the customerCA.crt
file that you created when you initialized the cluster.
Enter the password of the \n kmsuser
crypto user\n (CU) account in the specified CloudHSM cluster. KMS logs into the cluster as this\n user to manage key material on your behalf.
The password must be a string of 7 to 32 characters. Its value is case sensitive.
\nThis parameter tells KMS the kmsuser
account password; it does not change\n the password in the CloudHSM cluster.
Enter the password of the \n kmsuser
crypto user\n (CU) account in the specified CloudHSM cluster. KMS logs into the cluster as this\n user to manage key material on your behalf.
The password must be a string of 7 to 32 characters. Its value is case sensitive.
\nThis parameter tells KMS the kmsuser
account password; it does not change\n the password in the CloudHSM cluster.
Creates a unique customer managed KMS key in your Amazon Web Services account and\n Region.
\nIn addition to the required parameters, you can use the optional parameters to specify a key policy, description, tags, and other useful elements for any key type.
\nKMS is replacing the term customer master key (CMK) with KMS key and KMS key. The concept has not changed. To prevent breaking changes, KMS is keeping some variations of this term.
\nTo create different types of KMS keys, use the following guidance:
\n\nTo create a symmetric encryption KMS key, you aren't required to specify any parameters. The default value for\n KeySpec
, SYMMETRIC_DEFAULT
, and the default value for\n KeyUsage
, ENCRYPT_DECRYPT
, create a symmetric encryption KMS key.
If you need a key for basic encryption and decryption or you \n are creating a KMS key to protect your resources in an Amazon Web Services service, create a symmetric encryption KMS key. The key material in a symmetric encryption key never leaves KMS unencrypted. You can use a symmetric encryption KMS key to encrypt and decrypt data up to 4,096 bytes, but they are typically used to generate data keys and data keys pairs. For details, see GenerateDataKey and GenerateDataKeyPair.
\n\n
To create an asymmetric KMS key, use the KeySpec
parameter to specify\n the type of key material in the KMS key. Then, use the KeyUsage
parameter\n to determine whether the KMS key will be used to encrypt and decrypt or sign and verify.\n You can't change these properties after the KMS key is created.
Asymmetric KMS keys contain an RSA key pair or an Elliptic Curve (ECC) key pair. The private key in an asymmetric \n KMS key never leaves KMS unencrypted. However, you can use the GetPublicKey operation to download the public key\n so it can be used outside of KMS. KMS keys with RSA key pairs can be used to encrypt or decrypt data or sign and verify messages (but not both). \n KMS keys with ECC key pairs can be used only to sign and verify messages. \n For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.
\n\n
To create an HMAC KMS key, set the KeySpec
parameter to a\n key spec value for HMAC KMS keys. Then set the KeyUsage
parameter to\n GENERATE_VERIFY_MAC
. You must set the key usage even though\n GENERATE_VERIFY_MAC
is the only valid key usage value for HMAC KMS keys.\n You can't change these properties after the KMS key is created.
HMAC KMS keys are symmetric keys that never leave KMS unencrypted. You can use\n HMAC keys to generate (GenerateMac) and verify (VerifyMac) HMAC codes for messages up to 4096 bytes.
\nHMAC KMS keys are not supported in all Amazon Web Services Regions. If you try to create an HMAC\n KMS key in an Amazon Web Services Region in which HMAC keys are not supported, the\n CreateKey
operation returns an\n UnsupportedOperationException
. For a list of Regions in which HMAC KMS keys\n are supported, see HMAC keys in\n KMS in the Key Management Service Developer Guide.
\n
To create a multi-Region primary key in the local Amazon Web Services Region,\n use the MultiRegion
parameter with a value of True
. To create\n a multi-Region replica key, that is, a KMS key with the same key ID\n and key material as a primary key, but in a different Amazon Web Services Region, use the ReplicateKey operation. To change a replica key to a primary key, and its\n primary key to a replica key, use the UpdatePrimaryRegion\n operation.
You can create multi-Region KMS keys for all supported KMS key types: symmetric\n encryption KMS keys, HMAC KMS keys, asymmetric encryption KMS keys, and asymmetric\n signing KMS keys. You can also create multi-Region keys with imported key material.\n However, you can't create multi-Region keys in a custom key store.
\nThis operation supports multi-Region keys, an KMS feature that lets you create multiple\n interoperable KMS keys in different Amazon Web Services Regions. Because these KMS keys have the same key ID, key\n material, and other metadata, you can use them interchangeably to encrypt data in one Amazon Web Services Region and decrypt\n it in a different Amazon Web Services Region without re-encrypting the data or making a cross-Region call. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.
\n\n
To import your own key material, begin by creating a symmetric encryption KMS key with no key\n material. To do this, use the Origin
parameter of CreateKey
\n with a value of EXTERNAL
. Next, use GetParametersForImport operation to get a public key and import token, and use the public key to encrypt\n your key material. Then, use ImportKeyMaterial with your import token\n to import the key material. For step-by-step instructions, see Importing Key Material in the \n Key Management Service Developer Guide\n .
This feature supports only symmetric encryption KMS keys, including multi-Region symmetric encryption KMS keys. You cannot import key\n material into any other type of KMS key.
\nTo create a multi-Region primary key with imported key material, use the\n Origin
parameter of CreateKey
with a value of\n EXTERNAL
and the MultiRegion
parameter with a value of\n True
. To create replicas of the multi-Region primary key, use the ReplicateKey operation. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.
\n
To create a symmetric encryption KMS key in a custom key store, use the\n CustomKeyStoreId
parameter to specify the custom key store. You must also\n use the Origin
parameter with a value of AWS_CLOUDHSM
. The\n CloudHSM cluster that is associated with the custom key store must have at least two active\n HSMs in different Availability Zones in the Amazon Web Services Region.
Custom key stores support only symmetric encryption KMS keys. You cannot create an\n HMAC KMS key or an asymmetric KMS key in a custom key store. For information about\n custom key stores in KMS see Custom key stores in KMS in\n the \n Key Management Service Developer Guide\n .
\n\n Cross-account use: No. You cannot use this operation to\n create a KMS key in a different Amazon Web Services account.
\n\n\n Required permissions: kms:CreateKey (IAM policy). To use the\n Tags
parameter, kms:TagResource (IAM policy). For examples and information about related\n permissions, see Allow a user to create\n KMS keys in the Key Management Service Developer Guide.
\n Related operations:\n
\n\n DescribeKey\n
\n\n ListKeys\n
\n\n ScheduleKeyDeletion\n
\nCreates a unique customer managed KMS key in your Amazon Web Services account and\n Region.
\nIn addition to the required parameters, you can use the optional parameters to specify a key policy, description, tags, and other useful elements for any key type.
\nKMS is replacing the term customer master key (CMK) with KMS key and KMS key. The concept has not changed. To prevent breaking changes, KMS is keeping some variations of this term.
\nTo create different types of KMS keys, use the following guidance:
\n\nTo create a symmetric encryption KMS key, you aren't required to specify any parameters. The default value for\n KeySpec
, SYMMETRIC_DEFAULT
, and the default value for\n KeyUsage
, ENCRYPT_DECRYPT
, create a symmetric encryption KMS key. For technical details, see\n \n SYMMETRIC_DEFAULT key spec in the Key Management Service Developer Guide.
If you need a key for basic encryption and decryption or you \n are creating a KMS key to protect your resources in an Amazon Web Services service, create a symmetric encryption KMS key. The key material in a symmetric encryption key never leaves KMS unencrypted. You can use a symmetric encryption KMS key to encrypt and decrypt data up to 4,096 bytes, but they are typically used to generate data keys and data keys pairs. For details, see GenerateDataKey and GenerateDataKeyPair.
\n\n
To create an asymmetric KMS key, use the KeySpec
parameter to specify\n the type of key material in the KMS key. Then, use the KeyUsage
parameter\n to determine whether the KMS key will be used to encrypt and decrypt or sign and verify.\n You can't change these properties after the KMS key is created.
Asymmetric KMS keys contain an RSA key pair, Elliptic Curve (ECC) key pair, or an SM2 key pair (China Regions only). The private key in an asymmetric \n KMS key never leaves KMS unencrypted. However, you can use the GetPublicKey operation to download the public key\n so it can be used outside of KMS. KMS keys with RSA or SM2 key pairs can be used to encrypt or decrypt data or sign and verify messages (but not both). \n KMS keys with ECC key pairs can be used only to sign and verify messages. \n For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.
\n\n
To create an HMAC KMS key, set the KeySpec
parameter to a\n key spec value for HMAC KMS keys. Then set the KeyUsage
parameter to\n GENERATE_VERIFY_MAC
. You must set the key usage even though\n GENERATE_VERIFY_MAC
is the only valid key usage value for HMAC KMS keys.\n You can't change these properties after the KMS key is created.
HMAC KMS keys are symmetric keys that never leave KMS unencrypted. You can use\n HMAC keys to generate (GenerateMac) and verify (VerifyMac) HMAC codes for messages up to 4096 bytes.
\nHMAC KMS keys are not supported in all Amazon Web Services Regions. If you try to create an HMAC\n KMS key in an Amazon Web Services Region in which HMAC keys are not supported, the\n CreateKey
operation returns an\n UnsupportedOperationException
. For a list of Regions in which HMAC KMS keys\n are supported, see HMAC keys in\n KMS in the Key Management Service Developer Guide.
\n
To create a multi-Region primary key in the local Amazon Web Services Region,\n use the MultiRegion
parameter with a value of True
. To create\n a multi-Region replica key, that is, a KMS key with the same key ID\n and key material as a primary key, but in a different Amazon Web Services Region, use the ReplicateKey operation. To change a replica key to a primary key, and its\n primary key to a replica key, use the UpdatePrimaryRegion\n operation.
You can create multi-Region KMS keys for all supported KMS key types: symmetric\n encryption KMS keys, HMAC KMS keys, asymmetric encryption KMS keys, and asymmetric\n signing KMS keys. You can also create multi-Region keys with imported key material.\n However, you can't create multi-Region keys in a custom key store.
\nThis operation supports multi-Region keys, an KMS feature that lets you create multiple\n interoperable KMS keys in different Amazon Web Services Regions. Because these KMS keys have the same key ID, key\n material, and other metadata, you can use them interchangeably to encrypt data in one Amazon Web Services Region and decrypt\n it in a different Amazon Web Services Region without re-encrypting the data or making a cross-Region call. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.
\n\n
To import your own key material, begin by creating a symmetric encryption KMS key with no key\n material. To do this, use the Origin
parameter of CreateKey
\n with a value of EXTERNAL
. Next, use GetParametersForImport operation to get a public key and import token, and use the public key to encrypt\n your key material. Then, use ImportKeyMaterial with your import token\n to import the key material. For step-by-step instructions, see Importing Key Material in the \n Key Management Service Developer Guide\n .
This feature supports only symmetric encryption KMS keys, including multi-Region symmetric encryption KMS keys. You cannot import key\n material into any other type of KMS key.
\nTo create a multi-Region primary key with imported key material, use the\n Origin
parameter of CreateKey
with a value of\n EXTERNAL
and the MultiRegion
parameter with a value of\n True
. To create replicas of the multi-Region primary key, use the ReplicateKey operation. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.
\n
To create a symmetric encryption KMS key in a custom key store, use the\n CustomKeyStoreId
parameter to specify the custom key store. You must also\n use the Origin
parameter with a value of AWS_CLOUDHSM
. The\n CloudHSM cluster that is associated with the custom key store must have at least two active\n HSMs in different Availability Zones in the Amazon Web Services Region.
Custom key stores support only symmetric encryption KMS keys. You cannot create an\n HMAC KMS key or an asymmetric KMS key in a custom key store. For information about\n custom key stores in KMS see Custom key stores in KMS in\n the \n Key Management Service Developer Guide\n .
\n\n Cross-account use: No. You cannot use this operation to\n create a KMS key in a different Amazon Web Services account.
\n\n\n Required permissions: kms:CreateKey (IAM policy). To use the\n Tags
parameter, kms:TagResource (IAM policy). For examples and information about related\n permissions, see Allow a user to create\n KMS keys in the Key Management Service Developer Guide.
\n Related operations:\n
\n\n DescribeKey\n
\n\n ListKeys\n
\n\n ScheduleKeyDeletion\n
\nThe key policy to attach to the KMS key. If you do not specify a key policy, KMS attaches a default key policy to the KMS key.\n For more information, see Default key policy in the\n Key Management Service Developer Guide.
\nIf you provide a key policy, it must meet the following criteria:
\nIf you don't set BypassPolicyLockoutSafetyCheck
to True
, the key policy\n must allow the principal that is making the CreateKey
request to make a\n subsequent PutKeyPolicy request on the KMS key. This reduces the risk\n that the KMS key becomes unmanageable. For more information, refer to the scenario in the\n Default Key Policy section of the \n Key Management Service Developer Guide\n .
Each statement in the key policy must contain one or more principals. The principals\n in the key policy must exist and be visible to KMS. When you create a new Amazon Web Services\n principal (for example, an IAM user or role), you might need to enforce a delay before\n including the new principal in a key policy because the new principal might not be\n immediately visible to KMS. For more information, see Changes that I make are not always immediately visible in the Amazon Web Services\n Identity and Access Management User Guide.
\nA key policy document must conform to the following rules.
\nUp to 32 kilobytes (32768 bytes)
\nMust be UTF-8 encoded
\nThe only Unicode characters that are permitted in a key policy document are the horizontal tab (U+0009), linefeed (U+000A), carriage return (U+000D), and characters in the range U+0020 to U+00FF.
\nThe Sid
element in a key policy statement can include spaces. (Spaces are\n prohibited in the Sid
element of an IAM policy document.)
For help writing and formatting a JSON policy document, see the IAM JSON Policy Reference in the \n Identity and Access Management User Guide\n .
" + "smithy.api#documentation": "The key policy to attach to the KMS key. If you do not specify a key policy, KMS attaches a default key policy to the KMS key.\n For more information, see Default key policy in the\n Key Management Service Developer Guide.
\nIf you provide a key policy, it must meet the following criteria:
\nIf you don't set BypassPolicyLockoutSafetyCheck
to True
, the key policy\n must allow the principal that is making the CreateKey
request to make a\n subsequent PutKeyPolicy request on the KMS key. This reduces the risk\n that the KMS key becomes unmanageable. For more information, refer to the scenario in the\n Default Key Policy section of the \n Key Management Service Developer Guide\n .
Each statement in the key policy must contain one or more principals. The principals\n in the key policy must exist and be visible to KMS. When you create a new Amazon Web Services\n principal (for example, an IAM user or role), you might need to enforce a delay before\n including the new principal in a key policy because the new principal might not be\n immediately visible to KMS. For more information, see Changes that I make are not always immediately visible in the Amazon Web Services\n Identity and Access Management User Guide.
\nA key policy document can include only the following characters:
\nPrintable ASCII characters from the space character (\\u0020
) through the end of the ASCII character range.
Printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF
).
The tab (\\u0009
), line feed (\\u000A
), and carriage return (\\u000D
) special characters
For information about key policies, see Key policies in KMS in the\n Key Management Service Developer Guide. For help writing and formatting a JSON policy document, see the IAM JSON Policy Reference in the \n Identity and Access Management User Guide\n .
" } }, "Description": { @@ -704,7 +705,7 @@ "KeyUsage": { "target": "com.amazonaws.kms#KeyUsageType", "traits": { - "smithy.api#documentation": "Determines the cryptographic operations for which you can use the KMS key. The default value is\n ENCRYPT_DECRYPT
. This parameter is optional when you are creating a symmetric\n encryption KMS key; otherwise, it is required. You\n can't change the KeyUsage
value after the KMS key is created.
Select only one valid value.
\nFor symmetric encryption KMS keys, omit the parameter or specify\n ENCRYPT_DECRYPT
.
For HMAC KMS keys (symmetric), specify GENERATE_VERIFY_MAC
.
For asymmetric KMS keys with RSA key material, specify ENCRYPT_DECRYPT
or\n SIGN_VERIFY
.
For asymmetric KMS keys with ECC key material, specify\n SIGN_VERIFY
.
Determines the cryptographic operations for which you can use the KMS key. The default value is\n ENCRYPT_DECRYPT
. This parameter is optional when you are creating a symmetric\n encryption KMS key; otherwise, it is required. You\n can't change the KeyUsage
value after the KMS key is created.
Select only one valid value.
\nFor symmetric encryption KMS keys, omit the parameter or specify\n ENCRYPT_DECRYPT
.
For HMAC KMS keys (symmetric), specify GENERATE_VERIFY_MAC
.
For asymmetric KMS keys with RSA key material, specify ENCRYPT_DECRYPT
or\n SIGN_VERIFY
.
For asymmetric KMS keys with ECC key material, specify\n SIGN_VERIFY
.
For asymmetric KMS keys with SM2 key material (China Regions only), specify ENCRYPT_DECRYPT
or\n SIGN_VERIFY
.
Specifies the type of KMS key to create. The default value,\n SYMMETRIC_DEFAULT
, creates a KMS key with a 256-bit symmetric key for encryption\n and decryption. For help choosing a key spec for your KMS key, see Choosing a KMS key type in the \n Key Management Service Developer Guide\n .
The KeySpec
determines whether the KMS key contains a symmetric key or an\n asymmetric key pair. It also determines the cryptographic algorithms that the KMS key supports. You can't\n change the KeySpec
after the KMS key is created.\n To further restrict the algorithms that can be used with the KMS key, use a condition key in\n its key policy or IAM policy. For more information, see kms:EncryptionAlgorithm, kms:MacAlgorithm or kms:Signing Algorithm in the \n Key Management Service Developer Guide\n .
\n Amazon Web Services services that\n are integrated with KMS use symmetric encryption KMS keys to protect your data.\n These services do not support asymmetric KMS keys or HMAC KMS keys.
\nKMS supports the following key specs for KMS keys:
\nSymmetric encryption key (default)
\n\n SYMMETRIC_DEFAULT
(AES-256-GCM)
HMAC keys (symmetric)
\n\n HMAC_224
\n
\n HMAC_256
\n
\n HMAC_384
\n
\n HMAC_512
\n
Asymmetric RSA key pairs
\n\n RSA_2048
\n
\n RSA_3072
\n
\n RSA_4096
\n
Asymmetric NIST-recommended elliptic curve key pairs
\n\n ECC_NIST_P256
(secp256r1)
\n ECC_NIST_P384
(secp384r1)
\n ECC_NIST_P521
(secp521r1)
Other asymmetric elliptic curve key pairs
\n\n ECC_SECG_P256K1
(secp256k1), commonly used for\n cryptocurrencies.
Specifies the type of KMS key to create. The default value,\n SYMMETRIC_DEFAULT
, creates a KMS key with a 256-bit AES-GCM key that is used for encryption and decryption, except in China Regions, \n where it creates a 128-bit symmetric key that uses SM4 encryption. For help choosing a key spec for your KMS key, see Choosing a KMS key type in the \n Key Management Service Developer Guide\n .
The KeySpec
determines whether the KMS key contains a symmetric key or an\n asymmetric key pair. It also determines the cryptographic algorithms that the KMS key supports. You can't\n change the KeySpec
after the KMS key is created.\n To further restrict the algorithms that can be used with the KMS key, use a condition key in\n its key policy or IAM policy. For more information, see kms:EncryptionAlgorithm, kms:MacAlgorithm or kms:Signing Algorithm in the \n Key Management Service Developer Guide\n .
\n Amazon Web Services services that\n are integrated with KMS use symmetric encryption KMS keys to protect your data.\n These services do not support asymmetric KMS keys or HMAC KMS keys.
\nKMS supports the following key specs for KMS keys:
\nSymmetric encryption key (default)
\n\n SYMMETRIC_DEFAULT
\n
HMAC keys (symmetric)
\n\n HMAC_224
\n
\n HMAC_256
\n
\n HMAC_384
\n
\n HMAC_512
\n
Asymmetric RSA key pairs
\n\n RSA_2048
\n
\n RSA_3072
\n
\n RSA_4096
\n
Asymmetric NIST-recommended elliptic curve key pairs
\n\n ECC_NIST_P256
(secp256r1)
\n ECC_NIST_P384
(secp384r1)
\n ECC_NIST_P521
(secp521r1)
Other asymmetric elliptic curve key pairs
\n\n ECC_SECG_P256K1
(secp256k1), commonly used for\n cryptocurrencies.
SM2 key pairs (China Regions only)
\n\n SM2
\n
Creates the KMS key in the specified custom key store and the key material in its\n associated CloudHSM cluster. To create a KMS key in a custom key store, you must also specify the\n Origin
parameter with a value of AWS_CLOUDHSM
. The CloudHSM cluster\n that is associated with the custom key store must have at least two active HSMs, each in a\n different Availability Zone in the Region.
This parameter is valid only for symmetric encryption KMS keys in a single Region. You \n cannot create any other type of KMS key in a custom key store.
\nTo find the ID of a custom key store, use the DescribeCustomKeyStores operation.
\nThe response includes the custom key store ID and the ID of the CloudHSM cluster.
\nThis operation is part of the Custom Key Store feature feature in KMS, which\ncombines the convenience and extensive integration of KMS with the isolation and control of a\nsingle-tenant key store.
" + "smithy.api#documentation": "Creates the KMS key in the specified custom key store and the key material in its\n associated CloudHSM cluster. To create a KMS key in a custom key store, you must also specify the\n Origin
parameter with a value of AWS_CLOUDHSM
. The CloudHSM cluster\n that is associated with the custom key store must have at least two active HSMs, each in a\n different Availability Zone in the Region.
This parameter is valid only for symmetric encryption KMS keys in a single Region. You \n cannot create any other type of KMS key in a custom key store.
\nTo find the ID of a custom key store, use the DescribeCustomKeyStores operation.
\nThe response includes the custom key store ID and the ID of the CloudHSM cluster.
\nThis operation is part of the custom key store feature feature in KMS, which\ncombines the convenience and extensive integration of KMS with the isolation and control of a\nsingle-tenant key store.
" } }, "BypassPolicyLockoutSafetyCheck": { @@ -961,6 +962,10 @@ { "value": "HMAC_512", "name": "HMAC_512" + }, + { + "value": "SM2", + "name": "SM2" } ] } @@ -996,6 +1001,10 @@ { "value": "ECC_SECG_P256K1", "name": "ECC_SECG_P256K1" + }, + { + "value": "SM2", + "name": "SM2" } ] } @@ -1182,7 +1191,7 @@ } ], "traits": { - "smithy.api#documentation": "Deletes a custom key store. This operation does not delete the CloudHSM cluster that is\n associated with the custom key store, or affect any users or keys in the cluster.
\nThe custom key store that you delete cannot contain any KMS KMS keys. Before deleting the key store,\n verify that you will never need to use any of the KMS keys in the key store for any\n cryptographic operations. Then, use ScheduleKeyDeletion to delete the KMS keys from the\n key store. When the scheduled waiting period expires, the ScheduleKeyDeletion
\n operation deletes the KMS keys. Then it makes a best effort to delete the key material from\n the associated cluster. However, you might need to manually delete the orphaned key\n material from the cluster and its backups.
After all KMS keys are deleted from KMS, use DisconnectCustomKeyStore\n to disconnect the key store from KMS. Then, you can delete the custom key store.
\nInstead of deleting the custom key store, consider using DisconnectCustomKeyStore to disconnect it from KMS. While the key store is\n disconnected, you cannot create or use the KMS keys in the key store. But, you do not need to\n delete KMS keys and you can reconnect a disconnected custom key store at any time.
\nIf the operation succeeds, it returns a JSON object with no\nproperties.
\nThis operation is part of the Custom Key Store feature feature in KMS, which\ncombines the convenience and extensive integration of KMS with the isolation and control of a\nsingle-tenant key store.
\n\n Cross-account use: No.\n You cannot perform this operation on a custom key store in a different Amazon Web Services account.
\n\n\n Required permissions: kms:DeleteCustomKeyStore (IAM policy)
\n\n Related operations:\n
\n\n CreateCustomKeyStore\n
\n\n UpdateCustomKeyStore\n
\nDeletes a custom key store. This operation does not delete the CloudHSM cluster that is\n associated with the custom key store, or affect any users or keys in the cluster.
\nThe custom key store that you delete cannot contain any KMS keys. Before deleting the key store,\n verify that you will never need to use any of the KMS keys in the key store for any\n cryptographic operations. Then, use ScheduleKeyDeletion to delete the KMS keys from the\n key store. When the scheduled waiting period expires, the ScheduleKeyDeletion
\n operation deletes the KMS keys. Then it makes a best effort to delete the key material from\n the associated cluster. However, you might need to manually delete the orphaned key\n material from the cluster and its backups.
After all KMS keys are deleted from KMS, use DisconnectCustomKeyStore\n to disconnect the key store from KMS. Then, you can delete the custom key store.
\nInstead of deleting the custom key store, consider using DisconnectCustomKeyStore to disconnect it from KMS. While the key store is\n disconnected, you cannot create or use the KMS keys in the key store. But, you do not need to\n delete KMS keys and you can reconnect a disconnected custom key store at any time.
\nIf the operation succeeds, it returns a JSON object with no\nproperties.
\nThis operation is part of the custom key store feature feature in KMS, which\ncombines the convenience and extensive integration of KMS with the isolation and control of a\nsingle-tenant key store.
\n\n Cross-account use: No.\n You cannot perform this operation on a custom key store in a different Amazon Web Services account.
\n\n\n Required permissions: kms:DeleteCustomKeyStore (IAM policy)
\n\n Related operations:\n
\n\n CreateCustomKeyStore\n
\n\n UpdateCustomKeyStore\n
\nGets information about custom key stores in the account and Region.
\nThis operation is part of the Custom Key Store feature feature in KMS, which\ncombines the convenience and extensive integration of KMS with the isolation and control of a\nsingle-tenant key store.
\nBy default, this operation returns information about all custom key\n stores in the account and Region. To get only information about a particular custom key store,\n use either the CustomKeyStoreName
or CustomKeyStoreId
parameter (but\n not both).
To determine whether the custom key store is connected to its CloudHSM cluster, use the\n ConnectionState
element in the response. If an attempt to connect the custom\n key store failed, the ConnectionState
value is FAILED
and the\n ConnectionErrorCode
element in the response indicates the cause of the failure.\n For help interpreting the ConnectionErrorCode
, see CustomKeyStoresListEntry.
Custom key stores have a DISCONNECTED
connection state if the key store has\n never been connected or you use the DisconnectCustomKeyStore operation to\n disconnect it. If your custom key store state is CONNECTED
but you are having\n trouble using it, make sure that its associated CloudHSM cluster is active and contains the\n minimum number of HSMs required for the operation, if any.
For help repairing your custom key store, see the Troubleshooting Custom Key Stores topic in the\n Key Management Service Developer Guide.
\n\n Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.
\n\n Required permissions: kms:DescribeCustomKeyStores (IAM policy)
\n\n Related operations:\n
\n\n CreateCustomKeyStore\n
\n\n DeleteCustomKeyStore\n
\n\n UpdateCustomKeyStore\n
\nGets information about custom key stores in the account and Region.
\nThis operation is part of the custom key store feature feature in KMS, which\ncombines the convenience and extensive integration of KMS with the isolation and control of a\nsingle-tenant key store.
\nBy default, this operation returns information about all custom key\n stores in the account and Region. To get only information about a particular custom key store,\n use either the CustomKeyStoreName
or CustomKeyStoreId
parameter (but\n not both).
To determine whether the custom key store is connected to its CloudHSM cluster, use the\n ConnectionState
element in the response. If an attempt to connect the custom\n key store failed, the ConnectionState
value is FAILED
and the\n ConnectionErrorCode
element in the response indicates the cause of the failure.\n For help interpreting the ConnectionErrorCode
, see CustomKeyStoresListEntry.
Custom key stores have a DISCONNECTED
connection state if the key store has\n never been connected or you use the DisconnectCustomKeyStore operation to\n disconnect it. If your custom key store state is CONNECTED
but you are having\n trouble using it, make sure that its associated CloudHSM cluster is active and contains the\n minimum number of HSMs required for the operation, if any.
For help repairing your custom key store, see the Troubleshooting Custom Key Stores topic in the\n Key Management Service Developer Guide.
\n\n Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.
\n\n Required permissions: kms:DescribeCustomKeyStores (IAM policy)
\n\n Related operations:\n
\n\n CreateCustomKeyStore\n
\n\n DeleteCustomKeyStore\n
\n\n UpdateCustomKeyStore\n
\nProvides detailed information about a KMS key. You can run DescribeKey
on a\n customer managed\n key or an Amazon Web Services managed key.
This detailed information includes the key ARN, creation date (and deletion date, if\n applicable), the key state, and the origin and expiration date (if any) of the key material.\n It includes fields, like KeySpec
, that help you distinguish different types of KMS keys. It also displays the key usage (encryption, signing, or generating and verifying MACs) and the algorithms that the KMS key supports. For KMS keys in custom key stores, it includes\n information about the custom key store, such as the key store ID and the CloudHSM cluster ID. For\n multi-Region keys, it displays the primary key and all related replica keys.
\n DescribeKey
does not return the following information:
Aliases associated with the KMS key. To get this information, use ListAliases.
\nWhether automatic key rotation is enabled on the KMS key. To get this information, use\n GetKeyRotationStatus. Also, some key states prevent a KMS key from\n being automatically rotated. For details, see How Automatic Key Rotation\n Works in Key Management Service Developer Guide.
\nTags on the KMS key. To get this information, use ListResourceTags.
\nKey policies and grants on the KMS key. To get this information, use GetKeyPolicy and ListGrants.
\nIn general, DescribeKey
is a non-mutating operation. It returns data about\n KMS keys, but doesn't change them. However, Amazon Web Services services use DescribeKey
to\n create Amazon Web Services\n managed keys from a predefined Amazon Web Services alias with no key\n ID.
\n Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify\n the key ARN or alias ARN in the value of the KeyId
parameter.
\n Required permissions: kms:DescribeKey (key policy)
\n\n Related operations:\n
\n\n GetKeyPolicy\n
\n\n GetKeyRotationStatus\n
\n\n ListAliases\n
\n\n ListGrants\n
\n\n ListKeys\n
\n\n ListResourceTags\n
\n\n ListRetirableGrants\n
\nProvides detailed information about a KMS key. You can run DescribeKey
on a\n customer managed\n key or an Amazon Web Services managed key.
This detailed information includes the key ARN, creation date (and deletion date, if\n applicable), the key state, and the origin and expiration date (if any) of the key material.\n It includes fields, like KeySpec
, that help you distinguish different types of KMS keys. It also displays the key usage (encryption, signing, or generating and verifying MACs) and the algorithms that the KMS key supports. For KMS keys in custom key stores, it includes\n information about the custom key store, such as the key store ID and the CloudHSM cluster ID. For\n multi-Region keys, it displays the primary key and all related replica keys.
\n DescribeKey
does not return the following information:
Aliases associated with the KMS key. To get this information, use ListAliases.
\nWhether automatic key rotation is enabled on the KMS key. To get this information, use\n GetKeyRotationStatus. Also, some key states prevent a KMS key from\n being automatically rotated. For details, see How Automatic Key Rotation\n Works in the Key Management Service Developer Guide.
\nTags on the KMS key. To get this information, use ListResourceTags.
\nKey policies and grants on the KMS key. To get this information, use GetKeyPolicy and ListGrants.
\nIn general, DescribeKey
is a non-mutating operation. It returns data about\n KMS keys, but doesn't change them. However, Amazon Web Services services use DescribeKey
to\n create Amazon Web Services\n managed keys from a predefined Amazon Web Services alias with no key\n ID.
\n Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify\n the key ARN or alias ARN in the value of the KeyId
parameter.
\n Required permissions: kms:DescribeKey (key policy)
\n\n Related operations:\n
\n\n GetKeyPolicy\n
\n\n GetKeyRotationStatus\n
\n\n ListAliases\n
\n\n ListGrants\n
\n\n ListKeys\n
\n\n ListResourceTags\n
\n\n ListRetirableGrants\n
\nDisconnects the custom key store from its associated CloudHSM cluster. While a custom key\n store is disconnected, you can manage the custom key store and its KMS keys, but you cannot\n create or use KMS keys in the custom key store. You can reconnect the custom key store at any\n time.
\nWhile a custom key store is disconnected, all attempts to create KMS keys in the custom key store or to use existing KMS keys in cryptographic operations will\n fail. This action can prevent users from storing and accessing sensitive data.
\nTo find the connection state of a custom key store, use the DescribeCustomKeyStores operation. To reconnect a custom key store, use the\n ConnectCustomKeyStore operation.
\nIf the operation succeeds, it returns a JSON object with no\nproperties.
\nThis operation is part of the Custom Key Store feature feature in KMS, which\ncombines the convenience and extensive integration of KMS with the isolation and control of a\nsingle-tenant key store.
\n\n\n Cross-account use: No.\n You cannot perform this operation on a custom key store in a different Amazon Web Services account.
\n\n\n Required permissions: kms:DisconnectCustomKeyStore (IAM policy)
\n\n Related operations:\n
\n\n CreateCustomKeyStore\n
\n\n DeleteCustomKeyStore\n
\n\n UpdateCustomKeyStore\n
\nDisconnects the custom key store from its associated CloudHSM cluster. While a custom key\n store is disconnected, you can manage the custom key store and its KMS keys, but you cannot\n create or use KMS keys in the custom key store. You can reconnect the custom key store at any\n time.
\nWhile a custom key store is disconnected, all attempts to create KMS keys in the custom key store or to use existing KMS keys in cryptographic operations will\n fail. This action can prevent users from storing and accessing sensitive data.
\nTo find the connection state of a custom key store, use the DescribeCustomKeyStores operation. To reconnect a custom key store, use the\n ConnectCustomKeyStore operation.
\nIf the operation succeeds, it returns a JSON object with no\nproperties.
\nThis operation is part of the custom key store feature feature in KMS, which\ncombines the convenience and extensive integration of KMS with the isolation and control of a\nsingle-tenant key store.
\n\n\n Cross-account use: No.\n You cannot perform this operation on a custom key store in a different Amazon Web Services account.
\n\n\n Required permissions: kms:DisconnectCustomKeyStore (IAM policy)
\n\n Related operations:\n
\n\n CreateCustomKeyStore\n
\n\n DeleteCustomKeyStore\n
\n\n UpdateCustomKeyStore\n
\nEncrypts plaintext of up to 4,096 bytes using a KMS key. You can use a symmetric or\n asymmetric KMS key with a KeyUsage
of ENCRYPT_DECRYPT
.
You can use this operation to encrypt small amounts of arbitrary data, such as a personal identifier or\n database password, or other sensitive information. You don't need to use the Encrypt
operation to encrypt a data key. The GenerateDataKey and GenerateDataKeyPair operations return a\n plaintext data key and an encrypted copy of that data key.
If you use a symmetric encryption KMS key, you can use an encryption context to add additional\n security to your encryption operation. If you specify an EncryptionContext
when\n encrypting data, you must specify the same encryption context (a case-sensitive exact match)\n when decrypting the data. Otherwise, the request to decrypt fails with an\n InvalidCiphertextException
. For more information, see Encryption\n Context in the Key Management Service Developer Guide.
If you specify an asymmetric KMS key, you must also specify the encryption algorithm. The\n algorithm must be compatible with the KMS key type.
\nWhen you use an asymmetric KMS key to encrypt or reencrypt data, be sure to record the KMS key and encryption algorithm that you choose. You will be required to provide the same KMS key and encryption algorithm when you decrypt the data. If the KMS key and algorithm do not match the values used to encrypt the data, the decrypt operation fails.
\nYou are not required to supply the key ID and encryption algorithm when you decrypt with symmetric encryption KMS keys because KMS stores this information in the ciphertext blob. KMS cannot store metadata in ciphertext generated with asymmetric keys. The standard format for asymmetric key ciphertext does not include configurable fields.
\nThe maximum size of the data that you can encrypt varies with the type of KMS key and the\n encryption algorithm that you choose.
\nSymmetric encryption KMS keys
\n\n SYMMETRIC_DEFAULT
: 4096 bytes
\n RSA_2048
\n
\n RSAES_OAEP_SHA_1
: 214 bytes
\n RSAES_OAEP_SHA_256
: 190 bytes
\n RSA_3072
\n
\n RSAES_OAEP_SHA_1
: 342 bytes
\n RSAES_OAEP_SHA_256
: 318 bytes
\n RSA_4096
\n
\n RSAES_OAEP_SHA_1
: 470 bytes
\n RSAES_OAEP_SHA_256
: 446 bytes
The KMS key that you use for this operation must be in a compatible key state. For\ndetails, see Key states of KMS keys in the Key Management Service Developer Guide.
\n\n Cross-account use: Yes.\n To perform this operation with a KMS key in a different Amazon Web Services account, specify\n the key ARN or alias ARN in the value of the KeyId
parameter.
\n Required permissions: kms:Encrypt (key policy)
\n\n Related operations:\n
\n\n Decrypt\n
\n\n GenerateDataKey\n
\n\n GenerateDataKeyPair\n
\nEncrypts plaintext of up to 4,096 bytes using a KMS key. You can use a symmetric or\n asymmetric KMS key with a KeyUsage
of ENCRYPT_DECRYPT
.
You can use this operation to encrypt small amounts of arbitrary data, such as a personal identifier or\n database password, or other sensitive information. You don't need to use the Encrypt
operation to encrypt a data key. The GenerateDataKey and GenerateDataKeyPair operations return a\n plaintext data key and an encrypted copy of that data key.
If you use a symmetric encryption KMS key, you can use an encryption context to add additional\n security to your encryption operation. If you specify an EncryptionContext
when\n encrypting data, you must specify the same encryption context (a case-sensitive exact match)\n when decrypting the data. Otherwise, the request to decrypt fails with an\n InvalidCiphertextException
. For more information, see Encryption\n Context in the Key Management Service Developer Guide.
If you specify an asymmetric KMS key, you must also specify the encryption algorithm. The\n algorithm must be compatible with the KMS key spec.
\nWhen you use an asymmetric KMS key to encrypt or reencrypt data, be sure to record the KMS key and encryption algorithm that you choose. You will be required to provide the same KMS key and encryption algorithm when you decrypt the data. If the KMS key and algorithm do not match the values used to encrypt the data, the decrypt operation fails.
\nYou are not required to supply the key ID and encryption algorithm when you decrypt with symmetric encryption KMS keys because KMS stores this information in the ciphertext blob. KMS cannot store metadata in ciphertext generated with asymmetric keys. The standard format for asymmetric key ciphertext does not include configurable fields.
\nThe maximum size of the data that you can encrypt varies with the type of KMS key and the\n encryption algorithm that you choose.
\nSymmetric encryption KMS keys
\n\n SYMMETRIC_DEFAULT
: 4096 bytes
\n RSA_2048
\n
\n RSAES_OAEP_SHA_1
: 214 bytes
\n RSAES_OAEP_SHA_256
: 190 bytes
\n RSA_3072
\n
\n RSAES_OAEP_SHA_1
: 342 bytes
\n RSAES_OAEP_SHA_256
: 318 bytes
\n RSA_4096
\n
\n RSAES_OAEP_SHA_1
: 470 bytes
\n RSAES_OAEP_SHA_256
: 446 bytes
\n SM2PKE
: 1024 bytes (China Regions only)
The KMS key that you use for this operation must be in a compatible key state. For\ndetails, see Key states of KMS keys in the Key Management Service Developer Guide.
\n\n Cross-account use: Yes.\n To perform this operation with a KMS key in a different Amazon Web Services account, specify\n the key ARN or alias ARN in the value of the KeyId
parameter.
\n Required permissions: kms:Encrypt (key policy)
\n\n Related operations:\n
\n\n Decrypt\n
\n\n GenerateDataKey\n
\n\n GenerateDataKeyPair\n
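As a rough illustration of the behavior described above, a sketch using the modular JavaScript SDK (assuming `@aws-sdk/client-kms`; the region, key ARN, and encryption context values are placeholders):

```ts
import { KMSClient, EncryptCommand, DecryptCommand } from "@aws-sdk/client-kms";

const client = new KMSClient({ region: "us-east-1" }); // placeholder region
const KeyId = "arn:aws:kms:us-east-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"; // placeholder ARN

// Encrypt a small secret (up to 4,096 bytes for SYMMETRIC_DEFAULT keys).
const { CiphertextBlob } = await client.send(
  new EncryptCommand({
    KeyId,
    Plaintext: new TextEncoder().encode("database-password"),
    // The same context, exactly, must be supplied again on Decrypt.
    EncryptionContext: { purpose: "example" },
  })
);

// For symmetric encryption KMS keys, the key and algorithm are recorded in the
// ciphertext blob, so only the encryption context has to be repeated.
const { Plaintext } = await client.send(
  new DecryptCommand({ CiphertextBlob, EncryptionContext: { purpose: "example" } })
);
console.log(new TextDecoder().decode(Plaintext));
```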
\nReturns a unique symmetric data key for use outside of KMS. This operation returns a\n plaintext copy of the data key and a copy that is encrypted under a symmetric encryption KMS\n key that you specify. The bytes in the plaintext key are random; they are not related to the caller or the KMS\n key. You can use the plaintext key to encrypt your data outside of KMS and store the encrypted\n data key with the encrypted data.
\n\nTo generate a data key, specify the symmetric encryption KMS key that will be used to\n encrypt the data key. You cannot use an asymmetric KMS key to encrypt data keys. To get the\n type of your KMS key, use the DescribeKey operation. You must also specify the length of\n the data key. Use either the KeySpec
or NumberOfBytes
parameters\n (but not both). For 128-bit and 256-bit data keys, use the KeySpec
parameter.
To get only an encrypted copy of the data key, use GenerateDataKeyWithoutPlaintext. To generate an asymmetric data key pair, use\n the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext operation. To get a cryptographically secure\n random byte string, use GenerateRandom.
\n\nYou can use an optional encryption context to add additional security to the encryption\n operation. If you specify an EncryptionContext
, you must specify the same\n encryption context (a case-sensitive exact match) when decrypting the encrypted data key.\n Otherwise, the request to decrypt fails with an InvalidCiphertextException
. For more information, see Encryption Context in the\n Key Management Service Developer Guide.
Applications in Amazon Web Services Nitro Enclaves can call this operation by using the Amazon Web Services Nitro Enclaves Development Kit. For information about the supporting parameters, see How Amazon Web Services Nitro Enclaves use KMS in the Key Management Service Developer Guide.
\nThe KMS key that you use for this operation must be in a compatible key state. For\ndetails, see Key states of KMS keys in the Key Management Service Developer Guide.
\n\n How to use your data\n key\n
\nWe recommend that you use the following pattern to encrypt data locally in your\n application. You can write your own code or use a client-side encryption library, such as the\n Amazon Web Services Encryption SDK, the\n Amazon DynamoDB Encryption Client,\n or Amazon S3\n client-side encryption to do these tasks for you.
\nTo encrypt data outside of KMS:
\nUse the GenerateDataKey
operation to get a data key.
Use the plaintext data key (in the Plaintext
field of the response) to\n encrypt your data outside of KMS. Then erase the plaintext data key from memory.
Store the encrypted data key (in the CiphertextBlob
field of the\n response) with the encrypted data.
To decrypt data outside of KMS:
\nUse the Decrypt operation to decrypt the encrypted data key. The\n operation returns a plaintext copy of the data key.
\nUse the plaintext data key to decrypt data outside of KMS, then erase the plaintext\n data key from memory.
\n\n Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify\n the key ARN or alias ARN in the value of the KeyId
parameter.
\n Required permissions: kms:GenerateDataKey (key policy)
\n\n Related operations:\n
\n\n Decrypt\n
\n\n Encrypt\n
\n\n GenerateDataKeyPair\n
\nReturns a unique symmetric data key for use outside of KMS. This operation returns a\n plaintext copy of the data key and a copy that is encrypted under a symmetric encryption KMS\n key that you specify. The bytes in the plaintext key are random; they are not related \n to the caller or the KMS key. You can use the plaintext key to encrypt your data outside of KMS \n and store the encrypted data key with the encrypted data.
\n\nTo generate a data key, specify the symmetric encryption KMS key that will be used to\n encrypt the data key. You cannot use an asymmetric KMS key to encrypt data keys. To get the\n type of your KMS key, use the DescribeKey operation.
\n \nYou must also specify the length of the data key. Use either the KeySpec
or \n NumberOfBytes
parameters (but not both). For 128-bit and 256-bit data keys, use \n the KeySpec
parameter.
To generate an SM4 data key (China Regions only), specify a KeySpec
value of\n AES_128
or NumberOfBytes
value of 128
. The symmetric \n encryption key used in China Regions to encrypt your data key is an SM4 encryption key.
To get only an encrypted copy of the data key, use GenerateDataKeyWithoutPlaintext. To generate an asymmetric data key pair, use\n the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext operation. To get a cryptographically secure\n random byte string, use GenerateRandom.
\n\nYou can use an optional encryption context to add additional security to the encryption\n operation. If you specify an EncryptionContext
, you must specify the same\n encryption context (a case-sensitive exact match) when decrypting the encrypted data key.\n Otherwise, the request to decrypt fails with an InvalidCiphertextException
. For more information, see Encryption Context in the\n Key Management Service Developer Guide.
Applications in Amazon Web Services Nitro Enclaves can call this operation by using the Amazon Web Services Nitro Enclaves Development Kit. For information about the supporting parameters, see How Amazon Web Services Nitro Enclaves use KMS in the Key Management Service Developer Guide.
\nThe KMS key that you use for this operation must be in a compatible key state. For\ndetails, see Key states of KMS keys in the Key Management Service Developer Guide.
\n\n How to use your data\n key\n
\nWe recommend that you use the following pattern to encrypt data locally in your\n application. You can write your own code or use a client-side encryption library, such as the\n Amazon Web Services Encryption SDK, the\n Amazon DynamoDB Encryption Client,\n or Amazon S3\n client-side encryption to do these tasks for you.
\nTo encrypt data outside of KMS:
\nUse the GenerateDataKey
operation to get a data key.
Use the plaintext data key (in the Plaintext
field of the response) to\n encrypt your data outside of KMS. Then erase the plaintext data key from memory.
Store the encrypted data key (in the CiphertextBlob
field of the\n response) with the encrypted data.
To decrypt data outside of KMS:
\nUse the Decrypt operation to decrypt the encrypted data key. The\n operation returns a plaintext copy of the data key.
\nUse the plaintext data key to decrypt data outside of KMS, then erase the plaintext\n data key from memory.
\n\n Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify\n the key ARN or alias ARN in the value of the KeyId
parameter.
\n Required permissions: kms:GenerateDataKey (key policy)
\n\n Related operations:\n
\n\n Decrypt\n
\n\n Encrypt\n
\n\n GenerateDataKeyPair\n
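The local encrypt/decrypt pattern above can be sketched as follows. This is an illustration only; production code should prefer the Encryption SDK or another client-side library, and the region, key alias, and AES-GCM choice are assumptions:

```ts
import { createCipheriv, createDecipheriv, randomBytes } from "node:crypto";
import { KMSClient, GenerateDataKeyCommand, DecryptCommand } from "@aws-sdk/client-kms";

const client = new KMSClient({ region: "us-east-1" }); // placeholder region
const KeyId = "alias/my-wrapping-key"; // placeholder: a symmetric encryption KMS key

async function encryptLocally(data: Buffer) {
  // 1. Get a 256-bit data key: a plaintext copy and a copy encrypted under the KMS key.
  const { Plaintext, CiphertextBlob } = await client.send(
    new GenerateDataKeyCommand({ KeyId, KeySpec: "AES_256" })
  );

  // 2. Encrypt locally with the plaintext data key, then let it fall out of scope.
  const iv = randomBytes(12);
  const cipher = createCipheriv("aes-256-gcm", Buffer.from(Plaintext!), iv);
  const encrypted = Buffer.concat([cipher.update(data), cipher.final()]);

  // 3. Store the encrypted data key alongside the encrypted data.
  return { encrypted, iv, tag: cipher.getAuthTag(), encryptedKey: CiphertextBlob! };
}

async function decryptLocally(stored: { encrypted: Buffer; iv: Buffer; tag: Buffer; encryptedKey: Uint8Array }) {
  // Ask KMS to decrypt the data key, then decrypt the data locally.
  const { Plaintext } = await client.send(new DecryptCommand({ CiphertextBlob: stored.encryptedKey }));
  const decipher = createDecipheriv("aes-256-gcm", Buffer.from(Plaintext!), stored.iv);
  decipher.setAuthTag(stored.tag);
  return Buffer.concat([decipher.update(stored.encrypted), decipher.final()]);
}
```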
\nReturns a unique asymmetric data key pair for use outside of KMS. This operation returns\n a plaintext public key, a plaintext private key, and a copy of the private key that is\n encrypted under the symmetric encryption KMS key you specify. You can use the data key pair to\n perform asymmetric cryptography and implement digital signatures outside of KMS. The bytes\n in the keys are random; they are not related to the caller or to the KMS key that is used to encrypt the\n private key.
\n\nYou can use the public key that GenerateDataKeyPair
returns to encrypt data\n or verify a signature outside of KMS. Then, store the encrypted private key with the data.\n When you are ready to decrypt data or sign a message, you can use the Decrypt operation to decrypt the encrypted private key.
To generate a data key pair, you must specify a symmetric encryption KMS key to encrypt\n the private key in a data key pair. You cannot use an asymmetric KMS key or a KMS key in a\n custom key store. To get the type and origin of your KMS key, use the DescribeKey\n operation.
\nUse the KeyPairSpec
parameter to choose an RSA or Elliptic Curve (ECC) data\n key pair. KMS recommends that you use ECC key pairs for signing, and use RSA key pairs for\n either encryption or signing, but not both. However, KMS cannot enforce any restrictions on\n the use of data key pairs outside of KMS.
If you are using the data key pair to encrypt data, or for any operation where you don't\n immediately need a private key, consider using the GenerateDataKeyPairWithoutPlaintext operation.\n GenerateDataKeyPairWithoutPlaintext
returns a plaintext public key and an\n encrypted private key, but omits the plaintext private key that you need only to decrypt\n ciphertext or sign a message. Later, when you need to decrypt the data or sign a message, use\n the Decrypt operation to decrypt the encrypted private key in the data key\n pair.
\n GenerateDataKeyPair
returns a unique data key pair for each request. The\n bytes in the keys are random; they are not related to the caller or the KMS key that is used to encrypt the\n private key. The public key is a DER-encoded X.509 SubjectPublicKeyInfo, as specified in\n RFC 5280. The private key is a\n DER-encoded PKCS8 PrivateKeyInfo, as specified in RFC 5958.
You can use an optional encryption context to add additional security to the encryption\n operation. If you specify an EncryptionContext
, you must specify the same\n encryption context (a case-sensitive exact match) when decrypting the encrypted data key.\n Otherwise, the request to decrypt fails with an InvalidCiphertextException
. For more information, see Encryption Context in the\n Key Management Service Developer Guide.
The KMS key that you use for this operation must be in a compatible key state. For\ndetails, see Key states of KMS keys in the Key Management Service Developer Guide.
\n\n Cross-account\n use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify\n the key ARN or alias ARN in the value of the KeyId
parameter.
\n Required permissions: kms:GenerateDataKeyPair (key policy)
\n\n Related operations:\n
\n\n Decrypt\n
\n\n Encrypt\n
\n\n GenerateDataKey\n
\nReturns a unique asymmetric data key pair for use outside of KMS. This operation returns\n a plaintext public key, a plaintext private key, and a copy of the private key that is\n encrypted under the symmetric encryption KMS key you specify. You can use the data key pair to\n perform asymmetric cryptography and implement digital signatures outside of KMS. The bytes\n in the keys are random; they are not related to the caller or to the KMS key that is used to encrypt the\n private key.
\n\nYou can use the public key that GenerateDataKeyPair
returns to encrypt data\n or verify a signature outside of KMS. Then, store the encrypted private key with the data.\n When you are ready to decrypt data or sign a message, you can use the Decrypt operation to decrypt the encrypted private key.
To generate a data key pair, you must specify a symmetric encryption KMS key to encrypt\n the private key in a data key pair. You cannot use an asymmetric KMS key or a KMS key in a\n custom key store. To get the type and origin of your KMS key, use the DescribeKey\n operation.
\nUse the KeyPairSpec
parameter to choose an RSA or Elliptic Curve (ECC) data\n key pair. In China Regions, you can also choose an SM2 data key pair. KMS recommends that you use\n ECC key pairs for signing, and use RSA and SM2 key pairs for either encryption or signing, but not both.\n However, KMS cannot enforce any restrictions on the use of data key pairs outside of KMS.
If you are using the data key pair to encrypt data, or for any operation where you don't\n immediately need a private key, consider using the GenerateDataKeyPairWithoutPlaintext operation.\n GenerateDataKeyPairWithoutPlaintext
returns a plaintext public key and an\n encrypted private key, but omits the plaintext private key that you need only to decrypt\n ciphertext or sign a message. Later, when you need to decrypt the data or sign a message, use\n the Decrypt operation to decrypt the encrypted private key in the data key\n pair.
\n GenerateDataKeyPair
returns a unique data key pair for each request. The\n bytes in the keys are random; they are not related to the caller or the KMS key that is used to encrypt the\n private key. The public key is a DER-encoded X.509 SubjectPublicKeyInfo, as specified in\n RFC 5280. The private key is a\n DER-encoded PKCS8 PrivateKeyInfo, as specified in RFC 5958.
You can use an optional encryption context to add additional security to the encryption\n operation. If you specify an EncryptionContext
, you must specify the same\n encryption context (a case-sensitive exact match) when decrypting the encrypted data key.\n Otherwise, the request to decrypt fails with an InvalidCiphertextException
. For more information, see Encryption Context in the\n Key Management Service Developer Guide.
The KMS key that you use for this operation must be in a compatible key state. For\ndetails, see Key states of KMS keys in the Key Management Service Developer Guide.
\n\n Cross-account\n use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify\n the key ARN or alias ARN in the value of the KeyId
parameter.
\n Required permissions: kms:GenerateDataKeyPair (key policy)
\n\n Related operations:\n
\n\n Decrypt\n
\n\n Encrypt\n
\n\n GenerateDataKey\n
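A hedged sketch of the flow (the key alias, region, and key pair spec are placeholders; the symmetric KMS key referenced by `KeyId` only wraps the private key):

```ts
import { KMSClient, GenerateDataKeyPairCommand, DecryptCommand } from "@aws-sdk/client-kms";

const client = new KMSClient({ region: "us-east-1" }); // placeholder region
const KeyId = "alias/key-pair-wrapping-key"; // placeholder: a symmetric encryption KMS key

// Returns a DER-encoded public key, a plaintext private key, and a copy of the
// private key encrypted under the symmetric KMS key above.
const { PublicKey, PrivateKeyPlaintext, PrivateKeyCiphertextBlob } = await client.send(
  new GenerateDataKeyPairCommand({ KeyId, KeyPairSpec: "ECC_NIST_P256" })
);

// Use PrivateKeyPlaintext now (then erase it from memory), keep PublicKey with the
// data, and store PrivateKeyCiphertextBlob for later.

// Later: recover the private key (DER-encoded PKCS8 PrivateKeyInfo) from the encrypted copy.
const { Plaintext: privateKey } = await client.send(
  new DecryptCommand({ CiphertextBlob: PrivateKeyCiphertextBlob })
);
```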
\nDetermines the type of data key pair that is generated.
\nThe KMS rule that restricts the use of asymmetric RSA KMS keys to encrypt and decrypt or to sign and verify (but not both), and the rule that permits you to use ECC KMS keys only to sign and verify, are not effective on data key pairs, which are used outside of KMS.
", + "smithy.api#documentation": "Determines the type of data key pair that is generated.
\nThe KMS rule that restricts the use of asymmetric RSA and SM2 KMS keys to encrypt and decrypt or to sign and verify (but not both), and the rule that permits you to use ECC KMS keys only to sign and verify, are not effective on data key pairs, which are used outside of KMS. The SM2 key spec is only available in China Regions. RSA and ECC asymmetric key pairs are also available in China Regions.
", "smithy.api#required": {} } }, @@ -1992,7 +2011,7 @@ } ], "traits": { - "smithy.api#documentation": "Returns a unique asymmetric data key pair for use outside of KMS. This operation returns\n a plaintext public key and a copy of the private key that is encrypted under the symmetric\n encryption KMS key you specify. Unlike GenerateDataKeyPair, this operation\n does not return a plaintext private key. The bytes in the keys are random; they are not related to the caller\n or to the KMS key that is used to encrypt the private key.
\nYou can use the public key that GenerateDataKeyPairWithoutPlaintext
returns\n to encrypt data or verify a signature outside of KMS. Then, store the encrypted private key\n with the data. When you are ready to decrypt data or sign a message, you can use the Decrypt operation to decrypt the encrypted private key.
To generate a data key pair, you must specify a symmetric encryption KMS key to encrypt\n the private key in a data key pair. You cannot use an asymmetric KMS key or a KMS key in a\n custom key store. To get the type and origin of your KMS key, use the DescribeKey\n operation.
\nUse the KeyPairSpec
parameter to choose an RSA or Elliptic Curve (ECC) data\n key pair. KMS recommends that you use ECC key pairs for signing, and use RSA key pairs for\n either encryption or signing, but not both. However, KMS cannot enforce any restrictions on\n the use of data key pairs outside of KMS.
\n GenerateDataKeyPairWithoutPlaintext
returns a unique data key pair for each\n request. The bytes in the key are not related to the caller or KMS key that is used to encrypt\n the private key. The public key is a DER-encoded X.509 SubjectPublicKeyInfo, as specified in\n RFC 5280.
You can use an optional encryption context to add additional security to the encryption\n operation. If you specify an EncryptionContext
, you must specify the same\n encryption context (a case-sensitive exact match) when decrypting the encrypted data key.\n Otherwise, the request to decrypt fails with an InvalidCiphertextException
. For more information, see Encryption Context in the\n Key Management Service Developer Guide.
The KMS key that you use for this operation must be in a compatible key state. For\ndetails, see Key states of KMS keys in the Key Management Service Developer Guide.
\n\n Cross-account\n use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify\n the key ARN or alias ARN in the value of the KeyId
parameter.
\n Required permissions: kms:GenerateDataKeyPairWithoutPlaintext (key\n policy)
\n\n Related operations:\n
\n\n Decrypt\n
\n\n Encrypt\n
\n\n GenerateDataKey\n
\n\n GenerateDataKeyPair\n
\nReturns a unique asymmetric data key pair for use outside of KMS. This operation returns\n a plaintext public key and a copy of the private key that is encrypted under the symmetric\n encryption KMS key you specify. Unlike GenerateDataKeyPair, this operation\n does not return a plaintext private key. The bytes in the keys are random; they are not related to the caller\n or to the KMS key that is used to encrypt the private key.
\nYou can use the public key that GenerateDataKeyPairWithoutPlaintext
returns\n to encrypt data or verify a signature outside of KMS. Then, store the encrypted private key\n with the data. When you are ready to decrypt data or sign a message, you can use the Decrypt operation to decrypt the encrypted private key.
To generate a data key pair, you must specify a symmetric encryption KMS key to encrypt\n the private key in a data key pair. You cannot use an asymmetric KMS key or a KMS key in a\n custom key store. To get the type and origin of your KMS key, use the DescribeKey\n operation.
\nUse the KeyPairSpec
parameter to choose an RSA or Elliptic Curve (ECC) data\n key pair. In China Regions, you can also choose an SM2 data key pair. KMS recommends that you \n use ECC key pairs for signing, and use RSA and SM2 key pairs for either encryption or signing, but not\n both. However, KMS cannot enforce any restrictions on the use of data key pairs outside of KMS.
\n GenerateDataKeyPairWithoutPlaintext
returns a unique data key pair for each\n request. The bytes in the key are not related to the caller or KMS key that is used to encrypt\n the private key. The public key is a DER-encoded X.509 SubjectPublicKeyInfo, as specified in\n RFC 5280.
You can use an optional encryption context to add additional security to the encryption\n operation. If you specify an EncryptionContext
, you must specify the same\n encryption context (a case-sensitive exact match) when decrypting the encrypted data key.\n Otherwise, the request to decrypt fails with an InvalidCiphertextException
. For more information, see Encryption Context in the\n Key Management Service Developer Guide.
The KMS key that you use for this operation must be in a compatible key state. For\ndetails, see Key states of KMS keys in the Key Management Service Developer Guide.
\n\n Cross-account\n use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify\n the key ARN or alias ARN in the value of the KeyId
parameter.
\n Required permissions: kms:GenerateDataKeyPairWithoutPlaintext (key\n policy)
\n\n Related operations:\n
\n\n Decrypt\n
\n\n Encrypt\n
\n\n GenerateDataKey\n
\n\n GenerateDataKeyPair\n
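For contrast with GenerateDataKeyPair, a brief sketch (placeholders as before); no plaintext private key comes back, so nothing sensitive has to be scrubbed from memory at generation time:

```ts
import { KMSClient, GenerateDataKeyPairWithoutPlaintextCommand } from "@aws-sdk/client-kms";

const client = new KMSClient({ region: "us-east-1" }); // placeholder region

// Only the public key and the encrypted private key are returned.
const { PublicKey, PrivateKeyCiphertextBlob } = await client.send(
  new GenerateDataKeyPairWithoutPlaintextCommand({
    KeyId: "alias/key-pair-wrapping-key", // placeholder symmetric encryption KMS key
    KeyPairSpec: "RSA_2048",
  })
);
// Encrypt with PublicKey now; call Decrypt on PrivateKeyCiphertextBlob only when
// the private key is actually needed.
```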
\nDetermines the type of data key pair that is generated.
\nThe KMS rule that restricts the use of asymmetric RSA KMS keys to encrypt and decrypt or to sign and verify (but not both), and the rule that permits you to use ECC KMS keys only to sign and verify, are not effective on data key pairs, which are used outside of KMS.
", + "smithy.api#documentation": "Determines the type of data key pair that is generated.
\nThe KMS rule that restricts the use of asymmetric RSA and SM2 KMS keys to encrypt and decrypt or to sign and verify (but not both), and the rule that permits you to use ECC KMS keys only to sign and verify, are not effective on data key pairs, which are used outside of KMS. The SM2 key spec is only available in China Regions. RSA and ECC asymmetric key pairs are also available in China Regions.
", "smithy.api#required": {} } }, @@ -2318,7 +2337,7 @@ } ], "traits": { - "smithy.api#documentation": "Returns a random byte string that is cryptographically secure.
\nBy default, the random byte string is generated in KMS. To generate the byte string in\n the CloudHSM cluster that is associated with a custom key store, specify the custom key store\n ID.
\nApplications in Amazon Web Services Nitro Enclaves can call this operation by using the Amazon Web Services Nitro Enclaves Development Kit. For information about the supporting parameters, see How Amazon Web Services Nitro Enclaves use KMS in the Key Management Service Developer Guide.
\nFor more information about entropy and random number generation, see\n Key Management Service Cryptographic Details.
\n\n\n Required permissions: kms:GenerateRandom (IAM policy)
" + "smithy.api#documentation": "Returns a random byte string that is cryptographically secure.
\nYou must use the NumberOfBytes
parameter to specify the length of the random\n byte string. There is no default value for string length.
By default, the random byte string is generated in KMS. To generate the byte string in\n the CloudHSM cluster that is associated with a custom key store, specify the custom key store\n ID.
\nApplications in Amazon Web Services Nitro Enclaves can call this operation by using the Amazon Web Services Nitro Enclaves Development Kit. For information about the supporting parameters, see How Amazon Web Services Nitro Enclaves use KMS in the Key Management Service Developer Guide.
\nFor more information about entropy and random number generation, see\n Key Management Service Cryptographic Details.
\n\n Cross-account use: Not applicable. GenerateRandom
does not use any account-specific resources, such as KMS keys.
\n Required permissions: kms:GenerateRandom (IAM policy)
" } }, "com.amazonaws.kms#GenerateRandomRequest": { @@ -2327,7 +2346,7 @@ "NumberOfBytes": { "target": "com.amazonaws.kms#NumberOfBytesType", "traits": { - "smithy.api#documentation": "The length of the byte string.
" + "smithy.api#documentation": "The length of the random byte string. This parameter is required.
" } }, "CustomKeyStoreId": { @@ -2591,7 +2610,7 @@ } ], "traits": { - "smithy.api#documentation": "Returns the public key of an asymmetric KMS key. Unlike the private key of a asymmetric\n KMS key, which never leaves KMS unencrypted, callers with kms:GetPublicKey
\n permission can download the public key of an asymmetric KMS key. You can share the public key\n to allow others to encrypt messages and verify signatures outside of KMS.\n For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.
You do not need to download the public key. Instead, you can use the public key within\n KMS by calling the Encrypt, ReEncrypt, or Verify operations with the identifier of an asymmetric KMS key. When you use the\n public key within KMS, you benefit from the authentication, authorization, and logging that\n are part of every KMS operation. You also reduce of risk of encrypting data that cannot be\n decrypted. These features are not effective outside of KMS. For details, see Special\n Considerations for Downloading Public Keys.
\nTo help you use the public key safely outside of KMS, GetPublicKey
returns\n important information about the public key in the response, including:
\n KeySpec: The type of key material in the public key, such as\n RSA_4096
or ECC_NIST_P521
.
\n KeyUsage: Whether the key is used for encryption or signing.
\n\n EncryptionAlgorithms or SigningAlgorithms: A list of the encryption algorithms or the signing\n algorithms for the key.
\nAlthough KMS cannot enforce these restrictions on external operations, it is crucial\n that you use this information to prevent the public key from being used improperly. For\n example, you can prevent a public signing key from being used encrypt data, or prevent a\n public key from being used with an encryption algorithm that is not supported by KMS. You\n can also avoid errors, such as using the wrong signing algorithm in a verification\n operation.
\nThe KMS key that you use for this operation must be in a compatible key state. For\ndetails, see Key states of KMS keys in the Key Management Service Developer Guide.
\n\n Cross-account use:\n Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify\n the key ARN or alias ARN in the value of the KeyId
parameter.
\n Required permissions: kms:GetPublicKey (key policy)
\n\n Related operations: CreateKey\n
" + "smithy.api#documentation": "Returns the public key of an asymmetric KMS key. Unlike the private key of a asymmetric\n KMS key, which never leaves KMS unencrypted, callers with kms:GetPublicKey
\n permission can download the public key of an asymmetric KMS key. You can share the public key\n to allow others to encrypt messages and verify signatures outside of KMS.\n For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.
You do not need to download the public key. Instead, you can use the public key within\n KMS by calling the Encrypt, ReEncrypt, or Verify operations with the identifier of an asymmetric KMS key. When you use the\n public key within KMS, you benefit from the authentication, authorization, and logging that\n are part of every KMS operation. You also reduce of risk of encrypting data that cannot be\n decrypted. These features are not effective outside of KMS.
\nTo verify a signature outside of KMS with an SM2 public key (China Regions only), you must \n specify the distinguishing ID. By default, KMS uses 1234567812345678
as the \n distinguishing ID. For more information, see Offline verification\n with SM2 key pairs.
To help you use the public key safely outside of KMS, GetPublicKey
returns\n important information about the public key in the response, including:
\n KeySpec: The type of key material in the public key, such as\n RSA_4096
or ECC_NIST_P521
.
\n KeyUsage: Whether the key is used for encryption or signing.
\n\n EncryptionAlgorithms or SigningAlgorithms: A list of the encryption algorithms or the signing\n algorithms for the key.
\nAlthough KMS cannot enforce these restrictions on external operations, it is crucial\n that you use this information to prevent the public key from being used improperly. For\n example, you can prevent a public signing key from being used encrypt data, or prevent a\n public key from being used with an encryption algorithm that is not supported by KMS. You\n can also avoid errors, such as using the wrong signing algorithm in a verification\n operation.
\nThe KMS key that you use for this operation must be in a compatible key state. For\ndetails, see Key states of KMS keys in the Key Management Service Developer Guide.
\n\n Cross-account use:\n Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify\n the key ARN or alias ARN in the value of the KeyId
parameter.
\n Required permissions: kms:GetPublicKey (key policy)
\n\n Related operations: CreateKey\n
" } }, "com.amazonaws.kms#GetPublicKeyRequest": { @@ -3459,6 +3478,10 @@ { "value": "HMAC_512", "name": "HMAC_512" + }, + { + "value": "SM2", + "name": "SM2" } ] } @@ -3930,7 +3953,13 @@ } ], "traits": { - "smithy.api#documentation": "Returns all tags on the specified KMS key.
\nFor general information about tags, including the format and syntax, see Tagging Amazon Web Services resources in\n the Amazon Web Services General Reference. For information about using\n tags in KMS, see Tagging\n keys.
\n\n Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.
\n\n\n Required permissions: kms:ListResourceTags (key policy)
\n\n Related operations:\n
\n\n CreateKey\n
\n\n ReplicateKey\n
\n\n TagResource\n
\n\n UntagResource\n
\nReturns all tags on the specified KMS key.
\nFor general information about tags, including the format and syntax, see Tagging Amazon Web Services resources in\n the Amazon Web Services General Reference. For information about using\n tags in KMS, see Tagging\n keys.
\n\n Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.
\n\n\n Required permissions: kms:ListResourceTags (key policy)
\n\n Related operations:\n
\n\n CreateKey\n
\n\n ReplicateKey\n
\n\n TagResource\n
\n\n UntagResource\n
\nReturns information about all grants in the Amazon Web Services account and Region that have the\n specified retiring principal.
\nYou can specify any principal in your Amazon Web Services account. The grants that are returned include\n grants for KMS keys in your Amazon Web Services account and other Amazon Web Services accounts. You might use this\n operation to determine which grants you may retire. To retire a grant, use the RetireGrant operation.
\nFor detailed information about grants, including grant terminology, see Grants in KMS in the\n \n Key Management Service Developer Guide\n . For examples of working with grants in several\n programming languages, see Programming grants.
\n\n Cross-account use: You must specify a principal in your\n Amazon Web Services account. However, this operation can return grants in any Amazon Web Services account. You do not need\n kms:ListRetirableGrants
permission (or any other additional permission) in any\n Amazon Web Services account other than your own.
\n Required permissions: kms:ListRetirableGrants (IAM policy) in your\n Amazon Web Services account.
\n\n Related operations:\n
\n\n CreateGrant\n
\n\n ListGrants\n
\n\n RetireGrant\n
\n\n RevokeGrant\n
\nReturns information about all grants in the Amazon Web Services account and Region that have the\n specified retiring principal.
\nYou can specify any principal in your Amazon Web Services account. The grants that are returned include\n grants for KMS keys in your Amazon Web Services account and other Amazon Web Services accounts. You might use this\n operation to determine which grants you may retire. To retire a grant, use the RetireGrant operation.
\nFor detailed information about grants, including grant terminology, see Grants in KMS in the\n \n Key Management Service Developer Guide\n . For examples of working with grants in several\n programming languages, see Programming grants.
\n\n Cross-account use: You must specify a principal in your\n Amazon Web Services account. However, this operation can return grants in any Amazon Web Services account. You do not need\n kms:ListRetirableGrants
permission (or any other additional permission) in any\n Amazon Web Services account other than your own.
\n Required permissions: kms:ListRetirableGrants (IAM policy) in your\n Amazon Web Services account.
\n\n Related operations:\n
\n\n CreateGrant\n
\n\n ListGrants\n
\n\n RetireGrant\n
\n\n RevokeGrant\n
\nThe key policy to attach to the KMS key.
\nThe key policy must meet the following criteria:
\nIf you don't set BypassPolicyLockoutSafetyCheck
to true, the key policy\n must allow the principal that is making the PutKeyPolicy
request to make a\n subsequent PutKeyPolicy
request on the KMS key. This reduces the risk that\n the KMS key becomes unmanageable. For more information, refer to the scenario in the\n Default Key Policy section of the Key Management Service Developer Guide.
Each statement in the key policy must contain one or more principals. The principals\n in the key policy must exist and be visible to KMS. When you create a new Amazon Web Services\n principal (for example, an IAM user or role), you might need to enforce a delay before\n including the new principal in a key policy because the new principal might not be\n immediately visible to KMS. For more information, see Changes that I make are not always immediately visible in the Amazon Web Services\n Identity and Access Management User Guide.
\nA key policy document must conform to the following rules.
\nUp to 32 kilobytes (32768 bytes)
\nMust be UTF-8 encoded
\nThe only Unicode characters that are permitted in a key policy document are the horizontal tab (U+0009), linefeed (U+000A), carriage return (U+000D), and characters in the range U+0020 to U+00FF.
\nThe Sid
element in a key policy statement can include spaces. (Spaces are\n prohibited in the Sid
element of an IAM policy document.)
The key policy to attach to the KMS key.
\nThe key policy must meet the following criteria:
\nIf you don't set BypassPolicyLockoutSafetyCheck
to true, the key policy\n must allow the principal that is making the PutKeyPolicy
request to make a\n subsequent PutKeyPolicy
request on the KMS key. This reduces the risk that\n the KMS key becomes unmanageable. For more information, refer to the scenario in the\n Default Key Policy section of the Key Management Service Developer Guide.
Each statement in the key policy must contain one or more principals. The principals\n in the key policy must exist and be visible to KMS. When you create a new Amazon Web Services\n principal (for example, an IAM user or role), you might need to enforce a delay before\n including the new principal in a key policy because the new principal might not be\n immediately visible to KMS. For more information, see Changes that I make are not always immediately visible in the Amazon Web Services\n Identity and Access Management User Guide.
\nA key policy document can include only the following characters:
\nPrintable ASCII characters from the space character (\\u0020
) through the end of the ASCII character range.
Printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF
).
The tab (\\u0009
), line feed (\\u000A
), and carriage return (\\u000D
) special characters
For information about key policies, see Key policies in KMS in the\n Key Management Service Developer Guide. For help writing and formatting a JSON policy document, see the IAM JSON Policy Reference in the \n Identity and Access Management User Guide\n .
", "smithy.api#required": {} } }, @@ -4566,7 +4601,7 @@ "Policy": { "target": "com.amazonaws.kms#PolicyType", "traits": { - "smithy.api#documentation": "The key policy to attach to the KMS key. This parameter is optional. If you do not provide\n a key policy, KMS attaches the default key policy to the\n KMS key.
\nThe key policy is not a shared property of multi-Region keys. You can specify the same key\n policy or a different key policy for each key in a set of related multi-Region keys. KMS\n does not synchronize this property.
\nIf you provide a key policy, it must meet the following criteria:
\nIf you don't set BypassPolicyLockoutSafetyCheck
to true, the key policy\n must give the caller kms:PutKeyPolicy
permission on the replica key. This\n reduces the risk that the KMS key becomes unmanageable. For more information, refer to the\n scenario in the Default Key Policy section of the \n Key Management Service Developer Guide\n .
Each statement in the key policy must contain one or more principals. The principals\n in the key policy must exist and be visible to KMS. When you create a new Amazon Web Services\n principal (for example, an IAM user or role), you might need to enforce a delay before\n including the new principal in a key policy because the new principal might not be\n immediately visible to KMS. For more information, see Changes that I make are not always immediately visible in the\n \n Identity and Access Management User Guide\n .
\nA key policy document must conform to the following rules.
\nUp to 32 kilobytes (32768 bytes)
\nMust be UTF-8 encoded
\nThe only Unicode characters that are permitted in a key policy document are the horizontal tab (U+0009), linefeed (U+000A), carriage return (U+000D), and characters in the range U+0020 to U+00FF.
\nThe Sid
element in a key policy statement can include spaces. (Spaces are\n prohibited in the Sid
element of an IAM policy document.)
The key policy to attach to the KMS key. This parameter is optional. If you do not provide\n a key policy, KMS attaches the default key policy to the\n KMS key.
\nThe key policy is not a shared property of multi-Region keys. You can specify the same key\n policy or a different key policy for each key in a set of related multi-Region keys. KMS\n does not synchronize this property.
\nIf you provide a key policy, it must meet the following criteria:
\nIf you don't set BypassPolicyLockoutSafetyCheck
to true, the key policy\n must give the caller kms:PutKeyPolicy
permission on the replica key. This\n reduces the risk that the KMS key becomes unmanageable. For more information, refer to the\n scenario in the Default Key Policy section of the \n Key Management Service Developer Guide\n .
Each statement in the key policy must contain one or more principals. The principals\n in the key policy must exist and be visible to KMS. When you create a new Amazon Web Services\n principal (for example, an IAM user or role), you might need to enforce a delay before\n including the new principal in a key policy because the new principal might not be\n immediately visible to KMS. For more information, see Changes that I make are not always immediately visible in the\n \n Identity and Access Management User Guide\n .
\nA key policy document can include only the following characters:
\nPrintable ASCII characters from the space character (\\u0020
) through the end of the ASCII character range.
Printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF
).
The tab (\\u0009
), line feed (\\u000A
), and carriage return (\\u000D
) special characters
For information about key policies, see Key policies in KMS in the\n Key Management Service Developer Guide. For help writing and formatting a JSON policy document, see the IAM JSON Policy Reference in the \n Identity and Access Management User Guide\n .
" } }, "BypassPolicyLockoutSafetyCheck": { @@ -4935,6 +4970,10 @@ { "value": "ECDSA_SHA_512", "name": "ECDSA_SHA_512" + }, + { + "value": "SM2DSA", + "name": "SM2DSA" } ] } @@ -5348,7 +5387,7 @@ "AliasName": { "target": "com.amazonaws.kms#AliasNameType", "traits": { - "smithy.api#documentation": "Identifies the alias that is changing its KMS key. This value must begin with\n alias/
followed by the alias name, such as alias/ExampleAlias
. You\n cannot use UpdateAlias to change the alias name.
Identifies the alias that is changing its KMS key. This value must begin with\n alias/
followed by the alias name, such as alias/ExampleAlias
. You\n cannot use UpdateAlias
to change the alias name.
Changes the properties of a custom key store. Use the CustomKeyStoreId
\n parameter to identify the custom key store you want to edit. Use the remaining parameters to\n change the properties of the custom key store.
You can only update a custom key store that is disconnected. To disconnect the custom key\n store, use DisconnectCustomKeyStore. To reconnect the custom key store after\n the update completes, use ConnectCustomKeyStore. To find the connection\n state of a custom key store, use the DescribeCustomKeyStores\n operation.
\nThe CustomKeyStoreId
parameter is required in all commands. Use the other\n parameters of UpdateCustomKeyStore
to edit your key store settings.
Use the NewCustomKeyStoreName
parameter to change the friendly name of\n the custom key store to the value that you specify.
\n
Use the KeyStorePassword
parameter tell KMS the current password of the\n \n kmsuser
crypto user (CU) in the associated CloudHSM cluster. You\n can use this parameter to fix connection\n failures that occur when KMS cannot log into the associated cluster because\n the kmsuser
password has changed. This value does not change the password in\n the CloudHSM cluster.
\n
Use the CloudHsmClusterId
parameter to associate the custom key store\n with a different, but related, CloudHSM cluster. You can use this parameter to repair a\n custom key store if its CloudHSM cluster becomes corrupted or is deleted, or when you need to\n create or restore a cluster from a backup.
If the operation succeeds, it returns a JSON object with no\nproperties.
\nThis operation is part of the Custom Key Store feature feature in KMS, which\ncombines the convenience and extensive integration of KMS with the isolation and control of a\nsingle-tenant key store.
\n\n Cross-account\n use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.
\n\n Required permissions: kms:UpdateCustomKeyStore (IAM policy)
\n\n Related operations:\n
\n\n CreateCustomKeyStore\n
\n\n DeleteCustomKeyStore\n
\nChanges the properties of a custom key store. Use the CustomKeyStoreId
\n parameter to identify the custom key store you want to edit. Use the remaining parameters to\n change the properties of the custom key store.
You can only update a custom key store that is disconnected. To disconnect the custom key\n store, use DisconnectCustomKeyStore. To reconnect the custom key store after\n the update completes, use ConnectCustomKeyStore. To find the connection\n state of a custom key store, use the DescribeCustomKeyStores\n operation.
\nThe CustomKeyStoreId
parameter is required in all commands. Use the other\n parameters of UpdateCustomKeyStore
to edit your key store settings.
Use the NewCustomKeyStoreName
parameter to change the friendly name of\n the custom key store to the value that you specify.
\n
Use the KeyStorePassword
parameter tell KMS the current password of the\n \n kmsuser
crypto user (CU) in the associated CloudHSM cluster. You\n can use this parameter to fix connection\n failures that occur when KMS cannot log into the associated cluster because\n the kmsuser
password has changed. This value does not change the password in\n the CloudHSM cluster.
\n
Use the CloudHsmClusterId
parameter to associate the custom key store\n with a different, but related, CloudHSM cluster. You can use this parameter to repair a\n custom key store if its CloudHSM cluster becomes corrupted or is deleted, or when you need to\n create or restore a cluster from a backup.
If the operation succeeds, it returns a JSON object with no\nproperties.
\nThis operation is part of the custom key store feature feature in KMS, which\ncombines the convenience and extensive integration of KMS with the isolation and control of a\nsingle-tenant key store.
\n\n Cross-account\n use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.
\n\n Required permissions: kms:UpdateCustomKeyStore (IAM policy)
\n\n Related operations:\n
\n\n CreateCustomKeyStore\n
\n\n DeleteCustomKeyStore\n
\nVerifies a digital signature that was generated by the Sign operation.
\n \nVerification confirms that an authorized user signed the message with the specified KMS\n key and signing algorithm, and the message hasn't changed since it was signed. If the\n signature is verified, the value of the SignatureValid
field in the response is\n True
. If the signature verification fails, the Verify
operation\n fails with an KMSInvalidSignatureException
exception.
A digital signature is generated by using the private key in an asymmetric KMS key. The\n signature is verified by using the public key in the same asymmetric KMS key.\n For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.
\nTo verify a digital signature, you can use the Verify
operation. Specify the\n same asymmetric KMS key, message, and signing algorithm that were used to produce the\n signature.
You can also verify the digital signature by using the public key of the KMS key outside\n of KMS. Use the GetPublicKey operation to download the public key in the\n asymmetric KMS key and then use the public key to verify the signature outside of KMS. The\n advantage of using the Verify
operation is that it is performed within KMS. As\n a result, it's easy to call, the operation is performed within the FIPS boundary, it is logged\n in CloudTrail, and you can use key policy and IAM policy to determine who is authorized to use\n the KMS key to verify signatures.
The KMS key that you use for this operation must be in a compatible key state. For\ndetails, see Key states of KMS keys in the Key Management Service Developer Guide.
\n\n Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify\n the key ARN or alias ARN in the value of the KeyId
parameter.
\n Required permissions: kms:Verify (key policy)
\n\n Related operations: Sign\n
" + "smithy.api#documentation": "Verifies a digital signature that was generated by the Sign operation.
\n \nVerification confirms that an authorized user signed the message with the specified KMS\n key and signing algorithm, and the message hasn't changed since it was signed. If the\n signature is verified, the value of the SignatureValid
field in the response is\n True
. If the signature verification fails, the Verify
operation\n fails with an KMSInvalidSignatureException
exception.
A digital signature is generated by using the private key in an asymmetric KMS key. The\n signature is verified by using the public key in the same asymmetric KMS key.\n For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.
\nTo verify a digital signature, you can use the Verify
operation. Specify the\n same asymmetric KMS key, message, and signing algorithm that were used to produce the\n signature.
You can also verify the digital signature by using the public key of the KMS key outside\n of KMS. Use the GetPublicKey operation to download the public key in the\n asymmetric KMS key and then use the public key to verify the signature outside of KMS. To \n verify a signature outside of KMS with an SM2 public key, you must specify the distinguishing \n ID. By default, KMS uses 1234567812345678
as the distinguishing ID. For more \n information, see Offline\n verification with SM2 key pairs in Key Management Service Developer Guide. The\n advantage of using the Verify
operation is that it is performed within KMS. As\n a result, it's easy to call, the operation is performed within the FIPS boundary, it is logged\n in CloudTrail, and you can use key policy and IAM policy to determine who is authorized to use\n the KMS key to verify signatures.
The KMS key that you use for this operation must be in a compatible key state. For\ndetails, see Key states of KMS keys in the Key Management Service Developer Guide.
\n\n Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify\n the key ARN or alias ARN in the value of the KeyId
parameter.
\n Required permissions: kms:Verify (key policy)
\n\n Related operations: Sign\n
" } }, "com.amazonaws.kms#VerifyMac": { From e65c9b2e1203291d68d0fdb11d1b467d83e37cf5 Mon Sep 17 00:00:00 2001 From: awstoolsAmazon Web Services Single Sign On (SSO) is a cloud SSO service that makes it easy to centrally manage SSO -access to multiple Amazon Web Services accounts and business applications. This guide provides information on -SSO operations which could be used for access management of Amazon Web Services accounts. For information about -Amazon Web Services SSO features, see the Amazon Web Services Single Sign-On User Guide.
-Many operations in the SSO APIs rely on identifiers for users and groups, known as -principals. For more information about how to work with principals and principal IDs in Amazon Web Services SSO, -see the Amazon Web Services SSO Identity Store API -Reference.
+ ## Installing @@ -33,16 +26,19 @@ using your favorite package manager: The AWS SDK is modulized by clients and commands. To send a request, you only need to import the `SSOAdminClient` and -the commands you need, for example `AttachManagedPolicyToPermissionSetCommand`: +the commands you need, for example `AttachCustomerManagedPolicyReferenceToPermissionSetCommand`: ```js // ES5 example -const { SSOAdminClient, AttachManagedPolicyToPermissionSetCommand } = require("@aws-sdk/client-sso-admin"); +const { + SSOAdminClient, + AttachCustomerManagedPolicyReferenceToPermissionSetCommand, +} = require("@aws-sdk/client-sso-admin"); ``` ```ts // ES6+ example -import { SSOAdminClient, AttachManagedPolicyToPermissionSetCommand } from "@aws-sdk/client-sso-admin"; +import { SSOAdminClient, AttachCustomerManagedPolicyReferenceToPermissionSetCommand } from "@aws-sdk/client-sso-admin"; ``` ### Usage @@ -61,7 +57,7 @@ const client = new SSOAdminClient({ region: "REGION" }); const params = { /** input parameters */ }; -const command = new AttachManagedPolicyToPermissionSetCommand(params); +const command = new AttachCustomerManagedPolicyReferenceToPermissionSetCommand(params); ``` #### Async/await @@ -140,7 +136,7 @@ const client = new AWS.SSOAdmin({ region: "REGION" }); // async/await. try { - const data = await client.attachManagedPolicyToPermissionSet(params); + const data = await client.attachCustomerManagedPolicyReferenceToPermissionSet(params); // process data. } catch (error) { // error handling. @@ -148,7 +144,7 @@ try { // Promises. client - .attachManagedPolicyToPermissionSet(params) + .attachCustomerManagedPolicyReferenceToPermissionSet(params) .then((data) => { // process data. }) @@ -157,7 +153,7 @@ client }); // callbacks. -client.attachManagedPolicyToPermissionSet(params, (err, data) => { +client.attachCustomerManagedPolicyReferenceToPermissionSet(params, (err, data) => { // process err and data. 
}); ``` diff --git a/clients/client-sso-admin/src/SSOAdmin.ts b/clients/client-sso-admin/src/SSOAdmin.ts index 00a9c6a0171af..f32c8c2a7eb71 100644 --- a/clients/client-sso-admin/src/SSOAdmin.ts +++ b/clients/client-sso-admin/src/SSOAdmin.ts @@ -1,6 +1,11 @@ // smithy-typescript generated code import { HttpHandlerOptions as __HttpHandlerOptions } from "@aws-sdk/types"; +import { + AttachCustomerManagedPolicyReferenceToPermissionSetCommand, + AttachCustomerManagedPolicyReferenceToPermissionSetCommandInput, + AttachCustomerManagedPolicyReferenceToPermissionSetCommandOutput, +} from "./commands/AttachCustomerManagedPolicyReferenceToPermissionSetCommand"; import { AttachManagedPolicyToPermissionSetCommand, AttachManagedPolicyToPermissionSetCommandInput, @@ -36,6 +41,11 @@ import { DeleteInstanceAccessControlAttributeConfigurationCommandInput, DeleteInstanceAccessControlAttributeConfigurationCommandOutput, } from "./commands/DeleteInstanceAccessControlAttributeConfigurationCommand"; +import { + DeletePermissionsBoundaryFromPermissionSetCommand, + DeletePermissionsBoundaryFromPermissionSetCommandInput, + DeletePermissionsBoundaryFromPermissionSetCommandOutput, +} from "./commands/DeletePermissionsBoundaryFromPermissionSetCommand"; import { DeletePermissionSetCommand, DeletePermissionSetCommandInput, @@ -66,6 +76,11 @@ import { DescribePermissionSetProvisioningStatusCommandInput, DescribePermissionSetProvisioningStatusCommandOutput, } from "./commands/DescribePermissionSetProvisioningStatusCommand"; +import { + DetachCustomerManagedPolicyReferenceFromPermissionSetCommand, + DetachCustomerManagedPolicyReferenceFromPermissionSetCommandInput, + DetachCustomerManagedPolicyReferenceFromPermissionSetCommandOutput, +} from "./commands/DetachCustomerManagedPolicyReferenceFromPermissionSetCommand"; import { DetachManagedPolicyFromPermissionSetCommand, DetachManagedPolicyFromPermissionSetCommandInput, @@ -76,6 +91,11 @@ import { GetInlinePolicyForPermissionSetCommandInput, GetInlinePolicyForPermissionSetCommandOutput, } from "./commands/GetInlinePolicyForPermissionSetCommand"; +import { + GetPermissionsBoundaryForPermissionSetCommand, + GetPermissionsBoundaryForPermissionSetCommandInput, + GetPermissionsBoundaryForPermissionSetCommandOutput, +} from "./commands/GetPermissionsBoundaryForPermissionSetCommand"; import { ListAccountAssignmentCreationStatusCommand, ListAccountAssignmentCreationStatusCommandInput, @@ -96,6 +116,11 @@ import { ListAccountsForProvisionedPermissionSetCommandInput, ListAccountsForProvisionedPermissionSetCommandOutput, } from "./commands/ListAccountsForProvisionedPermissionSetCommand"; +import { + ListCustomerManagedPolicyReferencesInPermissionSetCommand, + ListCustomerManagedPolicyReferencesInPermissionSetCommandInput, + ListCustomerManagedPolicyReferencesInPermissionSetCommandOutput, +} from "./commands/ListCustomerManagedPolicyReferencesInPermissionSetCommand"; import { ListInstancesCommand, ListInstancesCommandInput, @@ -136,6 +161,11 @@ import { PutInlinePolicyToPermissionSetCommandInput, PutInlinePolicyToPermissionSetCommandOutput, } from "./commands/PutInlinePolicyToPermissionSetCommand"; +import { + PutPermissionsBoundaryToPermissionSetCommand, + PutPermissionsBoundaryToPermissionSetCommandInput, + PutPermissionsBoundaryToPermissionSetCommandOutput, +} from "./commands/PutPermissionsBoundaryToPermissionSetCommand"; import { TagResourceCommand, TagResourceCommandInput, TagResourceCommandOutput } from "./commands/TagResourceCommand"; import { UntagResourceCommand, @@ -155,18 
+185,45 @@ import { import { SSOAdminClient } from "./SSOAdminClient"; /** - *Amazon Web Services Single Sign On (SSO) is a cloud SSO service that makes it easy to centrally manage SSO - * access to multiple Amazon Web Services accounts and business applications. This guide provides information on - * SSO operations which could be used for access management of Amazon Web Services accounts. For information about - * Amazon Web Services SSO features, see the Amazon Web Services Single Sign-On User Guide.
- *Many operations in the SSO APIs rely on identifiers for users and groups, known as - * principals. For more information about how to work with principals and principal IDs in Amazon Web Services SSO, - * see the Amazon Web Services SSO Identity Store API - * Reference.
+ * */ export class SSOAdmin extends SSOAdminClient { /** - *Attaches an IAM managed policy ARN to a permission set.
+ *Attaches the specified IAM customer managed policy to the specified PermissionSet.
+ */ + public attachCustomerManagedPolicyReferenceToPermissionSet( + args: AttachCustomerManagedPolicyReferenceToPermissionSetCommandInput, + options?: __HttpHandlerOptions + ): PromiseAttaches an Amazon Web Services managed IAM policy ARN to a permission set.
*If the permission set is already referenced by one or more account assignments, you will
* need to call Deletes the permissions boundary from a specified PermissionSet. Deletes the specified permission set. Detaches the attached IAM managed policy ARN from the specified permission set. Detaches the specified IAM customer managed policy from the specified PermissionSet. Detaches the attached Amazon Web Services managed IAM policy ARN from the specified permission set. Obtains the permissions boundary for a specified PermissionSet. Lists the status of the Amazon Web Services account assignment creation requests for a specified SSO
* instance. Lists all IAM customer managed policies attached to a specified PermissionSet. Lists the SSO instances that the caller has access to. Lists the IAM managed policy that is attached to a specified permission set. Lists the Amazon Web Services managed IAM policy that is attached to a specified permission set. Attaches an Amazon Web Services managed or customer managed IAM policy to the specified PermissionSet as a permissions boundary. Associates a set of tags with a specified resource. Amazon Web Services Single Sign On (SSO) is a cloud SSO service that makes it easy to centrally manage SSO
- * access to multiple Amazon Web Services accounts and business applications. This guide provides information on
- * SSO operations which could be used for access management of Amazon Web Services accounts. For information about
- * Amazon Web Services SSO features, see the Amazon Web Services Single Sign-On User Guide. Many operations in the SSO APIs rely on identifiers for users and groups, known as
- * principals. For more information about how to work with principals and principal IDs in Amazon Web Services SSO,
- * see the Amazon Web Services SSO Identity Store API
- * Reference. Attaches the specified IAM customer managed policy to the specified PermissionSet. Attaches an IAM managed policy ARN to a permission set. Attaches an Amazon Web Services managed IAM policy ARN to a permission set. If the permission set is already referenced by one or more account assignments, you will
* need to call Deletes the permissions boundary from a specified PermissionSet. Detaches the specified IAM customer managed policy from the specified PermissionSet. Detaches the attached IAM managed policy ARN from the specified permission set. Detaches the attached Amazon Web Services managed IAM policy ARN from the specified permission set. Obtains the permissions boundary for a specified PermissionSet. Lists all IAM customer managed policies attached to a specified PermissionSet. Lists the IAM managed policy that is attached to a specified permission set. Lists the Amazon Web Services managed IAM policy that is attached to a specified permission set. Attaches an Amazon Web Services managed or customer managed IAM policy to the specified PermissionSet as a permissions boundary. The value used for mapping a specified attribute to an identity source. The value used for mapping a specified attribute to an identity source. For more information, see Attribute mappings in the Amazon Web Services Single Sign-On User Guide. A structure that stores the details of the IAM managed policy. Specifies the name and path of the IAM customer managed policy. You must have an IAM policy that matches the name and path in each Amazon Web Services account where you want to deploy your permission set. The name of the IAM managed policy. The name of the policy document. The ARN of the IAM managed policy. For more information about ARNs, see Amazon Resource
- * Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference. The path for the policy. The default is The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource
- * Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference. The ARN of the SSO instance under which the operation will be executed. The ARN of the PermissionSet that the managed policy should be attached
- * to. The ARN of the The IAM managed policy ARN to be attached to a permission set. Specifies the name and path of the IAM customer managed policy. You must have an IAM policy that matches the name and path in each Amazon Web Services account where you want to deploy your permission set. A structure that stores the details of the Amazon Web Services managed IAM policy. The name of the Amazon Web Services managed IAM policy. The ARN of the Amazon Web Services managed IAM policy. For more information about ARNs, see Amazon Resource
+ * Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference. The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource
+ * Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference. The ARN of the PermissionSet that the managed policy should be attached
+ * to. The Amazon Web Services managed policy ARN to be attached to a permission set. The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource
@@ -543,12 +604,12 @@ export interface Tag {
/**
* The key for the tag. The value of the tag. The ARN of the SSO instance under which the operation will be executed. The ARN of the The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource
@@ -1067,6 +1160,43 @@ export namespace DescribePermissionSetProvisioningStatusResponse {
});
}
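The hunk below adds the request shape for the matching detach operation. As a rough sketch under the same placeholder ARNs, the aggregated client method added in SSOAdmin.ts above could be called like this:

```ts
import { SSOAdmin } from "@aws-sdk/client-sso-admin";

const ssoAdmin = new SSOAdmin({ region: "us-east-1" });

// Placeholder ARNs; the reference must match the policy that was attached.
export const detachCustomerManagedPolicy = async (): Promise<void> => {
  await ssoAdmin.detachCustomerManagedPolicyReferenceFromPermissionSet({
    InstanceArn: "arn:aws:sso:::instance/ssoins-EXAMPLE",
    PermissionSetArn: "arn:aws:sso:::permissionSet/ssoins-EXAMPLE/ps-EXAMPLE",
    CustomerManagedPolicyReference: { Name: "ExamplePolicy", Path: "/" },
  });
};
```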
+export interface DetachCustomerManagedPolicyReferenceFromPermissionSetRequest {
+ /**
+ * The ARN of the SSO instance under which the operation will be executed. The ARN of the Specifies the name and path of the IAM customer managed policy. You must have an IAM policy that matches the name and path in each Amazon Web Services account where you want to deploy your permission set. The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource
@@ -1081,7 +1211,7 @@ export interface DetachManagedPolicyFromPermissionSetRequest {
PermissionSetArn: string | undefined;
/**
- * The IAM managed policy ARN to be attached to a permission set. The Amazon Web Services managed policy ARN to be detached from a permission set. The ARN of the SSO instance under which the operation will be executed. The ARN of the Specifies the configuration of the Amazon Web Services managed or customer managed policy that you want to set as a permissions boundary. Specify either Policies used as permissions boundaries do not provide permissions. You must also attach an IAM policy to the role. To learn how the effective permissions for a role are evaluated, see IAM JSON policy evaluation logic in the Identity and Access Management User Guide. Specifies the name and path of the IAM customer managed policy. You must have an IAM policy that matches the name and path in each Amazon Web Services account where you want to deploy your permission set. The Amazon Web Services managed policy ARN that you want to attach to a permission set as a permissions boundary. The permissions boundary attached to the specified permission set. The ARN of the SSO instance under which the operation will be executed. The ARN of the The maximum number of results to display for the list call. The pagination token for the list API. Initially the value is null. Use the output of previous API calls to make subsequent calls. Specifies the names and paths of the IAM customer managed policies that you have attached to your permission set. The pagination token for the list API. Initially the value is null. Use the output of previous API calls to make subsequent calls. The maximum number of results to display for the instance. The ARN of the SSO instance under which the operation will be executed. The ARN of the The permissions boundary that you want to attach to a The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource
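Before the new paginator file, a sketch of the permissions boundary call described in this hunk; the ARNs and policy name are placeholders, and PermissionsBoundary takes either a CustomerManagedPolicyReference or a ManagedPolicyArn as noted above:

```ts
import {
  PutPermissionsBoundaryToPermissionSetCommand,
  SSOAdminClient,
} from "@aws-sdk/client-sso-admin";

const client = new SSOAdminClient({ region: "us-east-1" });

// Placeholder ARNs. PermissionsBoundary accepts either CustomerManagedPolicyReference
// or ManagedPolicyArn, not both.
export const putPermissionsBoundary = async (): Promise<void> => {
  await client.send(
    new PutPermissionsBoundaryToPermissionSetCommand({
      InstanceArn: "arn:aws:sso:::instance/ssoins-EXAMPLE",
      PermissionSetArn: "arn:aws:sso:::permissionSet/ssoins-EXAMPLE/ps-EXAMPLE",
      PermissionsBoundary: {
        CustomerManagedPolicyReference: { Name: "BoundaryPolicy", Path: "/" },
      },
    })
  );
};
```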
diff --git a/clients/client-sso-admin/src/pagination/ListCustomerManagedPolicyReferencesInPermissionSetPaginator.ts b/clients/client-sso-admin/src/pagination/ListCustomerManagedPolicyReferencesInPermissionSetPaginator.ts
new file mode 100644
index 0000000000000..3060e1542838e
--- /dev/null
+++ b/clients/client-sso-admin/src/pagination/ListCustomerManagedPolicyReferencesInPermissionSetPaginator.ts
@@ -0,0 +1,61 @@
+// smithy-typescript generated code
+import { Paginator } from "@aws-sdk/types";
+
+import {
+ ListCustomerManagedPolicyReferencesInPermissionSetCommand,
+ ListCustomerManagedPolicyReferencesInPermissionSetCommandInput,
+ ListCustomerManagedPolicyReferencesInPermissionSetCommandOutput,
+} from "../commands/ListCustomerManagedPolicyReferencesInPermissionSetCommand";
+import { SSOAdmin } from "../SSOAdmin";
+import { SSOAdminClient } from "../SSOAdminClient";
+import { SSOAdminPaginationConfiguration } from "./Interfaces";
+
+/**
+ * @private
+ */
+const makePagedClientRequest = async (
+ client: SSOAdminClient,
+ input: ListCustomerManagedPolicyReferencesInPermissionSetCommandInput,
+ ...args: any
+): Promise The value used for mapping a specified attribute to an identity source. The value used for mapping a specified attribute to an identity source. For more information, see Attribute mappings in the Amazon Web Services Single Sign-On User Guide. Attaches the specified IAM customer managed policy to the specified PermissionSet. The ARN of the SSO instance under which the operation will be executed. The ARN of the Specifies the name and path of the IAM customer managed policy. You must have an IAM policy that matches the name and path in each Amazon Web Services account where you want to deploy your permission set. Attaches an IAM managed policy ARN to a permission set. If the permission set is already referenced by one or more account assignments, you will\n need to call Attaches an Amazon Web Services managed IAM policy ARN to a permission set. If the permission set is already referenced by one or more account assignments, you will\n need to call The IAM managed policy ARN to be attached to a permission set. The Amazon Web Services managed policy ARN to be attached to a permission set. The name of the IAM managed policy. The name of the Amazon Web Services managed IAM policy. The ARN of the IAM managed policy. For more information about ARNs, see Amazon Resource\nNames (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference. The ARN of the Amazon Web Services managed IAM policy. For more information about ARNs, see Amazon Resource\nNames (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference. A structure that stores the details of the IAM managed policy. A structure that stores the details of the Amazon Web Services managed IAM policy. The name of the policy document. The path for the policy. The default is Specifies the name and path of the IAM customer managed policy. You must have an IAM policy that matches the name and path in each Amazon Web Services account where you want to deploy your permission set. Deletes the permissions boundary from a specified PermissionSet. The ARN of the SSO instance under which the operation will be executed. The ARN of the Detaches the specified IAM customer managed policy from the specified PermissionSet. The ARN of the SSO instance under which the operation will be executed. The ARN of the Specifies the name and path of the IAM customer managed policy. You must have an IAM policy that matches the name and path in each Amazon Web Services account where you want to deploy your permission set. Detaches the attached IAM managed policy ARN from the specified permission set. Detaches the attached Amazon Web Services managed IAM policy ARN from the specified permission set. The IAM managed policy ARN to be attached to a permission set. The Amazon Web Services managed policy ARN to be detached from a permission set. Obtains the permissions boundary for a specified PermissionSet. The ARN of the SSO instance under which the operation will be executed. The ARN of the The permissions boundary attached to the specified permission set. Lists all IAM customer managed policies attached to a specified PermissionSet. The ARN of the SSO instance under which the operation will be executed. The ARN of the The maximum number of results to display for the list call. The pagination token for the list API. Initially the value is null. Use the output of previous API calls to make subsequent calls. 
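A sketch of driving that NextToken flow with the paginator added above; the paginateListCustomerManagedPolicyReferencesInPermissionSet export name and the CustomerManagedPolicyReferences response member are assumed to follow the usual codegen conventions, and the ARNs are placeholders:

```ts
import {
  paginateListCustomerManagedPolicyReferencesInPermissionSet,
  SSOAdminClient,
} from "@aws-sdk/client-sso-admin";

const client = new SSOAdminClient({ region: "us-east-1" });

// Placeholder ARNs; the paginator follows NextToken across pages automatically.
export const listAllReferences = async (): Promise<void> => {
  const pages = paginateListCustomerManagedPolicyReferencesInPermissionSet(
    { client, pageSize: 10 },
    {
      InstanceArn: "arn:aws:sso:::instance/ssoins-EXAMPLE",
      PermissionSetArn: "arn:aws:sso:::permissionSet/ssoins-EXAMPLE/ps-EXAMPLE",
    }
  );
  for await (const page of pages) {
    for (const ref of page.CustomerManagedPolicyReferences ?? []) {
      console.log(ref.Name, ref.Path);
    }
  }
};
```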
Specifies the names and paths of the IAM customer managed policies that you have attached to your permission set. The pagination token for the list API. Initially the value is null. Use the output of previous API calls to make subsequent calls. Lists the IAM managed policy that is attached to a specified permission set. Lists the Amazon Web Services managed IAM policy that is attached to a specified permission set. The ARN of the resource with the tags to be listed. Provides information about the permission set provisioning status. Specifies the name and path of the IAM customer managed policy. You must have an IAM policy that matches the name and path in each Amazon Web Services account where you want to deploy your permission set. The Amazon Web Services managed policy ARN that you want to attach to a permission set as a permissions boundary. Specifies the configuration of the Amazon Web Services managed or customer managed policy that you want to set as a permissions boundary. Specify either Policies used as permissions boundaries do not provide permissions. You must also attach an IAM policy to the role. To learn how the effective permissions for a role are evaluated, see IAM JSON policy evaluation logic in the Identity and Access Management User Guide. Attaches an Amazon Web Services managed or customer managed IAM policy to the specified PermissionSet as a permissions boundary. The ARN of the SSO instance under which the operation will be executed. The ARN of the The permissions boundary that you want to attach to a Amazon Web Services Single Sign On (SSO) is a cloud SSO service that makes it easy to centrally manage SSO\n access to multiple Amazon Web Services accounts and business applications. This guide provides information on\n SSO operations which could be used for access management of Amazon Web Services accounts. For information about\n Amazon Web Services SSO features, see the Amazon Web Services Single Sign-On User Guide. Many operations in the SSO APIs rely on identifiers for users and groups, known as\n principals. For more information about how to work with principals and principal IDs in Amazon Web Services SSO,\n see the Amazon Web Services SSO Identity Store API\n Reference. The key for the tag. The key for the tag. The value of the tag. The value of the tag. The ARN of the resource with the tags to be listed. The ARN of the resource with the tags to be listed. Creates an edge deployment plan, consisting of multiple stages. Each stage may have a different deployment configuration and devices. Creates a new stage in an existing edge deployment plan. Starts a SageMaker Edge Manager model packaging job. Edge Manager will use the model artifacts from the Amazon Simple Storage Service bucket that you specify. After the model has been packaged, Amazon SageMaker saves the resulting artifacts to an S3 bucket that you specify. Deletes an edge deployment plan if (and only if) all the stages in the plan are inactive or there are no stages in the plan. Delete a stage in an edge deployment plan if (and only if) the stage is inactive. Deletes an endpoint. SageMaker frees up all of the resources that were deployed when the
* endpoint was created. Describes an edge deployment plan with deployment status per stage. A description of edge packaging jobs. Lists all edge deployment plans. Returns a list of edge packaging jobs. Lists devices allocated to the stage, containing detailed device information and deployment status. Lists the Studio Lifecycle Configurations in your Amazon Web Services Account. Starts a stage in an edge deployment plan. Starts a previously stopped monitoring schedule. Stops a stage in an edge deployment plan. Request to stop an edge packaging job. Creates an edge deployment plan, consisting of multiple stages. Each stage may have a different deployment configuration and devices. Creates a new stage in an existing edge deployment plan. Deletes an edge deployment plan if (and only if) all the stages in the plan are inactive or there are no stages in the plan. Delete a stage in an edge deployment plan if (and only if) the stage is inactive. Describes an edge deployment plan with deployment status per stage. Lists all edge deployment plans. Lists devices allocated to the stage, containing detailed device information and deployment status. Starts a stage in an edge deployment plan. Stops a stage in an edge deployment plan. Contains information about the configuration of a model in a deployment. The name the device application uses to reference this model. The edge packaging job associated with this deployment. Contains information about the configuration of a deployment. Toggle that determines whether to rollback to previous configuration if the current deployment fails.
+ * By default this is turned on. You may turn this off if you want to investigate the errors yourself. Contains information about the configurations of selected devices. Type of device subsets to deploy to the current stage. Percentage of devices in the fleet to deploy to the current stage. List of devices chosen to deploy. A filter to select devices with names containing this name. Contains information about a stage in an edge deployment plan. The name of the stage. Configuration of the devices in the stage. Configuration of the deployment details. The name of the edge deployment plan. List of models associated with the edge deployment plan. The device fleet used for this edge deployment plan. List of stages of the edge deployment plan. The number of stages is limited to 10 per deployment. List of tags with which to tag the edge deployment plan. The ARN of the edge deployment plan. The name of the edge deployment plan. List of stages to be added to the edge deployment plan. The name of the edge packaging job. Configures a hyperparameter tuning job. Specifies how hyperparameter tuning chooses the combinations of hyperparameter values
- * to use for the training job it launches. To use the Bayesian search strategy, set this
- * to The HyperParameterTuningJobObjective object that specifies the
- * objective
- * metric for this tuning job. The ResourceLimits object that specifies the
- * maximum
- * number of training jobs and parallel training jobs for this tuning
- * job. The ParameterRanges object that specifies the ranges of
- * hyperparameters
- * that this tuning job searches. Specifies whether to use early stopping for training jobs launched by the
- * hyperparameter tuning job. This can be one of the following values (the default value is
- * Training jobs launched by the hyperparameter tuning job do not use early
- * stopping. SageMaker stops training jobs launched by the hyperparameter tuning job when
- * they are unlikely to perform better than previously completed training jobs.
- * For more information, see Stop Training Jobs Early. The tuning job's completion criteria. Specifies
- * which
- * training algorithm to use for training jobs that a hyperparameter
- * tuning job launches and the metrics to monitor. The registry path of the Docker image that contains the training algorithm. For
- * information about Docker registry paths for built-in algorithms, see Algorithms
- * Provided by Amazon SageMaker: Common Parameters. SageMaker supports both
- * The training input mode that the algorithm supports. For more information about input modes, see
- * Algorithms.
- * Pipe mode
- * If an algorithm supports
- * File mode
- * If an algorithm supports You must provision the ML storage volume with sufficient capacity
- * to accommodate the data downloaded from S3. In addition to the training data, the ML
- * storage volume also stores the output model. The algorithm container uses the ML storage
- * volume to also store intermediate information, if any. For distributed algorithms, training data is distributed uniformly.
- * Your training duration is predictable if the input data objects sizes are
- * approximately the same. SageMaker does not split the files any further for model training.
- * If the object sizes are skewed, training won't be optimal as the data distribution is also
- * skewed when one host in a training cluster is overloaded, thus becoming a bottleneck in
- * training.
- * FastFile mode
- * If an algorithm supports
- * The name of the resource algorithm to use for the hyperparameter tuning job. If you
- * specify a value for this parameter, do not specify a value for
- * An array of MetricDefinition objects that specify the
- * metrics
- * that the algorithm emits. The retry strategy to use when a training job fails due to an
- * The number of times to retry the job. When the job is retried, it's
- * Defines
- * the training jobs launched by a hyperparameter tuning job. The job definition name. Defines the objective metric for a hyperparameter tuning job.
- * Hyperparameter
- * tuning uses the value of this metric to evaluate the training jobs it launches, and
- * returns the training job that results in either the highest or lowest value for this
- * metric, depending on the value you specify for the Specifies ranges of integer, continuous, and categorical hyperparameters that a
- * hyperparameter tuning job searches. The hyperparameter tuning job launches training jobs
- * with hyperparameter values within these ranges to find the combination of values that
- * result in the training job with the best performance as measured by the objective metric
- * of the hyperparameter tuning job. The maximum number of items specified for Specifies the values of hyperparameters
- * that
- * do not change for the tuning job. The HyperParameterAlgorithmSpecification object that
- * specifies
- * the resource algorithm to use for the training jobs that the tuning
- * job launches. The Amazon Resource Name (ARN) of the
- * IAM
- * role associated with the training jobs that the tuning job
- * launches. An array of Channel objects that specify
- * the
- * input for the training jobs that the tuning job launches. The VpcConfig object that
- * specifies
- * the VPC that you want the training jobs that this hyperparameter
- * tuning job launches to connect to. Control access to and from your
- * training
- * container by configuring the VPC. For more information, see Protect Training Jobs
- * by Using an Amazon Virtual Private Cloud. Specifies the path to the Amazon S3 bucket where you
- * store
- * model artifacts from the training jobs that the tuning job
- * launches. The resources,
- * including
- * the compute instances and storage volumes, to use for the training
- * jobs that the tuning job launches. Storage volumes store model artifacts and
- * incremental
- * states. Training algorithms might also use storage volumes for
- * scratch
- * space. If you want SageMaker to use the storage volume to store the
- * training data, choose Specifies a limit to how long a model hyperparameter training job can run. It also
- * specifies how long a managed spot training job has to complete. When the job reaches the
- * time limit, SageMaker ends the training job. Use this API to cap model training costs. Isolates the training container. No inbound or outbound network calls can be made,
- * except for calls between peers within a training cluster for distributed training. If
- * network isolation is used for training jobs that are configured to use a VPC, SageMaker
- * downloads and uploads customer data and model artifacts through the specified VPC, but
- * the training container does not have network access. To encrypt all communications between ML compute instances in distributed training,
- * choose A Boolean indicating whether managed spot training is enabled ( Contains information about the output location for managed spot training checkpoint
- * data. The number of times to retry the job when the job fails due to an
- * A previously completed or stopped hyperparameter tuning job to be used as a starting
- * point for a new hyperparameter tuning job. The name of the hyperparameter tuning job to be used as a starting point for a new
- * hyperparameter tuning job. Specifies the configuration for a hyperparameter tuning job that uses one or more
- * previous hyperparameter tuning jobs as a starting point. The results of previous tuning
- * jobs are used to inform which combinations of hyperparameters to search over in the new
- * tuning job. All training jobs launched by the new hyperparameter tuning job are evaluated by using
- * the objective metric, and the training job that performs the best is compared to the
- * best training jobs from the parent tuning jobs. From these, the training job that
- * performs the best as measured by the objective metric is returned as the overall best
- * training job. All training jobs launched by parent hyperparameter tuning jobs and the new
- * hyperparameter tuning jobs count against the limit of training jobs for the tuning
- * job. An array of hyperparameter tuning jobs that are used as the starting point for the new
- * hyperparameter tuning job. For more information about warm starting a hyperparameter
- * tuning job, see Using a Previous
- * Hyperparameter Tuning Job as a Starting Point. Hyperparameter tuning jobs created before October 1, 2018 cannot be used as parent
- * jobs for warm start tuning jobs. Specifies one of the following: The new hyperparameter tuning job uses the same input data and training
- * image as the parent tuning jobs. You can change the hyperparameter ranges to
- * search and the maximum number of training jobs that the hyperparameter
- * tuning job launches. You cannot use a new version of the training algorithm,
- * unless the changes in the new version do not affect the algorithm itself.
- * For example, changes that improve logging or adding support for a different
- * data format are allowed. You can also change hyperparameters from tunable to
- * static, and from static to tunable, but the total number of static plus
- * tunable hyperparameters must remain the same as it is in all parent jobs.
- * The objective metric for the new tuning job must be the same as for all
- * parent jobs. The new hyperparameter tuning job can include input data, hyperparameter
- * ranges, maximum number of concurrent training jobs, and maximum number of
- * training jobs that are different than those of its parent hyperparameter
- * tuning jobs. The training image can also be a different version from the
- * version used in the parent hyperparameter tuning job. You can also change
- * hyperparameters from tunable to static, and from static to tunable, but the
- * total number of static plus tunable hyperparameters must remain the same as
- * it is in all parent jobs. The objective metric for the new tuning job must
- * be the same as for all parent jobs. The name of the tuning job. This name is the prefix for the names of all training jobs
- * that this tuning job launches. The name must be unique within the same Amazon Web Services account and
- * Amazon Web Services Region. The name must have 1 to 32 characters. Valid characters are a-z, A-Z, 0-9,
- * and : + = @ _ % - (hyphen). The name is not case sensitive. The HyperParameterTuningJobConfig object that describes the tuning
- * job, including the search strategy, the objective metric used to evaluate training jobs,
- * ranges of parameters to search, and resource limits for the tuning job. For more
- * information, see How
- * Hyperparameter Tuning Works. The HyperParameterTrainingJobDefinition object that describes the
- * training jobs that this tuning job launches, including static hyperparameters, input
- * data configuration, output data configuration, resource configuration, and stopping
- * condition. A list of the HyperParameterTrainingJobDefinition objects launched
- * for this tuning job. Specifies the configuration for starting the hyperparameter tuning job using one or
- * more previous tuning jobs as a starting point. The results of previous tuning jobs are
- * used to inform which combinations of hyperparameters to search over in the new tuning
- * job. All training jobs launched by the new hyperparameter tuning job are evaluated by using
- * the objective metric. If you specify All training jobs launched by parent hyperparameter tuning jobs and the new
- * hyperparameter tuning jobs count against the limit of training jobs for the tuning
- * job. An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in
- * different ways, for example, by purpose, owner, or environment. For more information,
- * see Tagging Amazon Web Services
- * Resources. Tags that you specify for the tuning job are also added to all training jobs that the
- * tuning job launches. The Amazon Resource Name (ARN) of the tuning job. SageMaker assigns an ARN to a
- * hyperparameter tuning job when you create it. Configures a hyperparameter tuning job. Specifies how hyperparameter tuning chooses the combinations of hyperparameter values
+ * to use for the training job it launches. To use the Bayesian search strategy, set this
+ * to The HyperParameterTuningJobObjective object that specifies the
+ * objective
+ * metric for this tuning job. The ResourceLimits object that specifies the
+ * maximum
+ * number of training jobs and parallel training jobs for this tuning
+ * job. The ParameterRanges object that specifies the ranges of
+ * hyperparameters
+ * that this tuning job searches. Specifies whether to use early stopping for training jobs launched by the
+ * hyperparameter tuning job. This can be one of the following values (the default value is
+ * Training jobs launched by the hyperparameter tuning job do not use early
+ * stopping. SageMaker stops training jobs launched by the hyperparameter tuning job when
+ * they are unlikely to perform better than previously completed training jobs.
+ * For more information, see Stop Training Jobs Early. The tuning job's completion criteria. Specifies
+ * which
+ * training algorithm to use for training jobs that a hyperparameter
+ * tuning job launches and the metrics to monitor. The registry path of the Docker image that contains the training algorithm. For
+ * information about Docker registry paths for built-in algorithms, see Algorithms
+ * Provided by Amazon SageMaker: Common Parameters. SageMaker supports both
+ * The training input mode that the algorithm supports. For more information about input modes, see
+ * Algorithms.
+ * Pipe mode
+ * If an algorithm supports
+ * File mode
+ * If an algorithm supports You must provision the ML storage volume with sufficient capacity
+ * to accommodate the data downloaded from S3. In addition to the training data, the ML
+ * storage volume also stores the output model. The algorithm container uses the ML storage
+ * volume to also store intermediate information, if any. For distributed algorithms, training data is distributed uniformly.
+ * Your training duration is predictable if the input data object sizes are
+ * approximately the same. SageMaker does not split the files any further for model training.
+ * If the object sizes are skewed, training won't be optimal: the data distribution is also
+ * skewed, so one overloaded host in the training cluster becomes a bottleneck in
+ * training.
+ * FastFile mode
+ * If an algorithm supports
+ * The name of the resource algorithm to use for the hyperparameter tuning job. If you
+ * specify a value for this parameter, do not specify a value for
+ * An array of MetricDefinition objects that specify the
+ * metrics
+ * that the algorithm emits. The retry strategy to use when a training job fails due to an
+ * The number of times to retry the job. When the job is retried, it's
+ * Defines
+ * the training jobs launched by a hyperparameter tuning job. The job definition name. Defines the objective metric for a hyperparameter tuning job.
+ * Hyperparameter
+ * tuning uses the value of this metric to evaluate the training jobs it launches, and
+ * returns the training job that results in either the highest or lowest value for this
+ * metric, depending on the value you specify for the Specifies ranges of integer, continuous, and categorical hyperparameters that a
+ * hyperparameter tuning job searches. The hyperparameter tuning job launches training jobs
+ * with hyperparameter values within these ranges to find the combination of values that
+ * result in the training job with the best performance as measured by the objective metric
+ * of the hyperparameter tuning job. The maximum number of items specified for Specifies the values of hyperparameters
+ * that
+ * do not change for the tuning job. The HyperParameterAlgorithmSpecification object that
+ * specifies
+ * the resource algorithm to use for the training jobs that the tuning
+ * job launches. The Amazon Resource Name (ARN) of the
+ * IAM
+ * role associated with the training jobs that the tuning job
+ * launches. An array of Channel objects that specify
+ * the
+ * input for the training jobs that the tuning job launches. The VpcConfig object that
+ * specifies
+ * the VPC that you want the training jobs that this hyperparameter
+ * tuning job launches to connect to. Control access to and from your
+ * training
+ * container by configuring the VPC. For more information, see Protect Training Jobs
+ * by Using an Amazon Virtual Private Cloud. Specifies the path to the Amazon S3 bucket where you
+ * store
+ * model artifacts from the training jobs that the tuning job
+ * launches. The resources,
+ * including
+ * the compute instances and storage volumes, to use for the training
+ * jobs that the tuning job launches. Storage volumes store model artifacts and
+ * incremental
+ * states. Training algorithms might also use storage volumes for
+ * scratch
+ * space. If you want SageMaker to use the storage volume to store the
+ * training data, choose Specifies a limit to how long a model hyperparameter training job can run. It also
+ * specifies how long a managed spot training job has to complete. When the job reaches the
+ * time limit, SageMaker ends the training job. Use this API to cap model training costs. Isolates the training container. No inbound or outbound network calls can be made,
+ * except for calls between peers within a training cluster for distributed training. If
+ * network isolation is used for training jobs that are configured to use a VPC, SageMaker
+ * downloads and uploads customer data and model artifacts through the specified VPC, but
+ * the training container does not have network access. To encrypt all communications between ML compute instances in distributed training,
+ * choose A Boolean indicating whether managed spot training is enabled ( Contains information about the output location for managed spot training checkpoint
+ * data. The number of times to retry the job when the job fails due to an
+ * A previously completed or stopped hyperparameter tuning job to be used as a starting
+ * point for a new hyperparameter tuning job. The name of the hyperparameter tuning job to be used as a starting point for a new
+ * hyperparameter tuning job. Specifies the configuration for a hyperparameter tuning job that uses one or more
+ * previous hyperparameter tuning jobs as a starting point. The results of previous tuning
+ * jobs are used to inform which combinations of hyperparameters to search over in the new
+ * tuning job. All training jobs launched by the new hyperparameter tuning job are evaluated by using
+ * the objective metric, and the training job that performs the best is compared to the
+ * best training jobs from the parent tuning jobs. From these, the training job that
+ * performs the best as measured by the objective metric is returned as the overall best
+ * training job. All training jobs launched by parent hyperparameter tuning jobs and the new
+ * hyperparameter tuning jobs count against the limit of training jobs for the tuning
+ * job. An array of hyperparameter tuning jobs that are used as the starting point for the new
+ * hyperparameter tuning job. For more information about warm starting a hyperparameter
+ * tuning job, see Using a Previous
+ * Hyperparameter Tuning Job as a Starting Point. Hyperparameter tuning jobs created before October 1, 2018 cannot be used as parent
+ * jobs for warm start tuning jobs. Specifies one of the following: The new hyperparameter tuning job uses the same input data and training
+ * image as the parent tuning jobs. You can change the hyperparameter ranges to
+ * search and the maximum number of training jobs that the hyperparameter
+ * tuning job launches. You cannot use a new version of the training algorithm,
+ * unless the changes in the new version do not affect the algorithm itself.
+ * For example, changes that improve logging or adding support for a different
+ * data format are allowed. You can also change hyperparameters from tunable to
+ * static, and from static to tunable, but the total number of static plus
+ * tunable hyperparameters must remain the same as it is in all parent jobs.
+ * The objective metric for the new tuning job must be the same as for all
+ * parent jobs. The new hyperparameter tuning job can include input data, hyperparameter
+ * ranges, maximum number of concurrent training jobs, and maximum number of
+ * training jobs that are different than those of its parent hyperparameter
+ * tuning jobs. The training image can also be a different version from the
+ * version used in the parent hyperparameter tuning job. You can also change
+ * hyperparameters from tunable to static, and from static to tunable, but the
+ * total number of static plus tunable hyperparameters must remain the same as
+ * it is in all parent jobs. The objective metric for the new tuning job must
+ * be the same as for all parent jobs. The name of the tuning job. This name is the prefix for the names of all training jobs
+ * that this tuning job launches. The name must be unique within the same Amazon Web Services account and
+ * Amazon Web Services Region. The name must have 1 to 32 characters. Valid characters are a-z, A-Z, 0-9,
+ * and : + = @ _ % - (hyphen). The name is not case sensitive. The HyperParameterTuningJobConfig object that describes the tuning
+ * job, including the search strategy, the objective metric used to evaluate training jobs,
+ * ranges of parameters to search, and resource limits for the tuning job. For more
+ * information, see How
+ * Hyperparameter Tuning Works. The HyperParameterTrainingJobDefinition object that describes the
+ * training jobs that this tuning job launches, including static hyperparameters, input
+ * data configuration, output data configuration, resource configuration, and stopping
+ * condition. A list of the HyperParameterTrainingJobDefinition objects launched
+ * for this tuning job. Specifies the configuration for starting the hyperparameter tuning job using one or
+ * more previous tuning jobs as a starting point. The results of previous tuning jobs are
+ * used to inform which combinations of hyperparameters to search over in the new tuning
+ * job. All training jobs launched by the new hyperparameter tuning job are evaluated by using
+ * the objective metric. If you specify All training jobs launched by parent hyperparameter tuning jobs and the new
+ * hyperparameter tuning jobs count against the limit of training jobs for the tuning
+ * job. An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in
+ * different ways, for example, by purpose, owner, or environment. For more information,
+ * see Tagging Amazon Web Services
+ * Resources. Tags that you specify for the tuning job are also added to all training jobs that the
+ * tuning job launches. The Amazon Resource Name (ARN) of the tuning job. SageMaker assigns an ARN to a
+ * hyperparameter tuning job when you create it. The description of the image. The name of the endpoint that you want to delete. The name of the edge deployment plan to delete. The name of the endpoint configuration that you want to delete. The name of the edge deployment plan from which the stage will be deleted. The name of the stage. The name of the endpoint that you want to delete. The name of the endpoint configuration that you want to delete. Contains information summarizing the deployment stage results. The general status of the current stage. The number of edge devices with the successful deployment in the current stage. The number of edge devices yet to pick up the deployment in current stage, or in progress. The number of edge devices that failed the deployment in current stage. A detailed message about deployment status in current stage. The time when the deployment API started. Contains information summarizing the deployment stage results. The name of the stage. Configuration of the devices in the stage. Configuration of the deployment details. General status of the current state. The name of the fleet the devices belong to. The name of the deployment plan to describe. If the edge deployment plan has enough stages to require tokening, then this is the response from the last list of stages returned. The maximum number of results to select (50 by default). The ARN of edge deployment plan. The name of the edge deployment plan. List of models associated with the edge deployment plan. The device fleet used for this edge deployment plan. The number of edge devices with the successful deployment. The number of edge devices yet to pick up deployment, or in progress. The number of edge devices that failed the deployment. List of stages in the edge deployment plan. Token to use when calling the next set of stages in the edge deployment plan. The time when the edge deployment plan was created. The time when the edge deployment plan was last updated. The name of the edge packaging job. The name of the tuning job. Shows the final value for the
- * objective
- * metric for a training job that was launched by a hyperparameter
- * tuning job. You define the objective metric in the
- * Whether to
- * minimize
- * or maximize the objective metric. Valid values are Minimize and
- * Maximize. The name of the
- * objective
- * metric. The value of the objective metric. The container for the summary information about a training job. The training job definition name. The name of the training job. The Amazon Resource Name (ARN) of the training job. The HyperParameter tuning job that launched the training job. The date and time that the training job was created. The date and time that the training job started. Specifies the time when the training job ends on training instances. You are billed
- * for the time interval between the value of The
- * status
- * of the training job. A
- * list of the hyperparameters for which you specified ranges to
- * search. The
- * reason that the training job failed.
- * The FinalHyperParameterTuningJobObjectiveMetric object that
- * specifies the
- * value
- * of the
- * objective
- * metric of the tuning job that launched this training job. The status of the objective metric for the training job: Succeeded: The
- * final
- * objective metric for the training job was evaluated by the
- * hyperparameter tuning job and
- * used
- * in the hyperparameter tuning process. Pending: The training job is in progress and evaluation of its final objective
- * metric is pending. Failed:
- * The final objective metric for the training job was not evaluated, and was not
- * used in the hyperparameter tuning process. This typically occurs when the
- * training job failed or did not emit an objective
- * metric. Specifies the number of training jobs that this hyperparameter tuning job launched,
- * categorized by the status of their objective metric. The objective metric status shows
- * whether the
- * final
- * objective metric for the training job has been evaluated by the
- * tuning job and used in the hyperparameter tuning process. The number of training jobs whose final objective metric was evaluated by the
- * hyperparameter tuning job and used in the hyperparameter tuning process. The number of training jobs that are in progress and pending evaluation of their final
- * objective metric. The number of training jobs whose final objective metric was not evaluated and used in
- * the hyperparameter tuning process. This typically occurs when the training job failed or
- * did not emit an objective metric. The numbers of training jobs launched by a hyperparameter tuning job, categorized by
- * status. The number of completed training jobs launched by the hyperparameter tuning
- * job. The number of in-progress training jobs launched by a hyperparameter tuning
- * job. The number of training jobs that failed, but can be retried. A failed training job can
- * be retried only if it failed because an internal service error occurred. The number of training jobs that failed and can't be retried. A failed training job
- * can't be retried if it failed because a client error occurred. The number of training jobs launched by a hyperparameter tuning job that were
- * manually
- * stopped. The name of the tuning job. The
- * Amazon Resource Name (ARN) of the tuning job. The HyperParameterTuningJobConfig object that specifies the
- * configuration of the tuning job. The HyperParameterTrainingJobDefinition object that specifies the
- * definition of the training jobs that this tuning job launches. A list of the HyperParameterTrainingJobDefinition objects launched
- * for this tuning job. The status of the tuning job: InProgress, Completed, Failed, Stopping, or
- * Stopped. The date and time that the tuning job started. The date and time that the tuning job ended. The date and time that the status of the tuning job was modified. The TrainingJobStatusCounters object that specifies the number of
- * training jobs, categorized by status, that this tuning job launched. The ObjectiveStatusCounters object that specifies the number of
- * training jobs, categorized by the status of their final objective metric, that this
- * tuning job launched. A TrainingJobSummary object that describes the training job that
- * completed with the best current HyperParameterTuningJobObjective. If the hyperparameter tuning job is an warm start tuning job with a
- * The configuration for starting the hyperparameter parameter tuning job using one or
- * more previous tuning jobs as a starting point. The results of previous tuning jobs are
- * used to inform which combinations of hyperparameters to search over in the new tuning
- * job. If the tuning job failed, the reason it failed. The name of the image to describe. When the image was created. The description of the image. The name of the image as displayed. When a create, update, or delete operation fails, the reason for the failure. The Amazon Resource Name (ARN) of the image. The name of the image. The status of the image. When the image was last modified. The Amazon Resource Name (ARN) of the IAM role that enables Amazon SageMaker to perform tasks on your behalf. The name of the image. The version of the image. If not specified, the latest version is described. The registry path of the container image on which this image version is based. The registry path of the container image that contains this image version. When the version was created. When a create or delete operation fails, the reason for the failure. The Amazon Resource Name (ARN) of the image the version is based on. The ARN of the version. The status of the version. When the version was last modified. The version number. The name of the job. The name must be unique within an
- * Amazon Web Services Region in the Amazon Web Services account. The endpoint configuration made by Inference Recommender during a recommendation job. The name of the endpoint made during a recommendation job. The name of the production variant (deployed model) made during a recommendation job. The instance type recommended by Amazon SageMaker Inference Recommender. The number of instances recommended to launch initially. The metrics of recommendations. Defines the cost per hour for the instance. The name of the tuning job. Defines the cost per inference for the instance . Shows the final value for the
+ * objective
+ * metric for a training job that was launched by a hyperparameter
+ * tuning job. You define the objective metric in the
+ * The expected maximum number of requests per minute for the instance. Whether to
+ * minimize
+ * or maximize the objective metric. Valid values are Minimize and
+ * Maximize. The expected model latency at maximum invocation per minute for the instance. The name of the
+ * objective
+ * metric. The value of the objective metric. A list of environment parameters suggested by the Amazon SageMaker Inference Recommender. The container for the summary information about a training job. The environment key suggested by the Amazon SageMaker Inference Recommender. The training job definition name. The value type suggested by the Amazon SageMaker Inference Recommender. The name of the training job. The value suggested by the Amazon SageMaker Inference Recommender. The Amazon Resource Name (ARN) of the training job. The HyperParameter tuning job that launched the training job. The date and time that the training job was created. The date and time that the training job started. Specifies the time when the training job ends on training instances. You are billed
+ * for the time interval between the value of The
+ * status
+ * of the training job. A
+ * list of the hyperparameters for which you specified ranges to
+ * search. The
+ * reason that the training job failed.
+ * The FinalHyperParameterTuningJobObjectiveMetric object that
+ * specifies the
+ * value
+ * of the
+ * objective
+ * metric of the tuning job that launched this training job. The status of the objective metric for the training job: Succeeded: The
+ * final
+ * objective metric for the training job was evaluated by the
+ * hyperparameter tuning job and
+ * used
+ * in the hyperparameter tuning process. Pending: The training job is in progress and evaluation of its final objective
+ * metric is pending. Failed:
+ * The final objective metric for the training job was not evaluated, and was not
+ * used in the hyperparameter tuning process. This typically occurs when the
+ * training job failed or did not emit an objective
+ * metric. Defines the model configuration. Includes the specification name and environment parameters. Specifies the number of training jobs that this hyperparameter tuning job launched,
+ * categorized by the status of their objective metric. The objective metric status shows
+ * whether the
+ * final
+ * objective metric for the training job has been evaluated by the
+ * tuning job and used in the hyperparameter tuning process. The inference specification name in the model package version. The number of training jobs whose final objective metric was evaluated by the
+ * hyperparameter tuning job and used in the hyperparameter tuning process. Defines the environment parameters that includes key, value types, and values. The number of training jobs that are in progress and pending evaluation of their final
+ * objective metric. The number of training jobs whose final objective metric was not evaluated and used in
+ * the hyperparameter tuning process. This typically occurs when the training job failed or
+ * did not emit an objective metric. A list of recommendations made by Amazon SageMaker Inference Recommender. The numbers of training jobs launched by a hyperparameter tuning job, categorized by
+ * status. The metrics used to decide what recommendation to make. The number of completed training jobs launched by the hyperparameter tuning
+ * job. Defines the endpoint configuration parameters. The number of in-progress training jobs launched by a hyperparameter tuning
+ * job. Defines the model configuration. The number of training jobs that failed, but can be retried. A failed training job can
+ * be retried only if it failed because an internal service error occurred. The number of training jobs that failed and can't be retried. A failed training job
+ * can't be retried if it failed because a client error occurred. The number of training jobs launched by a hyperparameter tuning job that were
+ * manually
+ * stopped. The name of the job. The name must be unique within an
- * Amazon Web Services Region in the Amazon Web Services account. The name of the tuning job. The job description that you provided when you initiated the job. The
+ * Amazon Resource Name (ARN) of the tuning job. The job type that you provided when you initiated the job. The HyperParameterTuningJobConfig object that specifies the
+ * configuration of the tuning job. The Amazon Resource Name (ARN) of the job. The HyperParameterTrainingJobDefinition object that specifies the
+ * definition of the training jobs that this tuning job launches. The Amazon Resource Name (ARN) of the Amazon Web Services
- * Identity and Access Management (IAM) role you provided when you initiated the job. A list of the HyperParameterTrainingJobDefinition objects launched
+ * for this tuning job. The status of the job. The status of the tuning job: InProgress, Completed, Failed, Stopping, or
+ * Stopped. A timestamp that shows when the job was created. The date and time that the tuning job started. A timestamp that shows when the job completed. The date and time that the tuning job ended. A timestamp that shows when the job was last modified. The date and time that the status of the tuning job was modified. If the job fails, provides information why the job failed. The TrainingJobStatusCounters object that specifies the number of
+ * training jobs, categorized by status, that this tuning job launched. Returns information about the versioned model package Amazon Resource Name (ARN),
- * the traffic pattern, and endpoint configurations you provided when you initiated the job. The ObjectiveStatusCounters object that specifies the number of
+ * training jobs, categorized by the status of their final objective metric, that this
+ * tuning job launched. The stopping conditions that you provided when you initiated the job. A TrainingJobSummary object that describes the training job that
+ * completed with the best current HyperParameterTuningJobObjective. The recommendations made by Inference Recommender. If the hyperparameter tuning job is an warm start tuning job with a
+ * The configuration for starting the hyperparameter parameter tuning job using one or
+ * more previous tuning jobs as a starting point. The results of previous tuning jobs are
+ * used to inform which combinations of hyperparameters to search over in the new tuning
+ * job. If the tuning job failed, the reason it failed. The name of the labeling job to return information for. The name of the image to describe. Provides a breakdown of the number of objects labeled. The total number of objects labeled. When the image was created. The total number of objects labeled by a human worker. The description of the image. The total number of objects labeled by automated data labeling. The name of the image as displayed. The total number of objects that could not be labeled due to an error. When a create, update, or delete operation fails, the reason for the failure. The total number of objects not yet labeled. The Amazon Resource Name (ARN) of the image. The name of the image. The status of the image. When the image was last modified. The Amazon Resource Name (ARN) of the IAM role that enables Amazon SageMaker to perform tasks on your behalf. Specifies the location of the output produced by the labeling job. The Amazon S3 bucket location of the manifest file for labeled data. The name of the image. The Amazon Resource Name (ARN) for the most recent SageMaker model trained as part of
- * automated data labeling. The version of the image. If not specified, the latest version is described. The processing status of the labeling job. Provides a breakdown of the number of data objects labeled by humans, the number of
- * objects labeled by machine, the number of objects that couldn't be labeled, and the
- * total number of objects labeled. If the job failed, the reason that it failed. The date and time that the labeling job was created. The date and time that the labeling job was last updated. The registry path of the container image on which this image version is based. A unique identifier for work done as part of a labeling job. The registry path of the container image that contains this image version. The name assigned to the labeling job when it was created. When the version was created. The Amazon Resource Name (ARN) of the labeling job. When a create or delete operation fails, the reason for the failure. The attribute used as the label in the output manifest file. The Amazon Resource Name (ARN) of the image the version is based on. Input configuration information for the labeling job, such as the Amazon S3 location of the
- * data objects and the location of the manifest file that describes the data
- * objects. The ARN of the version. The location of the job's output data and the Amazon Web Services Key Management Service key ID for the key used to
- * encrypt the output data, if any. The status of the version. The Amazon Resource Name (ARN) that SageMaker assumes to perform tasks on your behalf
- * during data labeling. When the version was last modified. The S3 location of the JSON file that defines the categories used to label data
- * objects. Please note the following label-category limits: semantic segmentation labeling jobs using automated labeling support 20 labels, and bounding box labeling jobs (all) support 10 labels. The file is a JSON structure in the following format:
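The JSON example that originally followed this sentence is not preserved in this extract. As a hedged illustration only (the key names are assumptions based on Ground Truth label-category conventions, not taken from this patch), such a file generally looks like:

// Hypothetical label-category configuration, expressed as a TypeScript literal.
// The file itself is plain JSON stored in S3 and referenced via LabelCategoryConfigS3Uri.
const labelCategoryConfig = {
  "document-version": "2018-11-28",
  labels: [{ label: "vehicle" }, { label: "pedestrian" }],
};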
- * The version number. A set of conditions for stopping a labeling job. If any of the conditions are met, the
- * job is automatically stopped. Configuration information for automated data labeling. The name of the job. The name must be unique within an
+ * Amazon Web Services Region in the Amazon Web Services account. Configuration information required for human workers to complete a labeling
- * task. The endpoint configuration made by Inference Recommender during a recommendation job. An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in
- * different ways, for example, by purpose, owner, or environment. For more information,
- * see Tagging Amazon Web Services
- * Resources. The name of the endpoint made during a recommendation job. The location of the output produced by the labeling job. The name of the production variant (deployed model) made during a recommendation job. The instance type recommended by Amazon SageMaker Inference Recommender. The name of the lineage group. The number of instances recommended to launch initially. The metrics of recommendations. The name of the lineage group. Defines the cost per hour for the instance. The Amazon Resource Name (ARN) of the lineage group. Defines the cost per inference for the instance . The display name of the lineage group. The expected maximum number of requests per minute for the instance. The description of the lineage group. The expected model latency at maximum invocation per minute for the instance. The creation time of lineage group. A list of environment parameters suggested by the Amazon SageMaker Inference Recommender. Information about the user who created or modified an experiment, trial, trial
- * component, lineage group, or project. The environment key suggested by the Amazon SageMaker Inference Recommender. The last modified time of the lineage group. The value type suggested by the Amazon SageMaker Inference Recommender. Information about the user who created or modified an experiment, trial, trial
- * component, lineage group, or project. The value suggested by the Amazon SageMaker Inference Recommender. Defines the model configuration. Includes the specification name and environment parameters. The name of the model. The inference specification name in the model package version. Defines the environment parameters that includes key, value types, and values. A list of recommendations made by Amazon SageMaker Inference Recommender. Name of the SageMaker model. The metrics used to decide what recommendation to make. The location of the primary inference code, associated artifacts, and custom
- * environment map that the inference code uses when it is deployed in production.
- * Defines the endpoint configuration parameters. The containers in the inference pipeline. Defines the model configuration. Specifies details of how containers in a multi-container endpoint are called. The Amazon Resource Name (ARN) of the IAM role that you specified for the
- * model. A VpcConfig object that specifies the VPC that this model has access
- * to. For more information, see Protect Endpoints by Using an Amazon Virtual
- * Private Cloud
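The container, execution-role, and VPC fields described above are returned by DescribeModel. A minimal sketch with the v3 client, assuming a model named "my-model" exists (the name is a placeholder, not taken from this patch):

import { SageMakerClient, DescribeModelCommand } from "@aws-sdk/client-sagemaker";

// Sketch: read the container and networking fields documented above.
async function showModel(): Promise<void> {
  const client = new SageMakerClient({});
  const model = await client.send(new DescribeModelCommand({ ModelName: "my-model" }));
  console.log(model.PrimaryContainer?.Image); // primary inference code location
  console.log(model.Containers?.length);      // containers in an inference pipeline
  console.log(model.ExecutionRoleArn);        // IAM role specified for the model
  console.log(model.VpcConfig?.Subnets);      // VPC the model has access to
}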
- * A timestamp that shows when the model was created. The Amazon Resource Name (ARN) of the model. If The name of the model bias job definition. The name must be unique within an Amazon Web Services Region
- * in the Amazon Web Services account. The name of the job. The name must be unique within an
+ * Amazon Web Services Region in the Amazon Web Services account. The job description that you provided when you initiated the job. The Amazon Resource Name (ARN) of the model bias job. The job type that you provided when you initiated the job. The name of the bias job definition. The name must be unique within an Amazon Web Services Region in the
- * Amazon Web Services account. The Amazon Resource Name (ARN) of the job. The time at which the model bias job was created. The Amazon Resource Name (ARN) of the Amazon Web Services
+ * Identity and Access Management (IAM) role you provided when you initiated the job. The baseline configuration for a model bias job. The status of the job. Configures the model bias job to run a specified Docker container image. A timestamp that shows when the job was created. Inputs for the model bias job. A timestamp that shows when the job completed. The output configuration for monitoring jobs. A timestamp that shows when the job was last modified. Identifies the resources to deploy for a monitoring job. If the job fails, provides information why the job failed. Networking options for a model bias job. Returns information about the versioned model package Amazon Resource Name (ARN),
+ * the traffic pattern, and endpoint configurations you provided when you initiated the job. The Amazon Resource Name (ARN) of the Amazon Web Services Identity and Access Management (IAM) role that
- * has read permission to the input data location and write permission to the output data
- * location in Amazon S3. The stopping conditions that you provided when you initiated the job. A time limit for how long the monitoring job is allowed to run before stopping. The recommendations made by Inference Recommender. The name of the model explainability job definition. The name must be unique within an
- * Amazon Web Services Region in the Amazon Web Services account. The name of the labeling job to return information for. The Amazon Resource Name (ARN) of the model explainability job. The name of the explainability job definition. The name must be unique within an Amazon Web Services
- * Region in the Amazon Web Services account. The time at which the model explainability job was created. The baseline configuration for a model explainability job. Configures the model explainability job to run a specified Docker container
- * image. Inputs for the model explainability job. Provides a breakdown of the number of objects labeled. The output configuration for monitoring jobs. The total number of objects labeled. Identifies the resources to deploy for a monitoring job. The total number of objects labeled by a human worker. Networking options for a model explainability job. The total number of objects labeled by automated data labeling. The Amazon Resource Name (ARN) of the Amazon Web Services Identity and Access Management (IAM) role that
- * has read permission to the input data location and write permission to the output data
- * location in Amazon S3. The total number of objects that could not be labeled due to an error. A time limit for how long the monitoring job is allowed to run before stopping. The total number of objects not yet labeled. Specifies the location of the output produced by the labeling job. The name or Amazon Resource Name (ARN) of the model package to describe. When you specify a name, the name must have 1 to 63 characters. Valid
- * characters are a-z, A-Z, 0-9, and - (hyphen). The Amazon S3 bucket location of the manifest file for labeled data. The Amazon Resource Name (ARN) for the most recent SageMaker model trained as part of
+ * automated data labeling. Represents the overall status of a model package. The name of the model package for which the overall status is being reported. The processing status of the labeling job. The current status. Provides a breakdown of the number of data objects labeled by humans, the number of
+ * objects labeled by machine, the number of objects that couldn't be labeled, and the
+ * total number of objects labeled. if the overall status is If the job failed, the reason that it failed. The date and time that the labeling job was created. Specifies the validation and image scan statuses of the model package. The validation status of the model package. The date and time that the labeling job was last updated. The status of the scan of the Docker image container for the model package. A unique identifier for work done as part of a labeling job. The name of the model package being described. If the model is a versioned model, the name of the model group that the versioned
- * model belongs to. The version of the model package. The name assigned to the labeling job when it was created. The Amazon Resource Name (ARN) of the model package. The Amazon Resource Name (ARN) of the labeling job. A brief summary of the model package. The attribute used as the label in the output manifest file. A timestamp specifying when the model package was created. Input configuration information for the labeling job, such as the Amazon S3 location of the
+ * data objects and the location of the manifest file that describes the data
+ * objects. Details about inference jobs that can be run with models based on this model
- * package. The location of the job's output data and the Amazon Web Services Key Management Service key ID for the key used to
+ * encrypt the output data, if any. Details about the algorithm that was used to create the model package. The Amazon Resource Name (ARN) that SageMaker assumes to perform tasks on your behalf
+ * during data labeling. Configurations for one or more transform jobs that SageMaker runs to test the model
- * package. The S3 location of the JSON file that defines the categories used to label data
+ * objects. Please note the following label-category limits: semantic segmentation labeling jobs using automated labeling support 20 labels, and bounding box labeling jobs (all) support 10 labels. The file is a JSON structure in the following format:
+ * The current status of the model package. A set of conditions for stopping a labeling job. If any of the conditions are met, the
+ * job is automatically stopped. Details about the current status of the model package. Configuration information for automated data labeling. Whether the model package is certified for listing on Amazon Web Services Marketplace. Configuration information required for human workers to complete a labeling
+ * task. The approval status of the model package. An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in
+ * different ways, for example, by purpose, owner, or environment. For more information,
+ * see Tagging Amazon Web Services
+ * Resources. Information about the user who created or modified an experiment, trial, trial
- * component, lineage group, or project. The location of the output produced by the labeling job. Metadata properties of the tracking entity, trial, or trial component. Metrics for the model. The name of the lineage group. The last time that the model package was modified. Information about the user who created or modified an experiment, trial, trial
- * component, lineage group, or project. The name of the lineage group. A description provided for the model approval. The Amazon Resource Name (ARN) of the lineage group. The metadata properties associated with the model package versions. The display name of the lineage group. Represents the drift check baselines that can be used when the model monitor is set using the model package.
- * For more information, see the topic on Drift Detection against Previous Baselines in SageMaker Pipelines in the Amazon SageMaker Developer Guide.
- * The description of the lineage group. The machine learning domain of the model package you specified. Common machine
- * learning domains include computer vision and natural language processing. The creation time of lineage group. The machine learning task you specified that your model package accomplishes.
- * Common machine learning tasks include object detection and image classification. Information about the user who created or modified an experiment, trial, trial
+ * component, lineage group, or project. The Amazon Simple Storage Service (Amazon S3) path where the sample payload is stored. This path points to a single
- * gzip compressed tar archive (.tar.gz suffix). The last modified time of the lineage group. An array of additional Inference Specification objects. Each additional
- * Inference Specification specifies artifacts based on this model package that can
- * be used on inference endpoints. Generally used with SageMaker Neo to store the compiled artifacts. Information about the user who created or modified an experiment, trial, trial
+ * component, lineage group, or project. The name of the model group to describe. The name of the model. The name of the model group. Name of the SageMaker model. The Amazon Resource Name (ARN) of the model group. The location of the primary inference code, associated artifacts, and custom
+ * environment map that the inference code uses when it is deployed in production.
+ * A description of the model group. The containers in the inference pipeline. The time that the model group was created. Specifies details of how containers in a multi-container endpoint are called. Information about the user who created or modified an experiment, trial, trial
- * component, lineage group, or project. The Amazon Resource Name (ARN) of the IAM role that you specified for the
+ * model. The status of the model group. A VpcConfig object that specifies the VPC that this model has access
+ * to. For more information, see Protect Endpoints by Using an Amazon Virtual
+ * Private Cloud
+ * A timestamp that shows when the model was created. The Amazon Resource Name (ARN) of the model. If The name of the model quality job. The name must be unique within an Amazon Web Services Region in the
- * Amazon Web Services account. The name of the model bias job definition. The name must be unique within an Amazon Web Services Region
+ * in the Amazon Web Services account. The Amazon Resource Name (ARN) of the model quality job. The Amazon Resource Name (ARN) of the model bias job. The name of the quality job definition. The name must be unique within an Amazon Web Services Region in
- * the Amazon Web Services account. The name of the bias job definition. The name must be unique within an Amazon Web Services Region in the
+ * Amazon Web Services account. The time at which the model quality job was created. The time at which the model bias job was created. The baseline configuration for a model quality job. The baseline configuration for a model bias job. Configures the model quality job to run a specified Docker container image. Configures the model bias job to run a specified Docker container image. Inputs for the model quality job. Inputs for the model bias job. The output configuration for monitoring jobs. Identifies the resources to deploy for a monitoring job. Networking options for a model quality job. Networking options for a model bias job. The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to
- * perform tasks on your behalf. The Amazon Resource Name (ARN) of the Amazon Web Services Identity and Access Management (IAM) role that
+ * has read permission to the input data location and write permission to the output data
+ * location in Amazon S3. Name of a previously created monitoring schedule. The name of the model explainability job definition. The name must be unique within an
+ * Amazon Web Services Region in the Amazon Web Services account. Summary of information about the last monitoring job to run. The name of the monitoring schedule. The Amazon Resource Name (ARN) of the model explainability job. The time the monitoring job was scheduled. The name of the explainability job definition. The name must be unique within an Amazon Web Services
+ * Region in the Amazon Web Services account. The time at which the monitoring job was created. The time at which the model explainability job was created. A timestamp that indicates the last time the monitoring job was modified. The baseline configuration for a model explainability job. The status of the monitoring job. Configures the model explainability job to run a specified Docker container
+ * image. The Amazon Resource Name (ARN) of the monitoring job. Inputs for the model explainability job. The name of the endpoint used to run the monitoring job. The output configuration for monitoring jobs. Contains the reason a monitoring job failed, if it failed. Identifies the resources to deploy for a monitoring job. The name of the monitoring job. Networking options for a model explainability job. The type of the monitoring job. The Amazon Resource Name (ARN) of the Amazon Web Services Identity and Access Management (IAM) role that
+ * has read permission to the input data location and write permission to the output data
+ * location in Amazon S3. A time limit for how long the monitoring job is allowed to run before stopping. The Amazon Resource Name (ARN) of the monitoring schedule. Name of the monitoring schedule. The name or Amazon Resource Name (ARN) of the model package to describe. When you specify a name, the name must have 1 to 63 characters. Valid
+ * characters are a-z, A-Z, 0-9, and - (hyphen). The status of a monitoring job. The type of the monitoring job that this schedule runs. This is one of the following
- * values.
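For the monitoring-schedule fields mentioned above, a hedged sketch of reading a schedule and its last execution with the v3 client (the schedule name is a placeholder, not taken from this patch):

import { SageMakerClient, DescribeMonitoringScheduleCommand } from "@aws-sdk/client-sagemaker";

// Sketch: inspect a monitoring schedule's status, type, and last execution.
async function showSchedule(): Promise<void> {
  const client = new SageMakerClient({});
  const schedule = await client.send(
    new DescribeMonitoringScheduleCommand({ MonitoringScheduleName: "my-schedule" })
  );
  console.log(schedule.MonitoringScheduleStatus);
  console.log(schedule.MonitoringType);
  console.log(schedule.LastMonitoringExecutionSummary?.MonitoringExecutionStatus);
}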
- * Represents the overall status of a model package. A string, up to one KB in size, that contains the reason a monitoring job failed, if it
- * failed. The name of the model package for which the overall status is being reported. The time at which the monitoring job was created. The current status. The time at which the monitoring job was last modified. if the overall status is The configuration object that specifies the monitoring schedule and defines the
- * monitoring job. Specifies the validation and image scan statuses of the model package. The name of the endpoint for the monitoring job. The validation status of the model package. Describes metadata on the last execution to run, if there was one. The status of the scan of the Docker image container for the model package. The name of the notebook instance that you want information about. The name of the model package being described. If the model is a versioned model, the name of the model group that the versioned
+ * model belongs to. The version of the model package. The Amazon Resource Name (ARN) of the notebook instance. The Amazon Resource Name (ARN) of the model package. The name of the SageMaker notebook instance. A brief summary of the model package. The status of the notebook instance. A timestamp specifying when the model package was created. If status is Details about inference jobs that can be run with models based on this model
+ * package. The URL that you use to connect to the Jupyter notebook that is running in your
- * notebook instance. Details about the algorithm that was used to create the model package. The type of ML compute instance running on the notebook instance. Configurations for one or more transform jobs that SageMaker runs to test the model
+ * package. The ID of the VPC subnet. The current status of the model package. The IDs of the VPC security groups. Details about the current status of the model package. The Amazon Resource Name (ARN) of the IAM role associated with the instance.
- * Whether the model package is certified for listing on Amazon Web Services Marketplace. The Amazon Web Services KMS key ID SageMaker uses to encrypt data when storing it on the ML storage
- * volume attached to the instance. The approval status of the model package. The network interface IDs that SageMaker created at the time of creating the instance.
- * Information about the user who created or modified an experiment, trial, trial
+ * component, lineage group, or project. A timestamp. Use this parameter to retrieve the time when the notebook instance was
- * last modified. Metadata properties of the tracking entity, trial, or trial component. A timestamp. Use this parameter to return the time when the notebook instance was
- * created Metrics for the model. Returns the name of a notebook instance lifecycle configuration. For information about notebook instance lifestyle configurations, see Step
- * 2.1: (Optional) Customize a Notebook Instance
- * The last time that the model package was modified. Describes whether SageMaker provides internet access to the notebook instance. If this
- * value is set to Disabled, the notebook instance does not have
- * internet access, and cannot connect to SageMaker training and endpoint services. For more information, see Notebook Instances Are Internet-Enabled by Default. Information about the user who created or modified an experiment, trial, trial
+ * component, lineage group, or project. The size, in GB, of the ML storage volume attached to the notebook instance. A description provided for the model approval. A list of the Elastic Inference (EI) instance types associated with this notebook
- * instance. Currently only one EI instance type can be associated with a notebook
- * instance. For more information, see Using Elastic Inference in Amazon
- * SageMaker. The metadata properties associated with the model package versions. The Git repository associated with the notebook instance as its default code
- * repository. This can be either the name of a Git repository stored as a resource in your
- * account, or the URL of a Git repository in Amazon Web Services CodeCommit or in any
- * other Git repository. When you open a notebook instance, it opens in the directory that
- * contains this repository. For more information, see Associating Git Repositories with SageMaker
- * Notebook Instances. Represents the drift check baselines that can be used when the model monitor is set using the model package.
+ * For more information, see the topic on Drift Detection against Previous Baselines in SageMaker Pipelines in the Amazon SageMaker Developer Guide.
+ * An array of up to three Git repositories associated with the notebook instance. These
- * can be either the names of Git repositories stored as resources in your account, or the
- * URL of Git repositories in Amazon Web Services CodeCommit or in any
- * other Git repository. These repositories are cloned at the same level as the default
- * repository of your notebook instance. For more information, see Associating Git
- * Repositories with SageMaker Notebook Instances. The machine learning domain of the model package you specified. Common machine
+ * learning domains include computer vision and natural language processing. Whether root access is enabled or disabled for users of the notebook instance. Lifecycle configurations need root access to be able to set up a notebook
- * instance. Because of this, lifecycle configurations associated with a notebook
- * instance always run with root access even if you disable root access for
- * users. The machine learning task you specified that your model package accomplishes.
+ * Common machine learning tasks include object detection and image classification. The Amazon Simple Storage Service (Amazon S3) path where the sample payload is stored. This path points to a single
+ * gzip compressed tar archive (.tar.gz suffix). Information on the IMDS configuration of the notebook instance. An array of additional Inference Specification objects. Each additional
+ * Inference Specification specifies artifacts based on this model package that can
+ * be used on inference endpoints. Generally used with SageMaker Neo to store the compiled artifacts. The name of the lifecycle configuration to describe. The name of the model group to describe. The Amazon Resource Name (ARN) of the lifecycle configuration. The name of the model group. The name of the lifecycle configuration. The Amazon Resource Name (ARN) of the model group. The shell script that runs only once, when you create a notebook instance. A description of the model group. The shell script that runs every time you start a notebook instance, including when
- * you create the notebook instance. The time that the model group was created. A timestamp that tells when the lifecycle configuration was last modified. Information about the user who created or modified an experiment, trial, trial
+ * component, lineage group, or project. A timestamp that tells when the lifecycle configuration was created. The status of the model group. The name of the pipeline to describe. The name of the model quality job. The name must be unique within an Amazon Web Services Region in the
+ * Amazon Web Services account. The Amazon Resource Name (ARN) of the pipeline. The Amazon Resource Name (ARN) of the model quality job. The name of the pipeline. The name of the quality job definition. The name must be unique within an Amazon Web Services Region in
+ * the Amazon Web Services account. The display name of the pipeline. The time at which the model quality job was created. The JSON pipeline definition. The baseline configuration for a model quality job. The description of the pipeline. Configures the model quality job to run a specified Docker container image. The Amazon Resource Name (ARN) that the pipeline uses to execute. Inputs for the model quality job. The status of the pipeline execution. The output configuration for monitoring jobs. The time when the pipeline was created. Identifies the resources to deploy for a monitoring job. The time when the pipeline was last modified. Networking options for a model quality job. The time when the pipeline was last run. The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to
+ * perform tasks on your behalf. Information about the user who created or modified an experiment, trial, trial
- * component, lineage group, or project. A time limit for how long the monitoring job is allowed to run before stopping. Information about the user who created or modified an experiment, trial, trial
- * component, lineage group, or project. Lists the parallelism configuration applied to the pipeline. Name of a previously created monitoring schedule. Summary of information about the last monitoring job to run. The Amazon Resource Name (ARN) of the pipeline execution. The name of the monitoring schedule. The time the monitoring job was scheduled. The JSON pipeline definition. The time at which the monitoring job was created. The time when the pipeline was created. A timestamp that indicates the last time the monitoring job was modified. The status of the monitoring job. The Amazon Resource Name (ARN) of the pipeline execution. The Amazon Resource Name (ARN) of the monitoring job. The name of the endpoint used to run the monitoring job. Contains the reason a monitoring job failed, if it failed. Specifies the names of the experiment and trial created by a pipeline. The name of the experiment. The name of the monitoring job. The name of the trial. The type of the monitoring job. The Amazon Resource Name (ARN) of the pipeline. The Amazon Resource Name (ARN) of the pipeline execution. The display name of the pipeline execution. The Amazon Resource Name (ARN) of the monitoring schedule. The status of the pipeline execution. Name of the monitoring schedule. The description of the pipeline execution. The status of an monitoring job. Specifies the names of the experiment and trial created by a pipeline. The type of the monitoring job that this schedule runs. This is one of the following
+ * values.
+ * If the execution failed, a message describing why. A string, up to one KB in size, that contains the reason a monitoring job failed, if it
+ * failed. The time when the pipeline execution was created. The time at which the monitoring job was created. The time when the pipeline execution was modified last. The time at which the monitoring job was last modified. Information about the user who created or modified an experiment, trial, trial
- * component, lineage group, or project. The configuration object that specifies the monitoring schedule and defines the
+ * monitoring job. Information about the user who created or modified an experiment, trial, trial
- * component, lineage group, or project. The name of the endpoint for the monitoring job. The parallelism configuration applied to the pipeline. Describes metadata on the last execution to run, if there was one. The name of the processing job. The name must be unique within an Amazon Web Services Region in the
- * Amazon Web Services account. The name of the notebook instance that you want information about. The inputs for a processing job. The Amazon Resource Name (ARN) of the notebook instance. Output configuration for the processing job. The name of the SageMaker notebook instance. The name of the processing job. The name must be unique within an Amazon Web Services Region in the
- * Amazon Web Services account. The status of the notebook instance. Identifies the resources, ML compute instances, and ML storage volumes to deploy for a
- * processing job. In distributed training, you specify more than one instance. If status is The time limit for how long the processing job is allowed to run. The URL that you use to connect to the Jupyter notebook that is running in your
+ * notebook instance. Configures the processing job to run a specified container image. The type of ML compute instance running on the notebook instance. The environment variables set in the Docker container. The ID of the VPC subnet. Networking options for a processing job. The IDs of the VPC security groups. The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on
- * your behalf. The Amazon Resource Name (ARN) of the IAM role associated with the instance.
+ * The configuration information used to create an experiment. The Amazon Web Services KMS key ID SageMaker uses to encrypt data when storing it on the ML storage
+ * volume attached to the instance. The Amazon Resource Name (ARN) of the processing job. The network interface IDs that SageMaker created at the time of creating the instance.
+ * Provides the status of a processing job. A timestamp. Use this parameter to retrieve the time when the notebook instance was
+ * last modified. An optional string, up to one KB in size, that contains metadata from the processing
- * container when the processing job exits. A timestamp. Use this parameter to return the time when the notebook instance was
+ * created. A string, up to one KB in size, that contains the reason a processing job failed, if
- * it failed. Returns the name of a notebook instance lifecycle configuration. For information about notebook instance lifecycle configurations, see Step
+ * 2.1: (Optional) Customize a Notebook Instance
+ * The time at which the processing job completed. Describes whether SageMaker provides internet access to the notebook instance. If this
+ * value is set to Disabled, the notebook instance does not have
+ * internet access, and cannot connect to SageMaker training and endpoint services. For more information, see Notebook Instances Are Internet-Enabled by Default. The time at which the processing job started. The size, in GB, of the ML storage volume attached to the notebook instance. The time at which the processing job was last modified. A list of the Elastic Inference (EI) instance types associated with this notebook
+ * instance. Currently only one EI instance type can be associated with a notebook
+ * instance. For more information, see Using Elastic Inference in Amazon
+ * SageMaker. The time at which the processing job was created. The Git repository associated with the notebook instance as its default code
+ * repository. This can be either the name of a Git repository stored as a resource in your
+ * account, or the URL of a Git repository in Amazon Web Services CodeCommit or in any
+ * other Git repository. When you open a notebook instance, it opens in the directory that
+ * contains this repository. For more information, see Associating Git Repositories with SageMaker
+ * Notebook Instances. The ARN of a monitoring schedule for an endpoint associated with this processing
- * job. An array of up to three Git repositories associated with the notebook instance. These
+ * can be either the names of Git repositories stored as resources in your account, or the
+ * URL of Git repositories in Amazon Web Services CodeCommit or in any
+ * other Git repository. These repositories are cloned at the same level as the default
+ * repository of your notebook instance. For more information, see Associating Git
+ * Repositories with SageMaker Notebook Instances. The ARN of an AutoML job associated with this processing job. Whether root access is enabled or disabled for users of the notebook instance. Lifecycle configurations need root access to be able to set up a notebook
+ * instance. Because of this, lifecycle configurations associated with a notebook
+ * instance always run with root access even if you disable root access for
+ * users. The ARN of a training job associated with this processing job. The platform identifier of the notebook instance runtime environment. Information on the IMDS configuration of the notebook instance. The name of the project to describe. The name of the lifecycle configuration to describe. The Amazon Resource Name (ARN) of the lifecycle configuration.
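A hedged sketch of reading the notebook-instance fields described above with the v3 client (the instance name is a placeholder, not taken from this patch):

import { SageMakerClient, DescribeNotebookInstanceCommand } from "@aws-sdk/client-sagemaker";

// Sketch: describe a notebook instance and read a few of the documented fields.
async function showNotebook(): Promise<void> {
  const client = new SageMakerClient({});
  const nb = await client.send(
    new DescribeNotebookInstanceCommand({ NotebookInstanceName: "my-notebook" })
  );
  console.log(nb.NotebookInstanceStatus); // for example, InService
  console.log(nb.Url);                    // URL used to connect to Jupyter
  console.log(nb.DirectInternetAccess);   // Enabled or Disabled
  console.log(nb.RootAccess);             // Enabled or Disabled
}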
- * see What is Amazon Web Services Service
- * Catalog. The ID of the provisioned product. The name of the lifecycle configuration. The current status of the product.
- * The shell script that runs only once, when you create a notebook instance. The shell script that runs every time you start a notebook instance, including when
+ * you create the notebook instance. A timestamp that tells when the lifecycle configuration was last modified. A timestamp that tells when the lifecycle configuration was created. The Amazon Resource Name (ARN) of the project. The name of the pipeline to describe. The name of the project. The ID of the project. The Amazon Resource Name (ARN) of the pipeline. The description of the project. The name of the pipeline. Information used to provision a service catalog product. For information, see What is Amazon Web Services Service
- * Catalog. The display name of the pipeline. Information about a provisioned service catalog product. The JSON pipeline definition. The status of the project. The description of the pipeline. Information about the user who created or modified an experiment, trial, trial
- * component, lineage group, or project. The Amazon Resource Name (ARN) that the pipeline uses to execute. The time when the project was created. The status of the pipeline execution. The timestamp when project was last modified. The time when the pipeline was created. Information about the user who created or modified an experiment, trial, trial
- * component, lineage group, or project. The time when the pipeline was last modified. The time when the pipeline was last run. The name of the Studio Lifecycle Configuration to describe. Information about the user who created or modified an experiment, trial, trial
+ * component, lineage group, or project. Information about the user who created or modified an experiment, trial, trial
+ * component, lineage group, or project. Lists the parallelism configuration applied to the pipeline. The ARN of the Lifecycle Configuration to describe. The Amazon Resource Name (ARN) of the pipeline execution. The name of the Studio Lifecycle Configuration that is described. The creation time of the Studio Lifecycle Configuration. The JSON pipeline definition. This value is equivalent to CreationTime because Studio Lifecycle Configurations are immutable. The time when the pipeline was created. The content of your Studio Lifecycle Configuration script. The App type that the Lifecycle Configuration is attached to. The Amazon Resource Name (ARN) of the pipeline execution. Specifies the names of the experiment and trial created by a pipeline. The Amazon Resource Name (ARN) of the subscribed work team to describe. The name of the experiment. The name of the trial. Describes a work team of a vendor that does the a labelling job. The Amazon Resource Name (ARN) of the vendor that you have subscribed. The Amazon Resource Name (ARN) of the pipeline. The title of the service provided by the vendor in the Amazon Marketplace. The Amazon Resource Name (ARN) of the pipeline execution. The name of the vendor in the Amazon Marketplace. The display name of the pipeline execution. The description of the vendor from the Amazon Marketplace. The status of the pipeline execution. Marketplace product listing ID. The description of the pipeline execution. Specifies the names of the experiment and trial created by a pipeline. A If the execution failed, a message describing why. The time when the pipeline execution was created. The time when the pipeline execution was modified last. Information about the user who created or modified an experiment, trial, trial
+ * component, lineage group, or project. Information about the user who created or modified an experiment, trial, trial
+ * component, lineage group, or project. The parallelism configuration applied to the pipeline. The name of the training job. The name of the processing job. The name must be unique within an Amazon Web Services Region in the
+ * Amazon Web Services account. The name, value, and date and time of a metric that was emitted to Amazon CloudWatch. The name of the metric. The inputs for a processing job. The value of the metric. Output configuration for the processing job. The date and time that the algorithm emitted the metric. The name of the processing job. The name must be unique within an Amazon Web Services Region in the
+ * Amazon Web Services account. Identifies the resources, ML compute instances, and ML storage volumes to deploy for a
+ * processing job. In distributed training, you specify more than one instance. Information about the status of the rule evaluation. The name of the rule configuration. The time limit for how long the processing job is allowed to run. The Amazon Resource Name (ARN) of the rule evaluation job. Configures the processing job to run a specified container image. Status of the rule evaluation. The environment variables set in the Docker container. Details from the rule evaluation. Networking options for a processing job. Timestamp when the rule evaluation status was last modified. The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on
+ * your behalf. The configuration information used to create an experiment. An array element of DescribeTrainingJobResponse$SecondaryStatusTransitions. It provides
- * additional details about a status that the training job has transitioned through. A
- * training job can be in one of several states, for example, starting, downloading,
- * training, or uploading. Within each state, there are a number of intermediate states.
- * For example, within the starting state, SageMaker could be starting the training job or
- * launching the ML instances. These transitional states are referred to as the job's
- * secondary
- * status.
- * Contains secondary status information from a training
- * job. Status might be one of the following secondary statuses:
- * We no longer support the following secondary statuses:
- * A timestamp that shows when the training job transitioned to the current secondary
- * status state. A timestamp that shows when the training job transitioned out of this secondary status
- * state into another secondary status state or when the training job has ended. A detailed description of the progress within a secondary status.
- * SageMaker provides secondary statuses and status messages that apply to each of
- * them: Starting the training job. Launching requested ML
- * instances. Insufficient
- * capacity error from EC2 while launching instances,
- * retrying! Launched
- * instance was unhealthy, replacing it! Preparing the instances for training. Downloading the training image. Training
- * image download completed. Training in
- * progress. Status messages are subject to change. Therefore, we recommend not including them
- * in code that programmatically initiates actions. For example, don't use status
- * messages in if statements. To have an overview of your training job's progress, view
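Because status messages are subject to change, the status discussion above is normally consumed through the structured fields on DescribeTrainingJob. A hedged sketch with the v3 client (the job name is a placeholder, not taken from this patch):

import { SageMakerClient, DescribeTrainingJobCommand } from "@aws-sdk/client-sagemaker";

// Sketch: read primary status, secondary status, and the transition history
// instead of parsing free-form status messages.
async function showTrainingStatus(): Promise<void> {
  const client = new SageMakerClient({});
  const job = await client.send(
    new DescribeTrainingJobCommand({ TrainingJobName: "my-training-job" })
  );
  console.log(job.TrainingJobStatus); // InProgress | Completed | Failed | Stopping | Stopped
  console.log(job.SecondaryStatus);   // for example, Starting, Downloading, Training, Uploading
  for (const t of job.SecondaryStatusTransitions ?? []) {
    console.log(t.Status, t.StartTime, t.StatusMessage);
  }
  if (job.TrainingJobStatus === "Failed") {
    console.log(job.FailureReason);
  }
}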
- * Name of the model training job. The Amazon Resource Name (ARN) of the training job. The Amazon Resource Name (ARN) of the associated hyperparameter tuning job if the
- * training job was launched by a hyperparameter tuning job. The Amazon Resource Name (ARN) of the SageMaker Ground Truth labeling job that created the
- * transform or training job. The Amazon Resource Name (ARN) of an AutoML job. Information about the Amazon S3 location that is configured for storing model artifacts.
- * The status of the training job. SageMaker provides the following training job statuses:
- * For more detailed information, see Provides detailed information about the state of the training job. For detailed
- * information on the secondary status of the training job, see SageMaker provides primary statuses and secondary statuses that apply to each of
- * them:
- * Valid values for We no longer support the following secondary statuses:
- * If the training job failed, the reason it failed. Algorithm-specific parameters. Information about the algorithm used for training, and algorithm metadata.
- * The Amazon Web Services Identity and Access Management (IAM) role configured for the training job. An array of The S3 path where model artifacts that you configured when creating the job are
- * stored. SageMaker creates subfolders for model artifacts. Resources, including ML compute instances and ML storage volumes, that are
- * configured for model training. The Amazon Resource Name (ARN) of the processing job. A VpcConfig object that specifies the VPC that this training job has
- * access to. For more information, see Protect Training Jobs by Using an Amazon
- * Virtual Private Cloud. Provides the status of a processing job. Specifies a limit to how long a model training job can run. It also specifies how long
- * a managed Spot training job has to complete. When the job reaches the time limit, SageMaker
- * ends the training job. Use this API to cap model training costs. To stop a job, SageMaker sends the algorithm the An optional string, up to one KB in size, that contains metadata from the processing
+ * container when the processing job exits. A timestamp that indicates when the training job was created. A string, up to one KB in size, that contains the reason a processing job failed, if
+ * it failed. Indicates the time when the training job starts on training instances. You are
- * billed for the time interval between this time and the value of
- * The time at which the processing job completed. Indicates the time when the training job ends on training instances. You are billed
- * for the time interval between the value of The time at which the processing job started. A timestamp that indicates when the status of the training job was last
- * modified. The time at which the processing job was last modified. A history of all of the secondary statuses that the training job has transitioned
- * through. The time at which the processing job was created. A collection of The ARN of a monitoring schedule for an endpoint associated with this processing
+ * job. If you want to allow inbound or outbound network calls, except for calls between peers
- * within a training cluster for distributed training, choose The ARN of an AutoML job associated with this processing job. To encrypt all communications between ML compute instances in distributed training,
- * choose The ARN of a training job associated with this processing job. A Boolean indicating whether managed spot training is enabled ( Contains information about the output location for managed spot training checkpoint
- * data. The name of the project to describe. The training time in seconds. The billable time in seconds. Billable time refers to the absolute wall-clock
- * time. Multiply You can calculate the savings from using managed spot training using the formula
- * Details of a provisioned service catalog product. For information about service catalog,
+ * see What is Amazon Web Services Service
+ * Catalog. Configuration information for the Debugger hook parameters, metric and tensor collections, and
- * storage paths. To learn more about
- * how to configure the The ID of the provisioned product. Associates a SageMaker job as a trial component with an experiment and trial. Specified when
- * you call the following APIs: The current status of the product.
- * CreateProcessingJob, CreateTrainingJob, and CreateTransformJob
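A hedged sketch of the experiment/trial association described above, expressed with the SDK's ExperimentConfig shape (all names are placeholders, not taken from this patch):

import type { ExperimentConfig } from "@aws-sdk/client-sagemaker";

// Sketch: the association object passed on CreateProcessingJob, CreateTrainingJob,
// and CreateTransformJob requests as the ExperimentConfig member.
const experimentConfig: ExperimentConfig = {
  ExperimentName: "churn-experiment",
  TrialName: "churn-trial-1",
  TrialComponentDisplayName: "preprocess-step",
};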
+ * Configuration information for Debugger rules for debugging output tensors. Configuration of storage locations for the Debugger TensorBoard output data. Evaluation status of Debugger rules for debugging on a training job. Configuration information for Debugger system monitoring, framework profiling, and
- * storage paths. Configuration information for Debugger rules for profiling system and framework
- * metrics. Evaluation status of Debugger rules for profiling on a training job. Profiling status of a training job. The number of times to retry the job when the job fails due to an
- * The environment variables to set in the Docker container. The name of the transform job that you want to view details of. The name of the transform job. The Amazon Resource Name (ARN) of the transform job. The
- * status of the transform job. If the transform job failed, the reason
- * is returned in the The Amazon Resource Name (ARN) of the project. If the transform job failed, The name of the project. The name of the model used in the transform job. The ID of the project. The
- * maximum number
- * of
- * parallel requests on each instance node
- * that can be launched in a transform job. The default value is 1. The description of the project. The timeout and maximum number of retries for processing a transform job
- * invocation. Information used to provision a service catalog product. For information, see What is Amazon Web Services Service
+ * Catalog. The
- * maximum
- * payload size, in MB, used in the
- * transform job. Information about a provisioned service catalog product. Specifies the number of records to include in a mini-batch for an HTTP inference
- * request.
- * A record
- * is a single unit of input data that inference
- * can be made on. For example, a single line in a CSV file is a record. To enable the batch strategy, you must set The status of the project. The
- * environment variables to set in the Docker container. We support up to 16 key and values
- * entries in the map. Information about the user who created or modified an experiment, trial, trial
+ * component, lineage group, or project. Describes the dataset to be transformed and the Amazon S3 location where it is
- * stored. The time when the project was created. Identifies the Amazon S3 location where you want Amazon SageMaker to save the results from the
- * transform job. The timestamp when project was last modified. Describes
- * the resources, including ML instance types and ML instance count, to
- * use for the transform job. Information about the user who created or modified an experiment, trial, trial
+ * component, lineage group, or project. A timestamp that shows when the transform Job was created. Indicates when the transform job starts
- * on
- * ML instances. You are billed for the time interval between this time
- * and the value of The name of the Studio Lifecycle Configuration to describe. Indicates when the transform job has been
- *
- * completed, or has stopped or failed. You are billed for the time
- * interval between this time and the value of The Amazon Resource Name (ARN) of the Amazon SageMaker Ground Truth labeling job that created the
- * transform or training job. The ARN of the Lifecycle Configuration to describe. The Amazon Resource Name (ARN) of the AutoML transform job. The name of the Studio Lifecycle Configuration that is described. The data structure used to specify the data to be used for inference in a batch
- * transform job and to associate the data that is relevant to the prediction results in
- * the output. The input filter provided allows you to exclude input data that is not
- * needed for inference in a batch transform job. The output filter provided allows you to
- * include input data relevant to interpreting the predictions in the output from the job.
- * For more information, see Associate Prediction
- * Results with their Corresponding Input Records. The creation time of the Studio Lifecycle Configuration. Associates a SageMaker job as a trial component with an experiment and trial. Specified when
- * you call the following APIs:
- * CreateProcessingJob, CreateTrainingJob, and CreateTransformJob
- * This value is equivalent to CreationTime because Studio Lifecycle Configurations are immutable. The content of your Studio Lifecycle Configuration script. The name of the trial to describe. The App type that the Lifecycle Configuration is attached to. The source of the trial. The Amazon Resource Name (ARN) of the source. The source job type. The Amazon Resource Name (ARN) of the subscribed work team to describe. The name of the trial. Describes a work team of a vendor that does the a labelling job. The Amazon Resource Name (ARN) of the trial. The Amazon Resource Name (ARN) of the vendor that you have subscribed. The name of the trial as displayed. If The title of the service provided by the vendor in the Amazon Marketplace. The name of the experiment the trial is part of. The name of the vendor in the Amazon Marketplace. The Amazon Resource Name (ARN) of the source and, optionally, the job type. The description of the vendor from the Amazon Marketplace. When the trial was created. Marketplace product listing ID. Who created the trial. When the trial was last modified. A Who last modified the trial. Metadata properties of the tracking entity, trial, or trial component. The name of the training job. The name, value, and date and time of a metric that was emitted to Amazon CloudWatch. The name of the metric. The value of the metric. The name of the trial component to describe. The date and time that the algorithm emitted the metric. A summary of the metrics of a trial component. Information about the status of the rule evaluation. The name of the metric. The name of the rule configuration. The Amazon Resource Name (ARN) of the source. The Amazon Resource Name (ARN) of the rule evaluation job. When the metric was last updated. Status of the rule evaluation. The maximum value of the metric. Details from the rule evaluation. The minimum value of the metric. Timestamp when the rule evaluation status was last modified. The most recent value of the metric. An array element of DescribeTrainingJobResponse$SecondaryStatusTransitions. It provides
+ * additional details about a status that the training job has transitioned through. A
+ * training job can be in one of several states, for example, starting, downloading,
+ * training, or uploading. Within each state, there are a number of intermediate states.
+ * For example, within the starting state, SageMaker could be starting the training job or
+ * launching the ML instances. These transitional states are referred to as the job's
+ * secondary
+ * status.
+ * The number of samples used to generate the metric. Contains secondary status information from a training
+ * job. Status might be one of the following secondary statuses:
+ * We no longer support the following secondary statuses:
+ * The average value of the metric. A timestamp that shows when the training job transitioned to the current secondary
+ * status state. The standard deviation of the metric. A timestamp that shows when the training job transitioned out of this secondary status
+ * state into another secondary status state or when the training job has ended. A detailed description of the progress within a secondary status.
+ * SageMaker provides secondary statuses and status messages that apply to each of
+ * them: Starting the training job. Launching requested ML
+ * instances. Insufficient
+ * capacity error from EC2 while launching instances,
+ * retrying! Launched
+ * instance was unhealthy, replacing it! Preparing the instances for training. Downloading the training image. Training
+ * image download completed. Training in
+ * progress. Status messages are subject to change. Therefore, we recommend not including them
+ * in code that programmatically initiates actions. For example, don't use status
+ * messages in if statements. To have an overview of your training job's progress, view
+ * The Amazon Resource Name (ARN) and job type of the source of a trial component. The source ARN. Name of the model training job. The source job type. The Amazon Resource Name (ARN) of the training job. The Amazon Resource Name (ARN) of the associated hyperparameter tuning job if the
+ * training job was launched by a hyperparameter tuning job. The name of the trial component. The Amazon Resource Name (ARN) of the SageMaker Ground Truth labeling job that created the
+ * transform or training job. The Amazon Resource Name (ARN) of the trial component. The Amazon Resource Name (ARN) of an AutoML job. The name of the component as displayed. If Information about the Amazon S3 location that is configured for storing model artifacts.
+ * The Amazon Resource Name (ARN) of the source and, optionally, the job type. The status of the training job. SageMaker provides the following training job statuses:
+ * For more detailed information, see The status of the component. States include: Provides detailed information about the state of the training job. For detailed
+ * information on the secondary status of the training job, see SageMaker provides primary statuses and secondary statuses that apply to each of
+ * them:
+ * Valid values for We no longer support the following secondary statuses: InProgress
+ * Completed
+ * Failed
+ * When the component started. If the training job failed, the reason it failed. When the component ended. Algorithm-specific parameters. When the component was created. Information about the algorithm used for training, and algorithm metadata.
+ * Who created the trial component. The Amazon Web Services Identity and Access Management (IAM) role configured for the training job. When the component was last modified. An array of Who last modified the component. The S3 path where model artifacts that you configured when creating the job are
+ * stored. SageMaker creates subfolders for model artifacts. The hyperparameters of the component. Resources, including ML compute instances and ML storage volumes, that are
+ * configured for model training. The input artifacts of the component. A VpcConfig object that specifies the VPC that this training job has
+ * access to. For more information, see Protect Training Jobs by Using an Amazon
+ * Virtual Private Cloud. The output artifacts of the component. Specifies a limit to how long a model training job can run. It also specifies how long
+ * a managed Spot training job has to complete. When the job reaches the time limit, SageMaker
+ * ends the training job. Use this API to cap model training costs. To stop a job, SageMaker sends the algorithm the A timestamp that indicates when the training job was created. Indicates the time when the training job starts on training instances. You are
+ * billed for the time interval between this time and the value of
+ * Indicates the time when the training job ends on training instances. You are billed
+ * for the time interval between the value of A timestamp that indicates when the status of the training job was last
+ * modified. A history of all of the secondary statuses that the training job has transitioned
+ * through. Metadata properties of the tracking entity, trial, or trial component. A collection of The metrics for the component. If you want to allow inbound or outbound network calls, except for calls between peers
+ * within a training cluster for distributed training, choose The Amazon Resource Name (ARN) of the lineage group. To encrypt all communications between ML compute instances in distributed training,
+ * choose A Boolean indicating whether managed spot training is enabled ( The domain ID. Contains information about the output location for managed spot training checkpoint
+ * data. The user profile name. This value is not case sensitive. The training time in seconds. The billable time in seconds. Billable time refers to the absolute wall-clock
+ * time. Multiply You can calculate the savings from using managed spot training using the formula
+ * The ID of the domain that contains the profile. Configuration information for the Debugger hook parameters, metric and tensor collections, and
+ * storage paths. To learn more about
+ * how to configure the The user profile Amazon Resource Name (ARN). Associates a SageMaker job as a trial component with an experiment and trial. Specified when
+ * you call the following APIs:
+ * CreateProcessingJob
+ *
+ * CreateTrainingJob
+ *
+ * CreateTransformJob
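A minimal sketch of the association described above; the experiment, trial, and display names are hypothetical. The same ExperimentConfig shape is accepted by CreateProcessingJob, CreateTrainingJob, and CreateTransformJob requests.

import type { ExperimentConfig } from "@aws-sdk/client-sagemaker";

// Hypothetical names; pass this object as the ExperimentConfig member of a
// CreateProcessingJob, CreateTrainingJob, or CreateTransformJob request.
const experimentConfig: ExperimentConfig = {
  ExperimentName: "my-experiment",
  TrialName: "my-trial",
  TrialComponentDisplayName: "training-run-1",
};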
+ * The user profile name. Configuration information for Debugger rules for debugging output tensors. The ID of the user's profile in the Amazon Elastic File System (EFS) volume. Configuration of storage locations for the Debugger TensorBoard output data. The status. Evaluation status of Debugger rules for debugging on a training job. The last modified time. Configuration information for Debugger system monitoring, framework profiling, and
+ * storage paths. The creation time. Configuration information for Debugger rules for profiling system and framework
+ * metrics. The failure reason. Evaluation status of Debugger rules for profiling on a training job. The SSO user identifier. Profiling status of a training job. The SSO user value. The number of times to retry the job when the job fails due to an
+ * A collection of settings. The environment variables to set in the Docker container. The name of the private workforce whose access you want to restrict.
- * The name of the transform job that you want to view details of. Your OIDC IdP workforce configuration. The OIDC IdP client ID used to configure your private workforce. The OIDC IdP issuer used to configure your private workforce. The OIDC IdP authorization endpoint used to configure your private workforce. The OIDC IdP token endpoint used to configure your private workforce. The OIDC IdP user information endpoint used to configure your private workforce. The name of the transform job. The OIDC IdP logout endpoint used to configure your private workforce. The Amazon Resource Name (ARN) of the transform job. The OIDC IdP JSON Web Key Set (Jwks) URI used to configure your private workforce. The
+ * status of the transform job. If the transform job failed, the reason
+ * is returned in the If the transform job failed, A VpcConfig object that specifies the VPC that you want your workforce to connect to. The ID of the VPC that the workforce uses for communication. The name of the model used in the transform job. The VPC security group IDs, in the form sg-xxxxxxxx. The security groups must be for the same VPC as specified in the subnet. The
+ * maximum number
+ * of
+ * parallel requests on each instance node
+ * that can be launched in a transform job. The default value is 1. The ID of the subnets in the VPC that you want to connect. The timeout and maximum number of retries for processing a transform job
+ * invocation. The IDs for the VPC service endpoints of your VPC workforce when it is created and updated. The
+ * maximum
+ * payload size, in MB, used in the
+ * transform job. Specifies the number of records to include in a mini-batch for an HTTP inference
+ * request.
+ * A record
+ * is a single unit of input data that inference
+ * can be made on. For example, a single line in a CSV file is a record. To enable the batch strategy, you must set A single private workforce, which is automatically created when you create your first
- * private work team. You can create one private work force in each Amazon Web Services Region. By default,
- * any workforce-related API operation used in a specific region will apply to the
- * workforce created in that region. To learn how to create a private workforce, see Create a Private Workforce. The name of the private workforce. The
+ * environment variables to set in the Docker container. We support up to 16 key and values
+ * entries in the map. The Amazon Resource Name (ARN) of the private workforce. Describes the dataset to be transformed and the Amazon S3 location where it is
+ * stored. The most recent date that was used to
- * successfully add one or more IP address ranges (CIDRs) to a private workforce's
- * allow list. Identifies the Amazon S3 location where you want Amazon SageMaker to save the results from the
+ * transform job. A list of one to ten IP address ranges (CIDRs) to be added to the
- * workforce allow list. By default, a workforce isn't restricted to specific IP addresses. Describes
+ * the resources, including ML instance types and ML instance count, to
+ * use for the transform job. The subdomain for your OIDC Identity Provider. A timestamp that shows when the transform Job was created. The configuration of an Amazon Cognito workforce.
- * A single Cognito workforce is created using and corresponds to a single
- *
- * Amazon Cognito user pool. Indicates when the transform job starts
+ * on
+ * ML instances. You are billed for the time interval between this time
+ * and the value of The configuration of an OIDC Identity Provider (IdP) private workforce. Indicates when the transform job has been
+ *
+ * completed, or has stopped or failed. You are billed for the time
+ * interval between this time and the value of The date that the workforce is created. The Amazon Resource Name (ARN) of the Amazon SageMaker Ground Truth labeling job that created the
+ * transform or training job. The configuration of a VPC workforce. The Amazon Resource Name (ARN) of the AutoML transform job. The status of your workforce. The data structure used to specify the data to be used for inference in a batch
+ * transform job and to associate the data that is relevant to the prediction results in
+ * the output. The input filter provided allows you to exclude input data that is not
+ * needed for inference in a batch transform job. The output filter provided allows you to
+ * include input data relevant to interpreting the predictions in the output from the job.
+ * For more information, see Associate Prediction
+ * Results with their Corresponding Input Records. The reason your workforce failed. Associates a SageMaker job as a trial component with an experiment and trial. Specified when
+ * you call the following APIs:
+ * CreateProcessingJob
+ *
+ * CreateTrainingJob
+ *
+ * CreateTransformJob
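The batch transform fields described above (the batch strategy, maximum payload size, maximum parallel requests per instance node, and the input, output, and resource structures) come together in a CreateTransformJob request. A minimal sketch, assuming placeholder job, model, bucket, and instance settings:

import { SageMakerClient, CreateTransformJobCommand } from "@aws-sdk/client-sagemaker";

const client = new SageMakerClient({ region: "us-east-1" });

// All names, S3 URIs, and the instance type below are placeholders.
await client.send(
  new CreateTransformJobCommand({
    TransformJobName: "my-transform-job",
    ModelName: "my-model",
    MaxConcurrentTransforms: 1,   // parallel requests per instance node
    MaxPayloadInMB: 6,            // maximum payload size per request
    BatchStrategy: "MultiRecord", // mini-batching requires SplitType on the input
    TransformInput: {
      DataSource: { S3DataSource: { S3DataType: "S3Prefix", S3Uri: "s3://my-bucket/input/" } },
      ContentType: "text/csv",
      SplitType: "Line",          // one CSV line = one record
    },
    TransformOutput: { S3OutputPath: "s3://my-bucket/output/" },
    TransformResources: { InstanceType: "ml.m5.xlarge", InstanceCount: 1 },
  })
);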
+ * A single private workforce, which is automatically created when you create your first
- * private work team. You can create one private work force in each Amazon Web Services Region. By default,
- * any workforce-related API operation used in a specific region will apply to the
- * workforce created in that region. To learn how to create a private workforce, see Create a Private Workforce. The name of the trial to describe. The source of the trial. The name of the work team to return a description of. The Amazon Resource Name (ARN) of the source. The source job type. Provides details about a labeling work team. The name of the work team. The name of the trial. A list of Workforces can be created using Amazon Cognito or your own OIDC Identity Provider (IdP).
- * For private workforces created using Amazon Cognito use
- * The Amazon Resource Name (ARN) of the trial. The Amazon Resource Name (ARN) that identifies the work team. The name of the trial as displayed. If The Amazon Resource Name (ARN) of the workforce. The name of the experiment the trial is part of. The Amazon Marketplace identifier for a vendor's work team. The Amazon Resource Name (ARN) of the source and, optionally, the job type. A description of the work team. When the trial was created. The URI of the labeling job's user interface. Workers open this URI to start labeling
- * your data objects. Who created the trial. The date and time that the work team was created (timestamp). When the trial was last modified. The date and time that the work team was last updated (timestamp). Who last modified the trial. Configures SNS notifications of available or expiring work items for work
- * teams. Metadata properties of the tracking entity, trial, or trial component. A The name of the trial component to describe. Specifies weight and capacity values for a production variant. A summary of the metrics of a trial component. The name of the variant to update. The variant's weight. The variant's capacity. The name of the metric. Information of a particular device. The name of the device. The Amazon Resource Name (ARN) of the source. Description of the device. When the metric was last updated. Amazon Web Services Internet of Things (IoT) object name. The maximum value of the metric. The minimum value of the metric. Summary of the device fleet. Amazon Resource Name (ARN) of the device fleet. The most recent value of the metric. Name of the device fleet. The number of samples used to generate the metric. Timestamp of when the device fleet was created. The average value of the metric. Timestamp of when the device fleet was last updated. The standard deviation of the metric. Status of devices. The Amazon Resource Name (ARN) and job type of the source of a trial component. The number of devices connected with a heartbeat. The source ARN. The number of registered devices. The source job type. Summary of model on edge device. The name of the model. The name of the trial component. The version model. The Amazon Resource Name (ARN) of the trial component. The name of the component as displayed. If Summary of the device. The unique identifier of the device. The Amazon Resource Name (ARN) of the source and, optionally, the job type. Amazon Resource Name (ARN) of the device. The status of the component. States include: InProgress Completed Failed A description of the device. When the component started. The name of the fleet the device belongs to. When the component ended. The Amazon Web Services Internet of Things (IoT) object thing name associated with the device.. When the component was created. The timestamp of the last registration or de-reregistration. Who created the trial component. The last heartbeat received from the device. When the component was last modified. Models on the device. Who last modified the component. Edge Manager agent version. The hyperparameters of the component. The input artifacts of the component. The output artifacts of the component. Metadata properties of the tracking entity, trial, or trial component. The name of the component to disassociate from the trial. The metrics for the component. The name of the trial to disassociate from. The Amazon Resource Name (ARN) of the lineage group. The ARN of the trial component. The domain ID. The Amazon Resource Name (ARN) of the trial. The user profile name. This value is not case sensitive. The domain's details. The domain's Amazon Resource Name (ARN). The ID of the domain that contains the profile. The domain ID. The user profile Amazon Resource Name (ARN). The domain name. The user profile name. The status. The ID of the user's profile in the Amazon Elastic File System (EFS) volume. The creation time. The status. The last modified time. The domain's URL. The creation time. The failure reason. The SSO user identifier. A collection of settings that update the current configuration for the The execution role for the The SSO user value. Specifies the ARN's of a SageMaker image and SageMaker image version, and the instance type that
- * the version runs on. A collection of settings. A collection of A collection of The name of the private workforce whose access you want to restrict.
+ * A directed edge connecting two lineage entities. Your OIDC IdP workforce configuration. The Amazon Resource Name (ARN) of the source lineage entity of the directed edge. The OIDC IdP client ID used to configure your private workforce. The Amazon Resource Name (ARN) of the destination lineage entity of the directed edge. The OIDC IdP issuer used to configure your private workforce. The type of the Association(Edge) between the source and destination. For example The OIDC IdP authorization endpoint used to configure your private workforce. The OIDC IdP token endpoint used to configure your private workforce. The OIDC IdP user information endpoint used to configure your private workforce. The OIDC IdP logout endpoint used to configure your private workforce. The OIDC IdP JSON Web Key Set (Jwks) URI used to configure your private workforce. Status of edge devices with this model. A VpcConfig object that specifies the VPC that you want your workforce to connect to. The name of the model. The model version. The number of devices that have this model version and do not have a heart beat. The ID of the VPC that the workforce uses for communication. The number of devices that have this model version and have a heart beat. The VPC security group IDs, in the form sg-xxxxxxxx. The security groups must be for the same VPC as specified in the subnet. The number of devices that have this model version, a heart beat, and are currently running. The ID of the subnets in the VPC that you want to connect. The number of devices with this model version and are producing sample data. The IDs for the VPC service endpoints of your VPC workforce when it is created and updated. Summary of edge packaging job. A single private workforce, which is automatically created when you create your first
+ * private work team. You can create one private workforce in each Amazon Web Services Region. By default,
+ * any workforce-related API operation used in a specific region will apply to the
+ * workforce created in that region. To learn how to create a private workforce, see Create a Private Workforce. The Amazon Resource Name (ARN) of the edge packaging job. The name of the private workforce. The name of the edge packaging job. The Amazon Resource Name (ARN) of the private workforce. The status of the edge packaging job. The most recent date that was used to
+ * successfully add one or more IP address ranges (CIDRs) to a private workforce's
+ * allow list. The name of the SageMaker Neo compilation job. A list of one to ten IP address ranges (CIDRs) to be added to the
+ * workforce allow list. By default, a workforce isn't restricted to specific IP addresses. The name of the model. The subdomain for your OIDC Identity Provider. The version of the model. The configuration of an Amazon Cognito workforce.
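A minimal sketch of restricting the workforce allow list described above, assuming the automatically created workforce and a placeholder CIDR range:

import { SageMakerClient, UpdateWorkforceCommand } from "@aws-sdk/client-sagemaker";

const client = new SageMakerClient({ region: "us-east-1" });

// "default" is the name commonly given to the automatically created private
// workforce; the CIDR below is a placeholder. Omit SourceIpConfig entirely to
// leave access unrestricted.
await client.send(
  new UpdateWorkforceCommand({
    WorkforceName: "default",
    SourceIpConfig: { Cidrs: ["203.0.113.0/24"] }, // one to ten CIDR ranges
  })
);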
+ * A single Cognito workforce is created using and corresponds to a single
+ *
+ * Amazon Cognito user pool. The timestamp of when the job was created. The configuration of an OIDC Identity Provider (IdP) private workforce. The timestamp of when the edge packaging job was last updated. The date that the workforce is created. The configuration of a VPC workforce. The configurations and outcomes of an Amazon EMR step execution. The identifier of the EMR cluster. The status of your workforce. The identifier of the EMR cluster step. The reason your workforce failed. The name of the EMR cluster step. The path to the log file where the cluster step's failure root cause
- * is recorded. A single private workforce, which is automatically created when you create your first
+ * private work team. You can create one private workforce in each Amazon Web Services Region. By default,
+ * any workforce-related API operation used in a specific region will apply to the
+ * workforce created in that region. To learn how to create a private workforce, see Create a Private Workforce. The name of the work team to return a description of. A schedule for a model monitoring job. For information about model monitor, see
- * Amazon SageMaker Model
- * Monitor. Provides details about a labeling work team. The Amazon Resource Name (ARN) of the monitoring schedule. The name of the monitoring schedule. The name of the work team. The status of the monitoring schedule. This can be one of the following values.
- *
- *
- *
- * A list of Workforces can be created using Amazon Cognito or your own OIDC Identity Provider (IdP).
+ * For private workforces created using Amazon Cognito use
+ * The type of the monitoring job definition to schedule. The Amazon Resource Name (ARN) that identifies the work team. If the monitoring schedule failed, the reason it failed. The Amazon Resource Name (ARN) of the workforce. The time that the monitoring schedule was created. The Amazon Marketplace identifier for a vendor's work team. The last time the monitoring schedule was changed. A description of the work team. Configures the monitoring schedule and defines the monitoring job. The URI of the labeling job's user interface. Workers open this URI to start labeling
+ * your data objects. The endpoint that hosts the model being monitored. The date and time that the work team was created (timestamp). Summary of information about the last monitoring job to run. The date and time that the work team was last updated (timestamp). A list of the tags associated with the monitoring schedlue. For more information, see Tagging Amazon Web Services
- * resources in the Amazon Web Services General Reference Guide. Configures SNS notifications of available or expiring work items for work
+ * teams. A hosted endpoint for real-time inference. The name of the endpoint. The Amazon Resource Name (ARN) of the endpoint. The endpoint configuration associated with the endpoint. A A list of the production variants hosted on the endpoint. Each production variant is a
- * model. Specifies weight and capacity values for a production variant. The currently active data capture configuration used by your Endpoint. The name of the variant to update. The status of the endpoint. The variant's weight. If the endpoint failed, the reason it failed. The variant's capacity. The time that the endpoint was created. Information of a particular device. The last time the endpoint was modified. The name of the device. A list of monitoring schedules for the endpoint. For information about model
- * monitoring, see Amazon SageMaker Model Monitor. Description of the device. A list of the tags associated with the endpoint. For more information, see Tagging Amazon Web Services
- * resources in the Amazon Web Services General Reference Guide. Amazon Web Services Internet of Things (IoT) object name. Provides summary information for an endpoint configuration. Contains information summarizing device details and deployment status. The name of the endpoint configuration. The ARN of the edge deployment plan. The Amazon Resource Name (ARN) of the endpoint configuration. The name of the edge deployment plan. A timestamp that shows when the endpoint configuration was created. The name of the stage in the edge deployment plan. The name of the deployed stage. The name of the fleet to which the device belongs to. Provides summary information for an endpoint. The name of the endpoint. The name of the device. The Amazon Resource Name (ARN) of the endpoint. The ARN of the device. A timestamp that shows when the endpoint was created. The deployment status of the device. A timestamp that shows when the endpoint was last modified. The detailed error message for the deployoment status result. The status of the endpoint.
- *
- *
- *
- *
- *
- *
- *
- * To get a list of endpoints with a specified status, use the ListEndpointsInput$StatusEquals filter. The description of the device. The time when the deployment on the device started. The properties of an experiment as returned by the Search API. Summary of the device fleet. The name of the experiment. Amazon Resource Name (ARN) of the device fleet. The Amazon Resource Name (ARN) of the experiment. Name of the device fleet. The name of the experiment as displayed. If Timestamp of when the device fleet was created. The source of the experiment. Timestamp of when the device fleet was last updated. The description of the experiment. Status of devices. When the experiment was created. The number of devices connected with a heartbeat. Who created the experiment. The number of registered devices. When the experiment was last modified. Summary of model on edge device. Information about the user who created or modified an experiment, trial, trial
- * component, lineage group, or project. The name of the model. The list of tags that are associated with the experiment. You can use Search API to search on the tags. The version model. A summary of the properties of an experiment. To get the complete set of properties, call
- * the DescribeExperiment API and provide the
- * Summary of the device. The Amazon Resource Name (ARN) of the experiment. The unique identifier of the device. The name of the experiment. Amazon Resource Name (ARN) of the device. The name of the experiment as displayed. If A description of the device. The source of the experiment. The name of the fleet the device belongs to. When the experiment was created. The Amazon Web Services Internet of Things (IoT) object thing name associated with the device.. When the experiment was last modified. The timestamp of the last registration or de-reregistration. The last heartbeat received from the device. Models on the device. Edge Manager agent version. The container for the metadata for Fail step. A message that you define and then is processed and rendered by
- * the Fail step when the error occurs. Amazon SageMaker Feature Store stores features in a collection called Feature Group.
- * A Feature Group can be visualized as a table which has rows,
- * with a unique identifier for each row where each column in the table is a feature.
- * In principle, a Feature Group is composed of features and values per features. The Amazon Resource Name (ARN) of a The name of the component to disassociate from the trial. The name of the The name of the trial to disassociate from. The name of the The name of the feature that stores the A The ARN of the trial component. A list of Valid
- * You can create up to 2,500 The Amazon Resource Name (ARN) of the trial. The time a The domain's details. A timestamp indicating the last time you updated the feature group. The domain's Amazon Resource Name (ARN). Use this to specify the Amazon Web Services Key Management Service (KMS) Key ID, or
- * The domain ID. The configuration of an Provide an To encrypt an The domain name. The Amazon Resource Name (ARN) of the IAM execution role used to create the feature
- * group. The status. A The creation time. The status of The last modified time. A value that indicates whether the feature group was updated successfully. The domain's URL. The reason that the A collection of settings that update the current configuration for the A free form description of a The execution role for the Tags used to define a Specifies the ARN's of a SageMaker image and SageMaker image version, and the instance type that
+ * the version runs on. The name, Arn, A collection of The name of A collection of Unique identifier for the A directed edge connecting two lineage entities. A timestamp indicating the time of creation time of the The Amazon Resource Name (ARN) of the source lineage entity of the directed edge. The status of a FeatureGroup. The status can be any of the following:
- * The Amazon Resource Name (ARN) of the destination lineage entity of the directed edge. Notifies you if replicating data into the The type of the Association(Edge) between the source and destination. For example The metadata for a feature. It can either be metadata that you specify, or metadata that is updated automatically. Contains information summarizing an edge deployment plan. The Amazon Resource Number (ARN) of the feature group. The ARN of the edge deployment plan. The name of the feature group containing the feature. The name of the edge deployment plan. The name of feature. The name of the device fleet used for the deployment. The data type of the feature. The number of edge devices with the successful deployment. A timestamp indicating when the feature was created. The number of edge devices yet to pick up the deployment, or in progress. A timestamp indicating when the feature was last modified. The number of edge devices that failed the deployment. An optional description that you specify to better describe the feature. The time when the edge deployment plan was created. Optional key-value pairs that you specify to better describe the feature. The time when the edge deployment plan was last updated. A conditional statement for a search expression that includes a resource property, a
- * Boolean operator, and a value. Resources that match the statement are returned in the
- * results from the Search API. If you specify a In search, there are several property types: To define a metric filter, enter a value using the form
- *
- *
- *
- *
- *
- * To define a hyperparameter filter, enter a value with the form
- *
- *
- *
- *
- *
- * To define a tag filter, enter a value with the form
- * A resource property name. For example, A Boolean binary operator that is used to evaluate the filter. The operator field
- * contains one of the following values: The value of The value of The The The value of The value of The value of The value of The value of The value of A
- *
- *
- *
- *
- *
- *
- *
- *
- *
- *
- * A Status of edge devices with this model. The name of the model. A value used with The model version. The number of devices that have this model version and do not have a heart beat. The number of devices that have this model version and have a heart beat. The number of devices that have this model version, a heart beat, and are currently running. The number of devices with this model version and are producing sample data. Contains summary information about the flow definition. Summary of edge packaging job. The name of the flow definition. The Amazon Resource Name (ARN) of the edge packaging job. The Amazon Resource Name (ARN) of the flow definition. The name of the edge packaging job. The status of the flow definition. Valid values: The status of the edge packaging job. The timestamp when SageMaker created the flow definition. The name of the SageMaker Neo compilation job. The reason why the flow definition creation failed. A failure reason is returned only when the flow definition status is The name of the model. The version of the model. The timestamp of when the job was created. The timestamp of when the edge packaging job was last updated. The configurations and outcomes of an Amazon EMR step execution. The name of the fleet. The identifier of the EMR cluster. The identifier of the EMR cluster step. The name of the EMR cluster step. The path to the log file where the cluster step's failure root cause
+ * is recorded. The Amazon Resource Name (ARN) of the device. The name of the fleet. A schedule for a model monitoring job. For information about model monitor, see
+ * Amazon SageMaker Model
+ * Monitor. The output configuration for storing sample data collected by the fleet. The Amazon Resource Name (ARN) of the monitoring schedule. Description of the fleet. The name of the monitoring schedule. Timestamp of when the report was generated. The status of the monitoring schedule. This can be one of the following values.
+ *
+ *
+ *
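To see which of the schedule status values above applies to a given schedule, a DescribeMonitoringSchedule call can be used. A minimal sketch with a placeholder schedule name:

import { SageMakerClient, DescribeMonitoringScheduleCommand } from "@aws-sdk/client-sagemaker";

const client = new SageMakerClient({ region: "us-east-1" });

// "my-monitoring-schedule" is a placeholder name.
const schedule = await client.send(
  new DescribeMonitoringScheduleCommand({ MonitoringScheduleName: "my-monitoring-schedule" })
);

console.log(schedule.MonitoringScheduleStatus, schedule.FailureReason);
console.log(schedule.LastMonitoringExecutionSummary?.MonitoringExecutionStatus);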
+ * Status of devices. The type of the monitoring job definition to schedule. The versions of Edge Manager agent deployed on the fleet. If the monitoring schedule failed, the reason it failed. Status of model on device. The time that the monitoring schedule was created. The last time the monitoring schedule was changed. The name or Amazon Resource Name (ARN) of the lineage group. Configures the monitoring schedule and defines the monitoring job. The endpoint that hosts the model being monitored. Summary of information about the last monitoring job to run. A list of the tags associated with the monitoring schedlue. For more information, see Tagging Amazon Web Services
+ * resources in the Amazon Web Services General Reference Guide. A hosted endpoint for real-time inference. The Amazon Resource Name (ARN) of the lineage group. The name of the endpoint. The resource policy that gives access to the lineage group in another account. The Amazon Resource Name (ARN) of the endpoint. The endpoint configuration associated with the endpoint. A list of the production variants hosted on the endpoint. Each production variant is a
+ * model. The currently active data capture configuration used by your Endpoint. The status of the endpoint. If the endpoint failed, the reason it failed. The time that the endpoint was created. The name of the model group for which to get the resource policy. The last time the endpoint was modified. A list of monitoring schedules for the endpoint. For information about model
+ * monitoring, see Amazon SageMaker Model Monitor. The resource policy for the model group. A list of the tags associated with the endpoint. For more information, see Tagging Amazon Web Services
+ * resources in the Amazon Web Services General Reference Guide. Provides summary information for an endpoint configuration. The name of the endpoint configuration. The Amazon Resource Name (ARN) of the endpoint configuration. Whether Service Catalog is enabled or disabled in SageMaker. A timestamp that shows when the endpoint configuration was created. Part of the Provides summary information for an endpoint. Text that begins a property's name. The name of the endpoint. Specified in the GetSearchSuggestions request.
- * Limits the property names that are included in the response. Defines a property name hint. Only property
- * names that begin with the specified hint are included in the response. The Amazon Resource Name (ARN) of the endpoint. A timestamp that shows when the endpoint was created. The name of the Amazon SageMaker resource to search for. A timestamp that shows when the endpoint was last modified. Limits the property names that are included in the response. The status of the endpoint.
+ *
+ *
+ *
+ *
+ *
+ *
+ *
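The endpoint status values listed above can be used to filter endpoint listings. A minimal sketch using the StatusEquals filter (the status value shown is illustrative):

import { SageMakerClient, ListEndpointsCommand } from "@aws-sdk/client-sagemaker";

const client = new SageMakerClient({ region: "us-east-1" });

// List only endpoints currently in service, newest first.
const endpoints = await client.send(
  new ListEndpointsCommand({ StatusEquals: "InService", SortBy: "CreationTime", SortOrder: "Descending" })
);

for (const ep of endpoints.Endpoints ?? []) {
  console.log(ep.EndpointName, ep.EndpointStatus);
}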
+ * To get a list of endpoints with a specified status, use the ListEndpointsInput$StatusEquals filter. A property name returned from a The properties of an experiment as returned by the Search API. A suggested property name based on what you entered in the search textbox in the Amazon SageMaker
- * console. The name of the experiment. The Amazon Resource Name (ARN) of the experiment. A list of property names for a The name of the experiment as displayed. If The source of the experiment. Specifies configuration details for a Git repository when the repository is
- * updated. The Amazon Resource Name (ARN) of the Amazon Web Services Secrets Manager secret that contains the
- * credentials used to access the git repository. The secret must have a staging label of
- *
- * The description of the experiment. When the experiment was created. Container for human task user interface information. The name of the human task user interface. Who created the experiment. The Amazon Resource Name (ARN) of the human task user interface. When the experiment was last modified. Information about the user who created or modified an experiment, trial, trial
+ * component, lineage group, or project. A timestamp when SageMaker created the human task user interface. The list of tags that are associated with the experiment. You can use Search API to search on the tags. Provides summary information about a hyperparameter tuning job. A summary of the properties of an experiment. To get the complete set of properties, call
+ * the DescribeExperiment API and provide the
+ * The name of the tuning job. The
- * Amazon
- * Resource Name (ARN) of the tuning job. The Amazon Resource Name (ARN) of the experiment. The status of the
- * tuning
- * job. The name of the experiment. Specifies the search strategy hyperparameter tuning uses to choose which
- * hyperparameters to
- * use
- * for each iteration. Currently, the only valid value is
- * Bayesian. The name of the experiment as displayed. If The date and time that the tuning job was created. The source of the experiment. The date and time that the tuning job ended. When the experiment was created. The date and time that the tuning job was
- * modified. When the experiment was last modified. The TrainingJobStatusCounters object that specifies the numbers of
- * training jobs, categorized by status, that this tuning job launched. The ObjectiveStatusCounters object that specifies the numbers of
- * training jobs, categorized by objective metric status, that this tuning job
- * launched. The container for the metadata for Fail step. The ResourceLimits object that specifies the maximum number of
- * training jobs and parallel training jobs allowed for this tuning job. A message that you define and then is processed and rendered by
+ * the Fail step when the error occurs. A SageMaker image. A SageMaker image represents a set of container images that are derived from
- * a common base container image. Each of these container images is represented by a SageMaker
- * Amazon SageMaker Feature Store stores features in a collection called Feature Group.
+ * A Feature Group can be visualized as a table which has rows,
+ * with a unique identifier for each row where each column in the table is a feature.
+ * In principle, a Feature Group is composed of features and values per features. When the image was created. The Amazon Resource Name (ARN) of a The description of the image. The name of the The name of the image as displayed. The name of the When a create, update, or delete operation fails, the reason for the failure. The name of the feature that stores the A The Amazon Resource Name (ARN) of the image. A list of Valid
+ * You can create up to 2,500 The name of the image. The time a The status of the image. A timestamp indicating the last time you updated the feature group. When the image was last modified. Use this to specify the Amazon Web Services Key Management Service (KMS) Key ID, or
+ * The configuration of an Provide an To encrypt an A version of a SageMaker When the version was created. The Amazon Resource Name (ARN) of the IAM execution role used to create the feature
+ * group. When a create or delete operation fails, the reason for the failure. A The Amazon Resource Name (ARN) of the image the version is based on. The status of The ARN of the version. A value that indicates whether the feature group was updated successfully. The status of the version. The reason that the When the version was last modified. A free form description of a The version number. Tags used to define a A structure that contains a list of recommendation jobs. The name, Arn, The name of Unique identifier for the A timestamp indicating the time of creation time of the The status of a FeatureGroup. The status can be any of the following:
+ * The name of the job. Notifies you if replicating data into the The job description. The metadata for a feature. It can either be metadata that you specify, or metadata that is updated automatically. The recommendation job type. The Amazon Resource Number (ARN) of the feature group. The Amazon Resource Name (ARN) of the recommendation job. The name of the feature group containing the feature. The status of the job. The name of feature. A timestamp that shows when the job was created. The data type of the feature. A timestamp that shows when the job completed. A timestamp indicating when the feature was created. The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker
- * to perform tasks on your behalf. A timestamp indicating when the feature was last modified. A timestamp that shows when the job was last modified. An optional description that you specify to better describe the feature. If the job fails, provides information why the job failed. Optional key-value pairs that you specify to better describe the feature. Provides counts for human-labeled tasks in the labeling job. A conditional statement for a search expression that includes a resource property, a
+ * Boolean operator, and a value. Resources that match the statement are returned in the
+ * results from the Search API. If you specify a In search, there are several property types: To define a metric filter, enter a value using the form
+ *
+ *
+ *
+ *
+ *
+ * To define a hyperparameter filter, enter a value with the form
+ *
+ *
+ *
+ *
+ *
+ * To define a tag filter, enter a value with the form
+ * The total number of data objects labeled by a human worker. A resource property name. For example, The total number of data objects that need to be labeled by a human worker. A Boolean binary operator that is used to evaluate the filter. The operator field
+ * contains one of the following values: The value of The value of The The The value of The value of The value of The value of The value of The value of A
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
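A minimal sketch of a Search request that uses one filter of the kind described above; the resource type, property name, operator, and value are all illustrative:

import { SageMakerClient, SearchCommand } from "@aws-sdk/client-sagemaker";

const client = new SageMakerClient({ region: "us-east-1" });

// Illustrative filter: training jobs whose status equals "Completed".
const results = await client.send(
  new SearchCommand({
    Resource: "TrainingJob",
    SearchExpression: {
      Filters: [{ Name: "TrainingJobStatus", Operator: "Equals", Value: "Completed" }],
    },
    SortBy: "CreationTime",
    SortOrder: "Descending",
    MaxResults: 10,
  })
);

for (const record of results.Results ?? []) {
  console.log(record.TrainingJob?.TrainingJobName);
}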
+ * A The total number of tasks in the labeling job. A value used with Provides summary information for a work team. Contains summary information about the flow definition. The name of the labeling job that the work team is assigned to. A unique identifier for a labeling job. You can use this to refer to a specific
- * labeling job. The Amazon Web Services account ID of the account used to start the labeling job. The date and time that the labeling job was created. Provides information about the progress of a labeling job. The configured number of workers per data object. The name of the flow definition. Provides summary information about a labeling job. The name of the labeling job. The Amazon Resource Name (ARN) of the flow definition. The Amazon Resource Name (ARN) assigned to the labeling job when it was
- * created. The status of the flow definition. Valid values: The date and time that the job was created (timestamp). The timestamp when SageMaker created the flow definition. The date and time that the job was last modified (timestamp). The current status of the labeling job. Counts showing the progress of the labeling job. The Amazon Resource Name (ARN) of the work team assigned to the job. The Amazon Resource Name (ARN) of a Lambda function. The function is run before each
- * data object is sent to a worker. The Amazon Resource Name (ARN) of the Lambda function used to consolidate the
- * annotations from individual workers into a label for a data object. For more
- * information, see Annotation
- * Consolidation. If the The reason why the flow definition creation failed. A failure reason is returned only when the flow definition status is The location of the output produced by the labeling job. Input configuration for the labeling job. Metadata for a Lambda step. The Amazon Resource Name (ARN) of the Lambda function that was run by this step execution. A list of the output parameters of the Lambda step. Lists a summary of the properties of a lineage group. A lineage group provides a group of shareable lineage entity
- * resources. The Amazon Resource Name (ARN) of the lineage group resource. The name or Amazon Resource Name (ARN) of the lineage group. The display name of the lineage group summary. The creation time of the lineage group summary. The last modified time of the lineage group summary. The name of the fleet. A filter that returns only actions with the specified source URI. The Amazon Resource Name (ARN) of the device. A filter that returns only actions of the specified type. The name of the fleet. A filter that returns only actions created on or after the specified time. The output configuration for storing sample data collected by the fleet. A filter that returns only actions created on or before the specified time. Description of the fleet. The property used to sort results. The default value is Timestamp of when the report was generated. The sort order. The default value is Status of devices. If the previous call to The versions of Edge Manager agent deployed on the fleet. The maximum number of actions to return in the response. The default value is 10. Status of model on device. A list of actions and their properties. A token for getting the next set of actions, if there are any. The name or Amazon Resource Name (ARN) of the lineage group. A filter that returns only algorithms created after the specified time
- * (timestamp). The Amazon Resource Name (ARN) of the lineage group. A filter that returns only algorithms created before the specified time
- * (timestamp). The resource policy that gives access to the lineage group in another account. The maximum number of algorithms to return in the response. A string in the algorithm name. This filter returns only algorithms whose name
- * contains the specified string. The name of the model group for which to get the resource policy. If the response to a previous The parameter by which to sort the results. The default is
- * The resource policy for the model group. The sort order for the results. The default is >An array of If the response is truncated, SageMaker returns this token. To retrieve the next set of
- * algorithms, use it in the subsequent request. Whether Service Catalog is enabled or disabled in SageMaker. The maximum number of AppImageConfigs to return in the response. The default value is
- * 10. Part of the If the previous call to Text that begins a property's name. A filter that returns only AppImageConfigs whose name contains the specified string. Specified in the GetSearchSuggestions request.
+ * Limits the property names that are included in the response. A filter that returns only AppImageConfigs created on or before the specified time. Defines a property name hint. Only property
+ * names that begin with the specified hint are included in the response. A filter that returns only AppImageConfigs created on or after the specified time. A filter that returns only AppImageConfigs modified on or before the specified time. The name of the Amazon SageMaker resource to search for. A filter that returns only AppImageConfigs modified on or after the specified time. Limits the property names that are included in the response. The property used to sort results. The default value is A property name returned from a The sort order. The default value is A suggested property name based on what you entered in the search textbox in the Amazon SageMaker
+ * console. A token for getting the next set of AppImageConfigs, if there are any. A list of AppImageConfigs and their properties. A list of property names for a If the previous response was truncated, you will receive this token.
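A minimal sketch of the property name hint described above; the resource type and hint are illustrative:

import { SageMakerClient, GetSearchSuggestionsCommand } from "@aws-sdk/client-sagemaker";

const client = new SageMakerClient({ region: "us-east-1" });

// Suggest searchable property names that begin with "Training".
const suggestions = await client.send(
  new GetSearchSuggestionsCommand({
    Resource: "TrainingJob",
    SuggestionQuery: { PropertyNameQuery: { PropertyNameHint: "Training" } },
  })
);

console.log(suggestions.PropertyNameSuggestions?.map((s) => s.PropertyName));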
- * Use it in your next request to receive the next set of results. Specifies configuration details for a Git repository when the repository is
+ * updated. Returns a list up to a specified limit. The Amazon Resource Name (ARN) of the Amazon Web Services Secrets Manager secret that contains the
+ * credentials used to access the git repository. The secret must have a staging label of
+ *
+ * The sort order for the results. The default is Ascending. Container for human task user interface information. The parameter by which to sort the results. The default is CreationTime. The name of the human task user interface. A parameter to search for the domain ID. The Amazon Resource Name (ARN) of the human task user interface. A parameter to search by user profile name. A timestamp when SageMaker created the human task user interface. The list of apps. If the previous response was truncated, you will receive this token.
- * Use it in your next request to receive the next set of results. Provides summary information about a hyperparameter tuning job. The name of the tuning job. The
+ * Amazon
+ * Resource Name (ARN) of the tuning job. A filter that returns only artifacts with the specified source URI. The status of the
+ * tuning
+ * job. A filter that returns only artifacts of the specified type. Specifies the search strategy hyperparameter tuning uses to choose which
+ * hyperparameters to
+ * use
+ * for each iteration. Currently, the only valid value is
+ * Bayesian. A filter that returns only artifacts created on or after the specified time. The date and time that the tuning job was created. A filter that returns only artifacts created on or before the specified time. The date and time that the tuning job ended. The property used to sort results. The default value is The date and time that the tuning job was
+ * modified. The sort order. The default value is The TrainingJobStatusCounters object that specifies the numbers of
+ * training jobs, categorized by status, that this tuning job launched. If the previous call to The ObjectiveStatusCounters object that specifies the numbers of
+ * training jobs, categorized by objective metric status, that this tuning job
+ * launched. The maximum number of artifacts to return in the response. The default value is 10. The ResourceLimits object that specifies the maximum number of
+ * training jobs and parallel training jobs allowed for this tuning job. A SageMaker image. A SageMaker image represents a set of container images that are derived from
+ * a common base container image. Each of these container images is represented by a SageMaker
+ * A list of artifacts and their properties. When the image was created. A token for getting the next set of artifacts, if there are any. The description of the image. The name of the image as displayed. A filter that returns only associations with the specified source ARN. When a create, update, or delete operation fails, the reason for the failure. A filter that returns only associations with the specified destination Amazon Resource Name (ARN). The Amazon Resource Name (ARN) of the image. A filter that returns only associations with the specified source type. The name of the image. A filter that returns only associations with the specified destination type. The status of the image. A filter that returns only associations of the specified type. When the image was last modified. A filter that returns only associations created on or after the specified time. A filter that returns only associations created on or before the specified time. A version of a SageMaker The property used to sort results. The default value is When the version was created. The sort order. The default value is When a create or delete operation fails, the reason for the failure. If the previous call to The Amazon Resource Name (ARN) of the image the version is based on. The maximum number of associations to return in the response. The default value is 10. The ARN of the version. The status of the version. A list of associations and their properties. When the version was last modified. A token for getting the next set of associations, if there are any. The version number. A structure that contains a list of recommendation jobs. Request a list of jobs, using a filter for time. The name of the job. Request a list of jobs, using a filter for time. The job description. Request a list of jobs, using a filter for time. The recommendation job type. Request a list of jobs, using a filter for time. The Amazon Resource Name (ARN) of the recommendation job. Request a list of jobs, using a search filter for name. The status of the job. Request a list of jobs, using a filter for status. A timestamp that shows when the job was created. The sort order for the results. The default is A timestamp that shows when the job completed. The parameter by which to sort the results. The default is The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker
+ * to perform tasks on your behalf. Request a list of jobs up to a specified limit. A timestamp that shows when the job was last modified. If the previous response was truncated, you receive this token. Use it in your next
- * request to receive the next set of results. If the job fails, provides information why the job failed. Provides counts for human-labeled tasks in the labeling job. The total number of data objects labeled by a human worker. Returns a summary list of jobs. The total number of data objects that need to be labeled by a human worker. If the previous response was truncated, you receive this token. Use it in your next
- * request to receive the next set of results. The total number of tasks in the labeling job. List the candidates created for the job by providing the job's name. Provides summary information for a work team. List the candidates for the job and filter by status. The name of the labeling job that the work team is assigned to. List the candidates for the job and filter by candidate name. A unique identifier for a labeling job. You can use this to refer to a specific
+ * labeling job. The sort order for the results. The default is The Amazon Web Services account ID of the account used to start the labeling job. The parameter by which to sort the results. The default is
- * The date and time that the labeling job was created. List the job's candidates up to a specified limit. Provides information about the progress of a labeling job. If the previous response was truncated, you receive this token. Use it in your next
- * request to receive the next set of results. The configured number of workers per data object. Provides summary information about a labeling job. Summaries about the The name of the labeling job. If the previous response was truncated, you receive this token. Use it in your next
- * request to receive the next set of results. The Amazon Resource Name (ARN) assigned to the labeling job when it was
+ * created. The date and time that the job was created (timestamp). A filter that returns only Git repositories that were created after the specified
- * time. The date and time that the job was last modified (timestamp). A filter that returns only Git repositories that were created before the specified
- * time. The current status of the labeling job. A filter that returns only Git repositories that were last modified after the
- * specified time. Counts showing the progress of the labeling job. A filter that returns only Git repositories that were last modified before the
- * specified time. The Amazon Resource Name (ARN) of the work team assigned to the job. The maximum number of Git repositories to return in the response. The Amazon Resource Name (ARN) of a Lambda function. The function is run before each
+ * data object is sent to a worker. A string in the Git repositories name. This filter returns only repositories whose
- * name contains the specified string. The Amazon Resource Name (ARN) of the Lambda function used to consolidate the
+ * annotations from individual workers into a label for a data object. For more
+ * information, see Annotation
+ * Consolidation. If the result of a If the The field to sort results by. The default is The location of the output produced by the labeling job. The sort order for results. The default is Input configuration for the labeling job. Metadata for a Lambda step. Gets a list of summaries of the Git repositories. Each summary specifies the following
- * values for the repository: Name Amazon Resource Name (ARN) Creation time Last modified time Configuration information, including the URL location of the repository and
- * the ARN of the Amazon Web Services Secrets Manager secret that contains the credentials used
- * to access the repository. The Amazon Resource Name (ARN) of the Lambda function that was run by this step execution. If the result of a A list of the output parameters of the Lambda step. Lists a summary of the properties of a lineage group. A lineage group provides a group of shareable lineage entity
+ * resources. The Amazon Resource Name (ARN) of the lineage group resource. If the result of the previous The name or Amazon Resource Name (ARN) of the lineage group. The maximum number of model compilation jobs to return in the response. The display name of the lineage group summary. A filter that returns the model compilation jobs that were created after a specified
- * time. The creation time of the lineage group summary. A filter that returns the model compilation jobs that were created before a specified
- * time. The last modified time of the lineage group summary. A filter that returns the model compilation jobs that were modified after a specified
- * time. A filter that returns the model compilation jobs that were modified before a specified
- * time. A filter that returns only actions with the specified source URI. A filter that returns only actions of the specified type. A filter that returns only actions created on or after the specified time. A filter that returns only actions created on or before the specified time. A filter that returns the model compilation jobs whose name contains a specified
- * string. The property used to sort results. The default value is A filter that retrieves model compilation jobs with a specific DescribeCompilationJobResponse$CompilationJobStatus status. The sort order. The default value is The field by which to sort results. The default is If the previous call to The sort order for results. The default is The maximum number of actions to return in the response. The default value is 10. An array of CompilationJobSummary objects, each describing a model
- * compilation job. A list of actions and their properties. If the response is truncated, Amazon SageMaker returns this A token for getting the next set of actions, if there are any. A filter that returns only contexts with the specified source URI. A filter that returns only contexts of the specified type. A filter that returns only algorithms created after the specified time
+ * (timestamp). A filter that returns only contexts created on or after the specified time. A filter that returns only algorithms created before the specified time
+ * (timestamp). A filter that returns only contexts created on or before the specified time. The maximum number of algorithms to return in the response. The property used to sort results. The default value is A string in the algorithm name. This filter returns only algorithms whose name
+ * contains the specified string. The sort order. The default value is If the response to a previous If the previous call to The parameter by which to sort the results. The default is
+ * The maximum number of contexts to return in the response. The default value is 10. The sort order for the results. The default is A list of contexts and their properties. >An array of A token for getting the next set of contexts, if there are any. If the response is truncated, SageMaker returns this token. To retrieve the next set of
+ * algorithms, use it in the subsequent request. The maximum number of AppImageConfigs to return in the response. The default value is
 * ListAppImageConfigs:
 * <p>The maximum number of AppImageConfigs to return in the response. The default value is
 * 10.</p>
 * <p>Filters that return only AppImageConfigs whose name contains the specified string, or
 * that were created or modified on or before, or on or after, the specified time, plus the
 * property used to sort results and the sort order.</p>
 * <p>The response is a list of AppImageConfigs and their properties, with a token for
 * getting the next set, if there are any.</p>
 *
 * ListDataQualityJobDefinitions:
 * <p>A filter that lists the data quality job definitions associated with the specified
 * endpoint.</p>
 * <p>A string in the data quality monitoring job definition name. This filter returns only
 * data quality monitoring job definitions whose name contains the specified string.</p>
 * <p>Filters that return only data quality monitoring job definitions created before or
 * after the specified time, plus the maximum number of definitions to return in the
 * response. The response is a list of data quality monitoring job definitions.</p>
 *
 * <p>Summary information about a monitoring job: the name of the monitoring job, its Amazon
 * Resource Name (ARN), the time that the monitoring job was created, and the name of the
 * endpoint that the job monitors.</p>
 *
 * ListApps / ListDomains:
 * <p>Returns a list up to a specified limit. The sort order for the results (the default is
 * Ascending), the parameter by which to sort the results (the default is CreationTime), a
 * parameter to search for the domain ID, and a parameter to search by user profile name.
 * The responses are the list of apps and the list of domains; if the previous response was
 * truncated, you will receive this token. Use it in your next request to receive the next
 * set of results.</p>
 *
 * ListArtifacts / ListAssociations:
 * <p>Filters that return only artifacts with the specified source URI or of the specified
 * type, or only associations with the specified source or destination Amazon Resource Name
 * (ARN), source type, destination type, or association type, created on or after, or on or
 * before, the specified time.</p>
 * <p>The property used to sort results, the sort order, and the maximum number to return in
 * the response (the default value is 10). The response is a list of artifacts (or
 * associations) and their properties, with a token for getting the next set, if there are
 * any.</p>
 *
 * ListDeviceFleets / ListDevices / ListEdgePackagingJobs:
 * <p>Filters for fleets or jobs created before or after a specified time, or updated before
 * or after a specified time; for fleets containing a given string in their device fleet
 * name; for devices that contain a given name in any of their models; for jobs containing a
 * given string in their packaging job name or whose model name contains a given string; and
 * for a job status.</p>
 * <p>The column to sort by, the direction to sort in, and the maximum number of results to
 * select. The responses are summaries of device fleets, devices, and edge packaging jobs,
 * along with the token to use when calling the next page of results.</p>
 * ListCandidatesForAutoMLJob:
 * <p>List the candidates created for the job by providing the job's name, and filter by
 * status or by candidate name. List the job's candidates up to a specified limit, with the
 * sort order and the parameter by which to sort the results. The response contains
 * summaries about the candidates; if the previous response was truncated, you receive this
 * token. Use it in your next request to receive the next set of results.</p>
 *
 * ListEndpointConfigs:
 * <p>A string in the endpoint configuration name. This filter returns only endpoint
 * configurations whose name contains the specified string.</p>
 * <p>Filters that return only endpoint configurations created before the specified time
 * (timestamp), or with a creation time greater than or equal to the specified time
 * (timestamp), plus the field to sort results by, the sort order, and the maximum number of
 * results to return.</p>
 * <p>The response is an array of endpoint configurations. If the response is truncated,
 * SageMaker returns this token. To retrieve the next set of endpoint configurations, use it
 * in the subsequent request.</p>
 *
 * ListCodeRepositories:
 * <p>Filters that return only Git repositories that were created, or last modified, after
 * or before the specified time, or whose name contains the specified string, plus the
 * maximum number of Git repositories to return, the field to sort results by, and the sort
 * order.</p>
 * <p>Gets a list of summaries of the Git repositories. Each summary specifies the following
 * values for the repository: name, Amazon Resource Name (ARN), creation time, last modified
 * time, and configuration information, including the URL location of the repository and the
 * ARN of the Amazon Web Services Secrets Manager secret that contains the credentials used
 * to access the repository.</p>
 *
 * ListEndpoints:
 * <p>The maximum number of endpoints to return in the response. This value defaults to
 * 10.</p>
 * <p>A string in endpoint names. This filter returns only endpoints whose name contains the
 * specified string.</p>
 * <p>Filters that return only endpoints that were created before the specified time
 * (timestamp), or with a creation time greater than or equal to the specified time
 * (timestamp), that were modified before or after the specified timestamp, or that have the
 * specified status. The response is an array of endpoint objects; if it is truncated,
 * SageMaker returns a token to use in the subsequent request.</p>
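As a hedged illustration of the endpoint filters above, the sketch below uses `ListEndpointsCommand` from `@aws-sdk/client-sagemaker`; the substring, time window, and `"InService"` status are example values, and the member names are taken from the public ListEndpoints API:

```ts
import { SageMakerClient, ListEndpointsCommand } from "@aws-sdk/client-sagemaker";

const client = new SageMakerClient({}); // region/credentials resolved from the environment

// Find in-service endpoints whose name contains "prod", created in the last 7 days.
export async function listRecentProdEndpoints() {
  const sevenDaysAgo = new Date(Date.now() - 7 * 24 * 60 * 60 * 1000);
  const response = await client.send(
    new ListEndpointsCommand({
      NameContains: "prod",            // substring match on the endpoint name
      CreationTimeAfter: sevenDaysAgo, // creation time >= this timestamp
      StatusEquals: "InService",       // only endpoints with this status
      MaxResults: 10,                  // the value defaults to 10 when omitted
    })
  );
  return response.Endpoints ?? [];     // EndpointSummary[]; NextToken is set if truncated
}
```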
 * ListExperiments:
 * <p>Filters that return only experiments created after or before the specified time, plus
 * the maximum number of experiments to return in the response (the default value is 10),
 * the field by which to sort results, and the sort order. The response is a list of the
 * summaries of your experiments, with a token for getting the next set of experiments, if
 * there are any.</p>
 *
 * ListFeatureGroups:
 * <p>A string that partially matches one or more feature group names, plus parameters to
 * search for feature groups created after or before a specific time.</p>
 * <p>The order in which feature groups are listed, the value on which the feature group
 * list is sorted, and the maximum number of results returned. The response is a summary of
 * feature groups, with a token to resume pagination of the results.</p>
 *
 * ListFlowDefinitions:
 * <p>Filters that return only flow definitions with a creation time greater than or equal
 * to the specified timestamp, or that were created before the specified timestamp.</p>
 * <p>An optional value that specifies whether you want the results sorted in ascending or
 * descending order, a token to resume pagination, and the total number of items to return.
 * The response is an array of objects describing the flow definitions, plus a token to
 * resume pagination.</p>
 *
 * ListHumanTaskUis:
 * <p>Filters that return only human task user interfaces with a creation time greater than
 * or equal to the specified timestamp, or that were created before the specified timestamp.
 * The response is an array of objects describing the human task user interfaces, with a
 * token to resume pagination.</p>
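A minimal sketch of the experiment filters, assuming `ListExperimentsCommand` and its `CreatedAfter`, `SortBy`, `SortOrder`, and `MaxResults` members as documented for the public API; the 30-day window is an example value:

```ts
import { SageMakerClient, ListExperimentsCommand } from "@aws-sdk/client-sagemaker";

const client = new SageMakerClient({});

// Experiments created in the last 30 days, newest first.
export async function listRecentExperiments() {
  const thirtyDaysAgo = new Date(Date.now() - 30 * 24 * 60 * 60 * 1000);
  const response = await client.send(
    new ListExperimentsCommand({
      CreatedAfter: thirtyDaysAgo, // only experiments created after this time
      SortBy: "CreationTime",
      SortOrder: "Descending",
      MaxResults: 10,              // the default value is 10
    })
  );
  return response.ExperimentSummaries ?? []; // plus NextToken when truncated
}
```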
 * ListHyperParameterTuningJobs:
 * <p>The maximum number of tuning jobs to return. The default value is 10.</p>
 * <p>A string in the tuning job name. This filter returns only tuning jobs whose name
 * contains the specified string.</p>
 * <p>Filters that return only tuning jobs that were created, or modified, after or before
 * the specified time, or that have the specified status, plus the field to sort results by
 * and the sort order.</p>
 * <p>The response is a list of <code>HyperParameterTuningJobSummary</code> objects that
 * describe the tuning jobs that the request returned, with a token to use if the result of
 * this request was truncated.</p>
 *
 * ListImages:
 * <p>Filters that return only images created, or modified, on or after, or on or before,
 * the specified time, or whose name contains the specified string.</p>
 * <p>The maximum number of images to return in the response (the default value is 10), the
 * property used to sort results, and the sort order. The response is a list of images and
 * their properties, with a token for getting the next set of images, if there are any.</p>
 * ListImageVersions:
 * <p>The name of the image to list the versions of, with filters that return only versions
 * created, or modified, on or after, or on or before, the specified time.</p>
 * <p>The maximum number of versions to return in the response (the default value is 10),
 * the property used to sort results, and the sort order. The response is a list of versions
 * and their properties, with a token for getting the next set of versions, if there are
 * any.</p>
 *
 * ListEdgeDeploymentPlans:
 * <p>The maximum number of results to select (50 by default). Selects edge deployment plans
 * created, or last updated, before or after this time, with names containing this name, or
 * with a device fleet name containing this name. The column by which to sort the edge
 * deployment plans and the direction of the sorting (ascending or descending). The response
 * is a list of summaries of edge deployment plans, plus the token to use when calling the
 * next page of results.</p>
 *
 * ListInferenceRecommendationsJobs:
 * <p>Filters that return only jobs created, or last modified, after or before the specified
 * time (timestamp), only recommendations whose name contains the specified string, or only
 * inference recommendations jobs with a specific status.</p>
 * <p>The parameter by which to sort the results, the sort order, and the maximum number of
 * recommendations to return in the response. The response contains the recommendations
 * created from the Amazon SageMaker Inference Recommender job, with a token for getting the
 * next set of recommendations, if there are any.</p>
 *
 * ListLabelingJobs:
 * <p>Filters that return only labeling jobs created, or modified, after or before the
 * specified time (timestamp), whose name contains the specified string, or with a specific
 * status.</p>
 * <p>The maximum number of labeling jobs to return in each page of the response, the field
 * to sort results by, and the sort order. The response is an array of labeling job
 * summaries; if the response is truncated, SageMaker returns this token. To retrieve the
 * next set of labeling jobs, use it in the subsequent request.</p>
 *
 * ListLabelingJobsForWorkteam:
 * <p>The Amazon Resource Name (ARN) of the work team for which you want to see labeling
 * jobs.</p>
 * <p>Filters that return only labeling jobs created after or before the specified time
 * (timestamp), or that limit jobs to only the ones whose job reference code contains the
 * specified string, plus the maximum number of labeling jobs to return in each page of the
 * response, the field to sort results by, and the sort order.</p>
 * ListLineageGroups:
 * <p>Timestamps to filter against lineage groups created after, or before, a certain point
 * in time, the parameter by which to sort the results, the sort order, and the maximum
 * number of results to return in the response (this value defaults to 10). The response is
 * a list of lineage groups and their properties; if it is truncated, SageMaker returns a
 * token to use in the subsequent request.</p>
 *
 * ListModelBiasJobDefinitions / ListModelExplainabilityJobDefinitions:
 * <p>Name of the endpoint to monitor for model bias (or model explainability), a filter for
 * job definitions whose name contains a specified string, and filters that return only job
 * definitions created before or after a specified time.</p>
 * <p>The field to sort results by, whether to sort the results in ascending or descending
 * order, and the maximum number of jobs to return in the response (the default value is
 * 10).</p>
 * <p>The response is a JSON array in which each element is a summary for a model bias (or
 * model explainability) job definition. If the response is truncated, Amazon SageMaker
 * returns this token. To retrieve the next set of jobs, use it in the subsequent
 * request.</p>
 * ListModelMetadata:
 * <p>Part of the search expression. You can specify the name and value (domain, task,
 * framework, framework version, and model): the name of the model property to filter by and
 * the value to filter the model metadata on.</p>
 * <p>One or more filters that search for the specified resource or resources in a search.
 * All resource objects that satisfy the expression's condition are included in the search
 * results. Specify the Framework, FrameworkVersion, Domain or Task to filter on. Filter
 * names and values are case-sensitive. The expression is a list of filter objects.</p>
 * <p>The maximum number of models to return in the response, and a token for getting the
 * next set of results, if there are any.</p>
 * <p>The response contains summaries of the model metadata: a structure that holds the
 * machine learning domain, framework, and task of the model, the name of the model, and the
 * framework version of the model.</p>
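A hedged sketch of that search expression, assuming `ListModelMetadataCommand` and a `SearchExpression.Filters` list as exposed by the public API; the filter names (`"Domain"`, `"Framework"`) and the `COMPUTER_VISION`/`PYTORCH` values are illustrative assumptions, not taken from this patch:

```ts
import { SageMakerClient, ListModelMetadataCommand } from "@aws-sdk/client-sagemaker";

const client = new SageMakerClient({});

// Query the model metadata catalog for computer-vision models built with PyTorch.
export async function listPyTorchVisionModels() {
  const response = await client.send(
    new ListModelMetadataCommand({
      SearchExpression: {
        Filters: [
          { Name: "Domain", Value: "COMPUTER_VISION" }, // machine learning domain (example value)
          { Name: "Framework", Value: "PYTORCH" },      // machine learning framework (example value)
        ],
      },
      MaxResults: 25,
    })
  );
  // Each summary carries Domain, Framework, FrameworkVersion, Task, and Model.
  return response.ModelMetadataSummaries ?? [];
}
```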
 * ListModelPackageGroups:
 * <p>Filters that return only model groups created after or before the specified time, or
 * whose name contains the specified string, plus the maximum number of results to return in
 * the response, the field to sort results by, and the sort order.</p>
 * <p>Summary information about a model group: the name of the model group, its Amazon
 * Resource Name (ARN), a description of the model group, the time that the model group was
 * created, and the status of the model group. The response is a list of summaries of the
 * model groups in your Amazon Web Services account. If the response is truncated, SageMaker
 * returns this token. To retrieve the next set of model groups, use it in the subsequent
 * request.</p>
 *
 * ListModelPackages:
 * <p>Filters that return only model packages created after or before the specified time
 * (timestamp), whose name contains the specified string, with the specified approval
 * status, of the specified type, or only model versions that belong to the specified model
 * group, plus the maximum number of model packages to return, the parameter by which to
 * sort the results, and the sort order.</p>
 * <p>Summary information about a model package: the name of the model package; if the model
 * package is a versioned model, the model group that the versioned model belongs to and the
 * version of the model; the Amazon Resource Name (ARN) of the model package; a brief
 * description; a timestamp that shows when the model package was created; the overall
 * status of the model package; and the approval status of the model.</p>
 * <p>If the response is truncated, SageMaker returns this token. To retrieve the next set
 * of model packages, use it in the subsequent request.</p>
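A minimal sketch of the model-package filters, assuming `ListModelPackagesCommand` with the `ModelPackageGroupName`, `ModelApprovalStatus`, `SortBy`, and `SortOrder` members of the public API; the group name is a caller-supplied placeholder:

```ts
import { SageMakerClient, ListModelPackagesCommand } from "@aws-sdk/client-sagemaker";

const client = new SageMakerClient({});

// List the approved versions in one model package group, newest first.
export async function listApprovedVersions(modelPackageGroupName: string) {
  const response = await client.send(
    new ListModelPackagesCommand({
      ModelPackageGroupName: modelPackageGroupName, // restrict to versions in this group
      ModelApprovalStatus: "Approved",              // Approved | Rejected | PendingManualApproval
      SortBy: "CreationTime",
      SortOrder: "Descending",
      MaxResults: 20,
    })
  );
  return response.ModelPackageSummaryList ?? [];    // plus NextToken when truncated
}
```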
 * ListModelQualityJobDefinitions:
 * <p>A filter that returns only model quality monitoring job definitions that are
 * associated with the specified endpoint, a string filter that returns only job definitions
 * whose name contains the specified string, and filters that return only job definitions
 * created before or after the specified time, plus the field to sort results by, the sort
 * order, and the maximum number of results to return in a call.</p>
 * <p>The response is a list of summaries of model quality monitoring job definitions. If
 * the response is truncated, Amazon SageMaker returns this token. To retrieve the next set
 * of model quality monitoring job definitions, use it in the next request.</p>
 *
 * ListModels:
 * <p>The maximum number of models to return in the response, a string in the model name
 * (this filter returns only models whose name contains the specified string), a filter that
 * returns only models created before the specified time (timestamp), and a filter that
 * returns only models with a creation time greater than or equal to the specified time
 * (timestamp), plus the field to sort results by and the sort order.</p>
 * <p>Summary information about a model: the name of the model that you want a summary for,
 * the Amazon Resource Name (ARN) of the model, and a timestamp that indicates when the
 * model was created. If the response is truncated, SageMaker returns this token. To
 * retrieve the next set of models, use it in the subsequent request.</p>
 * ListMonitoringExecutions:
 * <p>Name of a specific schedule, and name of a specific endpoint, to fetch jobs for. Gets
 * a list of the monitoring job runs of the specified monitoring job definitions, or only
 * the monitoring job runs of the specified monitoring type.</p>
 * <p>Filters for jobs scheduled before or after a specified time, created before or after a
 * specified time, modified before or after a specified time, or with a specific status,
 * plus the field to sort results by, the sort order, and the maximum number of jobs to
 * return in the response (the default value is 10).</p>
 * <p>The response is a JSON array in which each element is a summary for a monitoring
 * execution. If the response is truncated, Amazon SageMaker returns this token. To retrieve
 * the next set of jobs, use it in the subsequent request.</p>
 *
 * ListMonitoringSchedules:
 * <p>Name of a specific endpoint to fetch schedules for. Gets a list of the monitoring
 * schedules for the specified monitoring job definition, or only the monitoring schedules
 * for the specified monitoring type. Filters for monitoring schedules whose name contains a
 * specified string, or that were created or modified before or after a specified time.</p>
 * <p>A monitoring schedule summary includes the name of the monitoring schedule, its Amazon
 * Resource Name (ARN), its creation time, the last time it was modified, its status, the
 * name of the endpoint using the monitoring schedule, and the name and type of the
 * monitoring job definition that the schedule is for.</p>
 * <p>The response is a JSON array in which each element is a summary for a monitoring
 * schedule, plus a token to use in the subsequent request if the response is truncated.</p>
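A hedged sketch of the schedule filters, assuming `ListMonitoringSchedulesCommand` with `EndpointName`, `MonitoringTypeEquals`, `SortBy`, and `SortOrder` members as documented for the public API; the endpoint name is a placeholder and the `"ModelQuality"` type is an example value:

```ts
import { SageMakerClient, ListMonitoringSchedulesCommand } from "@aws-sdk/client-sagemaker";

const client = new SageMakerClient({});

// List the model-quality monitoring schedules attached to one endpoint.
export async function listModelQualitySchedules(endpointName: string) {
  const response = await client.send(
    new ListMonitoringSchedulesCommand({
      EndpointName: endpointName,           // only schedules that monitor this endpoint
      MonitoringTypeEquals: "ModelQuality", // DataQuality | ModelQuality | ModelBias | ModelExplainability
      SortBy: "CreationTime",
      SortOrder: "Descending",
      MaxResults: 10,                       // the default value is 10
    })
  );
  // Each summary has the schedule name, ARN, creation/last-modified times, status,
  // endpoint name, and the monitoring job definition name and type.
  return response.MonitoringScheduleSummaries ?? [];
}
```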
 * ListNotebookInstanceLifecycleConfigs:
 * <p>The maximum number of lifecycle configurations to return in the response, the field to
 * sort the list of results by, and the sort order. A string in the lifecycle configuration
 * name (this filter returns only lifecycle configurations whose name contains the specified
 * string), plus filters that return only lifecycle configurations that were created, or
 * modified, before or after the specified time (timestamp).</p>
 * <p>A summary of a notebook instance lifecycle configuration includes the name of the
 * lifecycle configuration, its Amazon Resource Name (ARN), a timestamp that tells when it
 * was created, and a timestamp that tells when it was last modified. If the response is
 * truncated, SageMaker returns this token. To get the next set of lifecycle configurations,
 * use it in the next request.</p>
 * ListNotebookInstances:
 * <p>If the previous call to the API was truncated, the response includes a token. You
 * might specify a filter or a sort order in your request. When the response is truncated,
 * you must use the same values for the filter and sort order in the next request.</p>
 * <p>The maximum number of notebook instances to return, the field to sort results by, and
 * the sort order.</p>
 * <p>A string in the notebook instances' name. This filter returns only notebook instances
 * whose name contains the specified string.</p>
 * <p>Filters that return only notebook instances that were created, or modified, before or
 * after the specified time (timestamp), or with the specified status.</p>
 * <p>A string in the name of a notebook instance lifecycle configuration associated with
 * this notebook instance. This filter returns only notebook instances associated with a
 * lifecycle configuration with a name that contains the specified string.</p>
 * <p>A string in the name or URL of a Git repository associated with this notebook
 * instance. This filter returns only notebook instances associated with a Git repository
 * with a name that contains the specified string, plus a filter that returns only notebook
 * instances associated with the specified Git repository.</p>
 *
 * NotebookInstanceSummary:
 * <p>Provides summary information for a SageMaker notebook instance: the name of the
 * notebook instance that you want a summary for, its Amazon Resource Name (ARN), its
 * status, the URL that you use to connect to the Jupyter notebook running in your notebook
 * instance, the type of ML compute instance that the notebook instance is running on, and
 * timestamps that show when the notebook instance was created and last modified.</p>
 * <p>The name of a notebook instance lifecycle configuration associated with this notebook
 * instance. For information about notebook instance lifecycle configurations, see Step 2.1:
 * (Optional) Customize a Notebook Instance.</p>
 * <p>The Git repository associated with the notebook instance as its default code
 * repository. This can be either the name of a Git repository stored as a resource in your
 * account, or the URL of a Git repository in Amazon Web Services CodeCommit or in any other
 * Git repository. When you open a notebook instance, it opens in the directory that
 * contains this repository. For more information, see Associating Git Repositories with
 * SageMaker Notebook Instances.</p>
 * <p>An array of up to three Git repositories associated with the notebook instance. These
 * can be either the names of Git repositories stored as resources in your account, or the
 * URL of Git repositories in Amazon Web Services CodeCommit or in any other Git repository.
 * These repositories are cloned at the same level as the default repository of your
 * notebook instance.</p>
 * <p>If the response to the previous request was truncated, SageMaker returns this token.
 * To retrieve the next set of notebook instances, use it in the subsequent request.</p>
 * ListPipelineExecutions:
 * <p>The name of the pipeline, filters that return the pipeline executions that were
 * created after or before a specified time, the field by which to sort results, the sort
 * order, the maximum number of pipeline executions to return in the response, and a token
 * to use if the result of the previous request was truncated.</p>
 * <p>A pipeline execution summary includes the Amazon Resource Name (ARN) of the pipeline
 * execution, its start time, its status, its description, its display name, and a message
 * generated by SageMaker Pipelines describing why the pipeline execution failed.</p>
 * <p>The response contains a sorted list of pipeline execution summary objects matching the
 * specified filters. Each run summary includes the Amazon Resource Name (ARN) of the
 * pipeline execution, the run date, and the status. This list can be empty.</p>
 * ListPipelineExecutionSteps:
 * <p>The Amazon Resource Name (ARN) of the pipeline execution, the maximum number of
 * pipeline execution steps to return in the response, the field by which to sort results,
 * and a token to use if the result of the previous request was truncated.</p>
 * <p>Step metadata: for Model steps, the Amazon Resource Name (ARN) of the created model;
 * for processing, training, transform, and tuning job steps, the ARN of the job that was
 * run by this step execution; for register model steps, the ARN of the model package the
 * model was registered to; for condition steps, the outcome of the condition evaluation;
 * for callback steps, the URL of the Amazon SQS queue used by this step execution, the
 * pipeline generated token, and a list of output parameters; for Lambda steps, the ARN of
 * the Lambda function that was run by this step execution and a list of output parameters;
 * plus the configurations and outcomes of EMR and Fail step executions.</p>
 * <p>Container for the metadata for Quality check and Clarify check steps. This includes
 * the type of the check conducted, the Amazon S3 URIs of the baseline constraints and
 * statistics files used for the drift check, the Amazon S3 URIs of the newly calculated
 * baseline constraints and statistics, the model package group name provided, the Amazon S3
 * URI of the violation report if violations are detected, the Amazon Resource Name (ARN) of
 * the check processing job initiated by the step execution, and the boolean flags
 * indicating whether the drift check against the previous baseline is skipped and whether a
 * newly calculated baseline can be accessed through step properties.</p>
 * <p>An execution of a step in a pipeline includes the name of the step that is executed,
 * its display name, its description, the times that the step started and stopped executing,
 * the status of the step execution, details on the cache hit if this pipeline execution
 * step was cached, the current attempt of the execution step (for more information, see
 * Retry Policy for SageMaker Pipelines steps), the reason why the step failed execution
 * (this is only returned if the step failed its execution), and metadata for the step
 * execution.</p>
+ * lifecycle configurations, use it in the next request. The maximum number of parameters to return in the response. An array of Assigns a value to a named Pipeline parameter. The name of the parameter to assign a value to. This
- * parameter name must match a named parameter in the
- * pipeline definition. If the previous call to the You might specify a filter or a sort order in your request. When response is
+ * truncated, you must use the same values for the filer and sort order in the next
+ * request. The literal value for the parameter. The maximum number of notebook instances to return. The field to sort results by. The default is Contains a list of pipeline parameters. This list can be empty. The sort order for results. If the result of the previous A string in the notebook instances' name. This filter returns only notebook
+ * instances whose name contains the specified string. A filter that returns only notebook instances that were created before the
+ * specified time (timestamp). The prefix of the pipeline name. A filter that returns only notebook instances that were created after the specified
+ * time (timestamp). A filter that returns the pipelines that were created after a specified
- * time. A filter that returns only notebook instances that were modified before the
+ * specified time (timestamp). A filter that returns the pipelines that were created before a specified
- * time. A filter that returns only notebook instances that were modified after the
+ * specified time (timestamp). The field by which to sort results. The default is A filter that returns only notebook instances with the specified status. The sort order for results. A string in the name of a notebook instances lifecycle configuration associated with
+ * this notebook instance. This filter returns only notebook instances associated with a
+ * lifecycle configuration with a name that contains the specified string. If the result of the previous A string in the name or URL of a Git repository associated with this notebook
+ * instance. This filter returns only notebook instances associated with a git repository
+ * with a name that contains the specified string. The maximum number of pipelines to return in the response. A filter that returns only notebook instances with associated with the specified git
+ * repository. A summary of a pipeline. Provides summary information for an SageMaker notebook instance. The Amazon Resource Name (ARN) of the pipeline. The name of the notebook instance that you want a summary for. The name of the pipeline. The Amazon Resource Name (ARN) of the notebook instance. The display name of the pipeline. The status of the notebook instance. The description of the pipeline. The URL that you use to connect to the Jupyter notebook running in your notebook
+ * instance. The Amazon Resource Name (ARN) that the pipeline used to execute. The type of ML compute instance that the notebook instance is running on. The creation time of the pipeline. A timestamp that shows when the notebook instance was created. The time that the pipeline was last modified. A timestamp that shows when the notebook instance was last modified. The last time that a pipeline execution began. The name of a notebook instance lifecycle configuration associated with this notebook
+ * instance. For information about notebook instance lifestyle configurations, see Step
+ * 2.1: (Optional) Customize a Notebook Instance. Contains a sorted list of The Git repository associated with the notebook instance as its default code
+ * repository. This can be either the name of a Git repository stored as a resource in your
+ * account, or the URL of a Git repository in Amazon Web Services CodeCommit or in any
+ * other Git repository. When you open a notebook instance, it opens in the directory that
+ * contains this repository. For more information, see Associating Git Repositories with SageMaker
+ * Notebook Instances. If the result of the previous An array of up to three Git repositories associated with the notebook instance. These
+ * can be either the names of Git repositories stored as resources in your account, or the
+ * URL of Git repositories in Amazon Web Services CodeCommit or in any
+ * other Git repository. These repositories are cloned at the same level as the default
+ * repository of your notebook instance. For more information, see Associating Git
+ * Repositories with SageMaker Notebook Instances. A filter that returns only processing jobs created after the specified time. If the response to the previous A filter that returns only processing jobs created after the specified time. An array of A filter that returns only processing jobs modified after the specified time. A filter that returns only processing jobs modified before the specified time. The name of the pipeline. A string in the processing job name. This filter returns only processing jobs whose
- * name contains the specified string. A filter that returns the pipeline executions that were created after a specified
+ * time. A filter that retrieves only processing jobs with a specific status. A filter that returns the pipeline executions that were created before a specified
+ * time. The field to sort results by. The default is The field by which to sort results. The default is The sort order for results. The default is The sort order for results. If the result of the previous If the result of the previous The maximum number of processing jobs to return in the response. The maximum number of pipeline executions to return in the response. Summary of information about a processing job. A pipeline execution summary. The name of the processing job. The Amazon Resource Name (ARN) of the processing job.. The time at which the processing job was created. The Amazon Resource Name (ARN) of the pipeline execution. The time at which the processing job completed. The start time of the pipeline execution. A timestamp that indicates the last time the processing job was modified. The status of the pipeline execution. The status of the processing job. The description of the pipeline execution. A string, up to one KB in size, that contains the reason a processing job failed, if
- * it failed. The display name of the pipeline execution. An optional string, up to one KB in size, that contains metadata from the processing
- * container when the processing job exits. A message generated by SageMaker Pipelines describing why the pipeline execution failed. An array of Contains a sorted list of pipeline execution summary objects matching the specified
+ * filters. Each run summary includes the Amazon Resource Name (ARN) of the pipeline execution, the run date,
+ * and the status. This list can be empty. If the response is truncated, Amazon SageMaker returns this token. To retrieve the next set of
- * processing jobs, use it in the subsequent request. If the result of the previous The Amazon Resource Name (ARN) of the pipeline execution. If the result of the previous The maximum number of pipeline execution steps to return in the response. The field by which to sort results. The default is Metadata for Model steps. The Amazon Resource Name (ARN) of the created model. Metadata for a processing job step. The Amazon Resource Name (ARN) of the processing job. Container for the metadata for a Quality check step. For more information, see
+ * the topic on QualityCheck step in the Amazon SageMaker Developer Guide.
+ * The type of the Quality check step. The Amazon S3 URI of the baseline statistics file used for the drift check. The Amazon S3 URI of the baseline constraints file used for the drift check. The Amazon S3 URI of the newly calculated baseline statistics file. The Amazon S3 URI of the newly calculated baseline constraints file. The model package group name. The Amazon S3 URI of violation report if violations are detected. The Amazon Resource Name (ARN) of the Quality check processing job that was run by this step execution. This flag indicates if the drift check against the previous baseline will be skipped or not.
+ * If it is set to This flag indicates if a newly calculated baseline can be accessed through step properties
+ * Metadata for a register model job step. The Amazon Resource Name (ARN) of the model package. Metadata for a training job step. The Amazon Resource Name (ARN) of the training job that was run by this step execution. Metadata for a transform job step. The Amazon Resource Name (ARN) of the transform job that was run by this step execution. Metadata for a tuning step. The Amazon Resource Name (ARN) of the tuning job that was run by this step execution. Metadata for a step execution. The Amazon Resource Name (ARN) of the training job that was run by this step execution. The Amazon Resource Name (ARN) of the processing job that was run by this step execution. The Amazon Resource Name (ARN) of the transform job that was run by this step execution. The Amazon Resource Name (ARN) of the tuning job that was run by this step execution. The Amazon Resource Name (ARN) of the model that was created by this step execution. The Amazon Resource Name (ARN) of the model package the model was registered to by this step execution. The outcome of the condition evaluation that was run by this step execution. The URL of the Amazon SQS queue used by this step execution, the pipeline generated token,
+ * and a list of output parameters. The Amazon Resource Name (ARN) of the Lambda function that was run by this step execution and a list of
+ * output parameters. The configurations and outcomes of the check step execution. This includes: The type of the check conducted, The Amazon S3 URIs of baseline constraints and statistics files to be used for the drift check. The Amazon S3 URIs of newly calculated baseline constraints and statistics. The model package group name provided. The Amazon S3 URI of the violation report if violations detected. The Amazon Resource Name (ARN) of check processing job initiated by the step execution. The boolean flags indicating if the drift check is skipped. If step property Container for the metadata for a Clarify check step. The configurations
+ * and outcomes of the check step execution. This includes: The type of the check conducted, The Amazon S3 URIs of baseline constraints and statistics files to be used for the drift check. The Amazon S3 URIs of newly calculated baseline constraints and statistics. The model package group name provided. The Amazon S3 URI of the violation report if violations detected. The Amazon Resource Name (ARN) of check processing job initiated by the step execution. The boolean flags indicating if the drift check is skipped. If step property The configurations and outcomes of an EMR step execution. The configurations and outcomes of a Fail step execution. An execution of a step in a pipeline. The name of the step that is executed. The display name of the step. The description of the step. The time that the step started executing. The time that the step stopped executing. The status of the step execution. If this pipeline execution step was cached, details on the cache hit. The current attempt of the execution step. For more information, see Retry Policy for SageMaker Pipelines steps. The reason why the step failed execution. This is only returned if the step failed its execution. Metadata for the step execution. A list of If the result of the previous The Amazon Resource Name (ARN) of the pipeline execution. If the result of the previous The maximum number of parameters to return in the response. Assigns a value to a named Pipeline parameter. The name of the parameter to assign a value to. This
+ * parameter name must match a named parameter in the
+ * pipeline definition. The literal value for the parameter. Contains a list of pipeline parameters. This list can be empty. If the result of the previous The prefix of the pipeline name. A filter that returns the pipelines that were created after a specified
+ * time. A filter that returns the pipelines that were created before a specified
+ * time. The field by which to sort results. The default is The sort order for results. If the result of the previous The maximum number of pipelines to return in the response. A summary of a pipeline. The Amazon Resource Name (ARN) of the pipeline. The name of the pipeline. The display name of the pipeline. The description of the pipeline. The Amazon Resource Name (ARN) that the pipeline used to execute. The creation time of the pipeline. The time that the pipeline was last modified. The last time that a pipeline execution began. Contains a sorted list of If the result of the previous A filter that returns only processing jobs created after the specified time. A filter that returns only processing jobs created after the specified time. A filter that returns only processing jobs modified after the specified time. A filter that returns only processing jobs modified before the specified time. A string in the processing job name. This filter returns only processing jobs whose
+ * name contains the specified string. A filter that retrieves only processing jobs with a specific status. The field to sort results by. The default is The sort order for results. The default is If the result of the previous The maximum number of processing jobs to return in the response. Summary of information about a processing job. The name of the processing job. The Amazon Resource Name (ARN) of the processing job.. The time at which the processing job was created. The time at which the processing job completed. A timestamp that indicates the last time the processing job was modified. The status of the processing job. A string, up to one KB in size, that contains the reason a processing job failed, if
+ * it failed. An optional string, up to one KB in size, that contains metadata from the processing
+ * container when the processing job exits. An array of If the response is truncated, Amazon SageMaker returns this token. To retrieve the next set of
+ * processing jobs, use it in the subsequent request. The response from the last list when returning a list large enough to neeed tokening. The maximum number of requests to select. The name of the edge deployment plan. Toggle for excluding devices deployed in other stages. The name of the stage in the deployment. List of summaries of devices allocated to the stage. The token to use when calling the next page of results. The name of the edge deployment plan to start. The name of the stage to start. The name of the schedule to start. The name of the edge deployment plan to stop. The name of the stage to stop. The name of the edge packaging job. Creates an edge deployment plan, consisting of multiple stages. Each stage may have a different deployment configuration and devices. The name of the edge deployment plan. List of models associated with the edge deployment plan. The device fleet used for this edge deployment plan. List of stages of the edge deployment plan. The number of stages is limited to 10 per deployment. List of tags with which to tag the edge deployment plan. The ARN of the edge deployment plan. Creates a new stage in an existing edge deployment plan. The name of the edge deployment plan. List of stages to be added to the edge deployment plan. Deletes an edge deployment plan if (and only if) all the stages in the plan are inactive or there are no stages in the plan. The name of the edge deployment plan to delete. Delete a stage in an edge deployment plan if (and only if) the stage is inactive. The name of the edge deployment plan from which the stage will be deleted. The name of the stage. The deployment configuration for an endpoint, which contains the desired deployment\n strategy and rollback configurations. The name of the stage. Configuration of the devices in the stage. Configuration of the deployment details. Contains information about a stage in an edge deployment plan. The name of the stage. Configuration of the devices in the stage. Configuration of the deployment details. General status of the current state. Contains information summarizing the deployment stage results. Describes an edge deployment plan with deployment status per stage. The name of the deployment plan to describe. If the edge deployment plan has enough stages to require tokening, then this is the response from the last list of stages returned. The maximum number of results to select (50 by default). The ARN of edge deployment plan. The name of the edge deployment plan. List of models associated with the edge deployment plan. The device fleet used for this edge deployment plan. The number of edge devices with the successful deployment. The number of edge devices yet to pick up deployment, or in progress. The number of edge devices that failed the deployment. List of stages in the edge deployment plan. Token to use when calling the next set of stages in the edge deployment plan. The time when the edge deployment plan was created. The time when the edge deployment plan was last updated. The ARN of the edge deployment plan. The name of the edge deployment plan. The name of the stage in the edge deployment plan. The name of the deployed stage. The name of the fleet to which the device belongs to. The name of the device. The ARN of the device. The deployment status of the device. The detailed error message for the deployoment status result. The description of the device. The time when the deployment on the device started. Contains information summarizing device details and deployment status. 
Type of device subsets to deploy to the current stage. Percentage of devices in the fleet to deploy to the current stage. List of devices chosen to deploy. A filter to select devices with names containing this name. Contains information about the configurations of selected devices. Status of devices. A directed edge connecting two lineage entities. Toggle that determines whether to rollback to previous configuration if the current deployment fails.\n By default this is turned on. You may turn this off if you want to investigate the errors yourself. Contains information about the configuration of a deployment. The name the device application uses to reference this model. The edge packaging job associated with this deployment. Contains information about the configuration of a model in a deployment. The ARN of the edge deployment plan. The name of the edge deployment plan. The name of the device fleet used for the deployment. The number of edge devices with the successful deployment. The number of edge devices yet to pick up the deployment, or in progress. The number of edge devices that failed the deployment. The time when the edge deployment plan was created. The time when the edge deployment plan was last updated. Contains information summarizing an edge deployment plan. The general status of the current stage. The number of edge devices with the successful deployment in the current stage. The number of edge devices yet to pick up the deployment in current stage, or in progress. The number of edge devices that failed the deployment in current stage. A detailed message about deployment status in current stage. The time when the deployment API started. Contains information summarizing the deployment stage results. The container for the metadata for Fail step. Lists all edge deployment plans. The response from the last list when returning a list large enough to need tokening. The maximum number of results to select (50 by default). Selects edge deployment plans created after this time. Selects edge deployment plans created before this time. Selects edge deployment plans that were last updated after this time. Selects edge deployment plans that were last updated before this time. Selects edge deployment plans with names containing this name. Selects edge deployment plans with a device fleet name containing this name. The column by which to sort the edge deployment plans. Can be one of The direction of the sorting (ascending or descending). List of summaries of edge deployment plans. The token to use when calling the next page of results. Lists devices allocated to the stage, containing detailed device information and deployment status. The response from the last list when returning a list large enough to neeed tokening. The maximum number of requests to select. The name of the edge deployment plan. Toggle for excluding devices deployed in other stages. The name of the stage in the deployment. List of summaries of devices allocated to the stage. The token to use when calling the next page of results. Starts a stage in an edge deployment plan. The name of the edge deployment plan to start. The name of the stage to start. Stops a stage in an edge deployment plan. The name of the edge deployment plan to stop. The name of the stage to stop. A list of resource Amazon Resource Name (ARN) that represent the starting point for your lineage query. Associations between lineage entities have a direction. This parameter determines the direction from the
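The list shapes summarized above share the same MaxResults/NextToken pagination contract: pass MaxResults, then feed each response's NextToken back until it comes back undefined. A minimal sketch of that loop (shown with ListPipelineExecutionSteps; the operation, page size, and region are illustrative):

import {
  SageMakerClient,
  ListPipelineExecutionStepsCommand,
  PipelineExecutionStep,
} from "@aws-sdk/client-sagemaker";

const client = new SageMakerClient({ region: "us-east-1" });

// Collect every step of one pipeline execution by following NextToken.
async function listAllSteps(pipelineExecutionArn: string): Promise<PipelineExecutionStep[]> {
  const steps: PipelineExecutionStep[] = [];
  let nextToken: string | undefined;
  do {
    const page = await client.send(
      new ListPipelineExecutionStepsCommand({
        PipelineExecutionArn: pipelineExecutionArn,
        MaxResults: 50,
        NextToken: nextToken,
      })
    );
    steps.push(...(page.PipelineExecutionSteps ?? []));
    nextToken = page.NextToken;
  } while (nextToken);
  return steps;
}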
diff --git a/codegen/sdk-codegen/aws-models/sagemaker.json b/codegen/sdk-codegen/aws-models/sagemaker.json
index 0786d0856619c..82a13aeb7279f 100644
--- a/codegen/sdk-codegen/aws-models/sagemaker.json
+++ b/codegen/sdk-codegen/aws-models/sagemaker.json
@@ -35240,8 +35240,7 @@
"StartArns": {
"target": "com.amazonaws.sagemaker#QueryLineageStartArns",
"traits": {
- "smithy.api#documentation": " A list of resource Amazon Resource Name (ARN) that represent the starting point for your lineage query. A list of resource Amazon Resource Name (ARN) that represent the starting point for your lineage query.
+ * Returns the list of log groups that contain log anomalies.
+ * Returns a list of the events emitted by the resources that are evaluated by DevOps Guru.
* You can use filters to specify which events are returned.
+ * Returns the list of all log groups that are being monitored and tagged by DevOps Guru.
+ * Returns a list of notification channels configured for DevOps Guru. Each notification
* channel is used to notify you when DevOps Guru generates an insight that contains information
diff --git a/clients/client-devops-guru/src/DevOpsGuruClient.ts b/clients/client-devops-guru/src/DevOpsGuruClient.ts
index fe001b5be4ee2..7ea4e459a2ac6 100644
--- a/clients/client-devops-guru/src/DevOpsGuruClient.ts
+++ b/clients/client-devops-guru/src/DevOpsGuruClient.ts
@@ -102,8 +102,16 @@ import {
ListAnomaliesForInsightCommandInput,
ListAnomaliesForInsightCommandOutput,
} from "./commands/ListAnomaliesForInsightCommand";
+import {
+ ListAnomalousLogGroupsCommandInput,
+ ListAnomalousLogGroupsCommandOutput,
+} from "./commands/ListAnomalousLogGroupsCommand";
import { ListEventsCommandInput, ListEventsCommandOutput } from "./commands/ListEventsCommand";
import { ListInsightsCommandInput, ListInsightsCommandOutput } from "./commands/ListInsightsCommand";
+import {
+ ListMonitoredResourcesCommandInput,
+ ListMonitoredResourcesCommandOutput,
+} from "./commands/ListMonitoredResourcesCommand";
import {
ListNotificationChannelsCommandInput,
ListNotificationChannelsCommandOutput,
@@ -161,8 +169,10 @@ export type ServiceInputTypes =
| GetCostEstimationCommandInput
| GetResourceCollectionCommandInput
| ListAnomaliesForInsightCommandInput
+ | ListAnomalousLogGroupsCommandInput
| ListEventsCommandInput
| ListInsightsCommandInput
+ | ListMonitoredResourcesCommandInput
| ListNotificationChannelsCommandInput
| ListOrganizationInsightsCommandInput
| ListRecommendationsCommandInput
@@ -192,8 +202,10 @@ export type ServiceOutputTypes =
| GetCostEstimationCommandOutput
| GetResourceCollectionCommandOutput
| ListAnomaliesForInsightCommandOutput
+ | ListAnomalousLogGroupsCommandOutput
| ListEventsCommandOutput
| ListInsightsCommandOutput
+ | ListMonitoredResourcesCommandOutput
| ListNotificationChannelsCommandOutput
| ListOrganizationInsightsCommandOutput
| ListRecommendationsCommandOutput
diff --git a/clients/client-devops-guru/src/commands/ListAnomalousLogGroupsCommand.ts b/clients/client-devops-guru/src/commands/ListAnomalousLogGroupsCommand.ts
new file mode 100644
index 0000000000000..55fbef7d62b07
--- /dev/null
+++ b/clients/client-devops-guru/src/commands/ListAnomalousLogGroupsCommand.ts
@@ -0,0 +1,98 @@
+// smithy-typescript generated code
+import { getSerdePlugin } from "@aws-sdk/middleware-serde";
+import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http";
+import { Command as $Command } from "@aws-sdk/smithy-client";
+import {
+ FinalizeHandlerArguments,
+ Handler,
+ HandlerExecutionContext,
+ HttpHandlerOptions as __HttpHandlerOptions,
+ MetadataBearer as __MetadataBearer,
+ MiddlewareStack,
+ SerdeContext as __SerdeContext,
+} from "@aws-sdk/types";
+
+import { DevOpsGuruClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../DevOpsGuruClient";
+import { ListAnomalousLogGroupsRequest, ListAnomalousLogGroupsResponse } from "../models/models_0";
+import {
+ deserializeAws_restJson1ListAnomalousLogGroupsCommand,
+ serializeAws_restJson1ListAnomalousLogGroupsCommand,
+} from "../protocols/Aws_restJson1";
+
+export interface ListAnomalousLogGroupsCommandInput extends ListAnomalousLogGroupsRequest {}
+export interface ListAnomalousLogGroupsCommandOutput extends ListAnomalousLogGroupsResponse, __MetadataBearer {}
+
[Class and model doc comments for the new DevOps Guru log anomaly surface, condensed; markup was lost in extraction. ListAnomalousLogGroupsCommand "Returns the list of log groups that contain log anomalies." and ListMonitoredResourcesCommand "Returns the list of all log groups that are being monitored and tagged by DevOps Guru." The new model documentation describes:
- an anomalous log event: the Amazon CloudWatch log stream it belongs to (a log stream is a sequence of log events that share the same source), the type of log anomaly detected, the token where the anomaly was detected (which may refer to an exception or another location, or be blank for anomalies such as format anomalies), the log event ID, the explanation for why the event is considered an anomaly, the number of log lines where it occurs, and the time of its first occurrence;
- a cluster of similar anomalous log events found within a log group (a list of anomalous log events that may be related);
- an anomalous CloudWatch log group used to generate an insight: the log group name, the impact start and end times, the number of log lines scanned, and the log anomaly clusters it contains;
- the log anomaly detection integration ("Specifies if DevOps Guru is configured to perform log anomaly detection on CloudWatch log groups");
- the ListAnomalousLogGroups request and response: the ID of the insight containing the log groups, the maximum number of results per call, the pagination token, and the list of related log groups;
- the ListMonitoredResources request and response: filters by resource type and by resource permission status, the maximum number of results per call, the pagination token, and monitored resource entries carrying the resource name, type, and permission status.
One existing doc string is also corrected from "The dimension of am Amazon CloudWatch metric" to "The dimension of an Amazon CloudWatch metric".]
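A companion sketch for the ListMonitoredResources operation documented above; the filter members and enum values (ResourceTypeFilters, ResourcePermission, "LOG_GROUPS", "MISSING_PERMISSION") are assumptions based on those doc strings:

import { DevOpsGuruClient, ListMonitoredResourcesCommand } from "@aws-sdk/client-devops-guru";

const client = new DevOpsGuruClient({ region: "us-east-1" });

// Find monitored log groups that DevOps Guru does not yet have permission to read.
async function listUnreadableLogGroups(): Promise<void> {
  const response = await client.send(
    new ListMonitoredResourcesCommand({
      Filters: {
        ResourceTypeFilters: ["LOG_GROUPS"],
        ResourcePermission: "MISSING_PERMISSION",
      },
    })
  );
  for (const resource of response.MonitoredResourceIdentifiers ?? []) {
    console.log(resource.MonitoredResourceName, resource.Type, resource.ResourcePermission);
  }
}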
@@ -5038,9 +5407,30 @@ export namespace UpdateResourceCollectionResponse {
});
}
[Remaining doc-only hunks, condensed: the new configuration shape for the log anomaly detection integration ("Information about the integration of DevOps Guru with CloudWatch log groups for log anomaly detection. You can use this to update the configuration." and its opt-in flag), the reworded OpsCenter integration descriptions, and the matching smithy documentation strings in the DevOps Guru service model JSON. The span also carries Glue documentation updates noting that streaming jobs do not have a timeout (the default for non-streaming jobs is 2,880 minutes, or 48 hours) and rewording the executor-time field so that it populates for Auto Scaling job runs generally rather than only when such a run completes (the value is the total time each executor ran during the job run in seconds, multiplied by a DPU factor).]
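A short sketch of opting in to the new log anomaly detection integration via UpdateServiceIntegration; the LogsAnomalyDetection member follows the doc strings above, and the OptInStatus member name mirrors the existing OpsCenter integration shape (treat it as an assumption):

import { DevOpsGuruClient, UpdateServiceIntegrationCommand } from "@aws-sdk/client-devops-guru";

const client = new DevOpsGuruClient({ region: "us-east-1" });

// Enable log anomaly detection on CloudWatch log groups for this account.
async function enableLogAnomalyDetection(): Promise<void> {
  await client.send(
    new UpdateServiceIntegrationCommand({
      ServiceIntegration: {
        LogsAnomalyDetection: { OptInStatus: "ENABLED" },
      },
    })
  );
}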
@@ -426,6 +483,40 @@ export class SSOAdmin extends SSOAdminClient {
}
}
+ /**
+ *
diff --git a/clients/client-sso-admin/src/commands/DeletePermissionsBoundaryFromPermissionSetCommand.ts b/clients/client-sso-admin/src/commands/DeletePermissionsBoundaryFromPermissionSetCommand.ts
new file mode 100644
index 0000000000000..abd21f373fd66
--- /dev/null
+++ b/clients/client-sso-admin/src/commands/DeletePermissionsBoundaryFromPermissionSetCommand.ts
@@ -0,0 +1,111 @@
+// smithy-typescript generated code
+import { getSerdePlugin } from "@aws-sdk/middleware-serde";
+import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http";
+import { Command as $Command } from "@aws-sdk/smithy-client";
+import {
+ FinalizeHandlerArguments,
+ Handler,
+ HandlerExecutionContext,
+ HttpHandlerOptions as __HttpHandlerOptions,
+ MetadataBearer as __MetadataBearer,
+ MiddlewareStack,
+ SerdeContext as __SerdeContext,
+} from "@aws-sdk/types";
+
+import {
+ DeletePermissionsBoundaryFromPermissionSetRequest,
+ DeletePermissionsBoundaryFromPermissionSetResponse,
+} from "../models/models_0";
+import {
+ deserializeAws_json1_1DeletePermissionsBoundaryFromPermissionSetCommand,
+ serializeAws_json1_1DeletePermissionsBoundaryFromPermissionSetCommand,
+} from "../protocols/Aws_json1_1";
+import { ServiceInputTypes, ServiceOutputTypes, SSOAdminClientResolvedConfig } from "../SSOAdminClient";
+
+export interface DeletePermissionsBoundaryFromPermissionSetCommandInput
+ extends DeletePermissionsBoundaryFromPermissionSetRequest {}
+export interface DeletePermissionsBoundaryFromPermissionSetCommandOutput
+ extends DeletePermissionsBoundaryFromPermissionSetResponse,
+ __MetadataBearer {}
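A usage sketch for the permissions-boundary operations added here, following the flow the documentation below describes (attach a boundary, remove it, then reprovision so the IAM policy updates reach all assigned accounts). The PutPermissionsBoundaryToPermissionSet request members and both ARNs are assumptions and placeholders:

import {
  SSOAdminClient,
  PutPermissionsBoundaryToPermissionSetCommand,
  DeletePermissionsBoundaryFromPermissionSetCommand,
  ProvisionPermissionSetCommand,
} from "@aws-sdk/client-sso-admin";

const client = new SSOAdminClient({ region: "us-east-1" });
const instanceArn = "arn:aws:sso:::instance/ssoins-EXAMPLE"; // placeholder
const permissionSetArn = "arn:aws:sso:::permissionSet/ssoins-EXAMPLE/ps-EXAMPLE"; // placeholder

async function setAndRemoveBoundary(): Promise<void> {
  // Attach a customer managed policy (matched by name and path in each account)
  // as the permissions boundary of the permission set.
  await client.send(
    new PutPermissionsBoundaryToPermissionSetCommand({
      InstanceArn: instanceArn,
      PermissionSetArn: permissionSetArn,
      PermissionsBoundary: {
        CustomerManagedPolicyReference: { Name: "MyBoundaryPolicy", Path: "/" },
      },
    })
  );

  // Later, detach the boundary again ...
  await client.send(
    new DeletePermissionsBoundaryFromPermissionSetCommand({
      InstanceArn: instanceArn,
      PermissionSetArn: permissionSetArn,
    })
  );

  // ... and reprovision so the corresponding IAM policy updates are applied
  // to all assigned accounts.
  await client.send(
    new ProvisionPermissionSetCommand({
      InstanceArn: instanceArn,
      PermissionSetArn: permissionSetArn,
      TargetType: "ALL_PROVISIONED_ACCOUNTS",
    })
  );
}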
+
+/**
+ *
[Doc comments for the SSO Admin permissions-boundary operations, condensed; markup was lost in extraction. The documentation covers the permission set the boundary is attached to (the PermissionSet ARN), and the boundary itself, which is specified either as a CustomerManagedPolicyReference, using the name and path of a customer managed policy (the default path is "/"; see Friendly names and paths in the Identity and Access Management user guide), or as a ManagedPolicyArn for an Amazon Web Services managed IAM policy. A permissions boundary represents the maximum permissions that any policy can grant your role; see Permissions boundaries for IAM entities in the Identity and Access Management User Guide. The documentation also notes that you must call ProvisionPermissionSet after this operation, since calling ProvisionPermissionSet applies the corresponding IAM policy updates to all assigned accounts.]
.Bayesian
. To randomly search, set it to Random
. For
- * information about search strategies, see How
- * Hyperparameter Tuning Works.OFF
):
- *
- */
- TrainingJobEarlyStoppingType?: TrainingJobEarlyStoppingType | string;
-
- /**
- * registry/repository[:tag]
and registry/repository[@digest]
- * image path formats. For more information, see Using Your Own Algorithms with Amazon
- * SageMaker.Pipe
mode, Amazon SageMaker streams data directly
- * from Amazon S3 to the container.File
mode, SageMaker
- * downloads the training data from S3 to the provisioned ML storage volume, and mounts the
- * directory to the Docker volume for the training container.FastFile
mode, SageMaker streams data directly
- * from S3 to the container with no code changes, and provides file system access to
- * the data. Users can author their training script to interact with these files as if
- * they were stored on disk.FastFile
mode works best when the data is read sequentially.
- * Augmented manifest files aren't supported.
- * The startup time is lower when there are fewer files in the S3 bucket provided.TrainingImage
.InternalServerError
. RetryStrategy
is specified as part of
- * the CreateTrainingJob
and CreateHyperParameterTuningJob
- * requests. You can add the StoppingCondition
parameter to the request to
- * limit the training time for the complete job.SecondaryStatus
is changed to STARTING
.Type
- * parameter.Array Members
refers to
- * the maximum number of hyperparameters for each range and also the maximum for the
- * hyperparameter tuning job itself. That is, the sum of the number of hyperparameters
- * for all the ranges can't exceed the maximum number specified.File
as the TrainingInputMode
in the
- * algorithm specification. For distributed training algorithms, specify an instance count
- * greater than 1.True
. Encryption provides greater security for distributed training,
- * but training might take longer. How long it takes depends on the amount of communication
- * between compute instances, especially if you use a deep learning algorithm in
- * distributed training.True
) or
- * not (False
).InternalServerError
.
- *
- */
- WarmStartType: HyperParameterTuningJobWarmStartType | string | undefined;
-}
-
-export namespace HyperParameterTuningJobWarmStartConfig {
- /**
- * @internal
- */
- export const filterSensitiveLog = (obj: HyperParameterTuningJobWarmStartConfig): any => ({
- ...obj,
- });
-}
-
-export interface CreateHyperParameterTuningJobRequest {
- /**
- * IDENTICAL_DATA_AND_ALGORITHM
as the
- * WarmStartType
value for the warm start configuration, the training job
- * that performs the best in the new tuning job is compared to the best training jobs from
- * the parent tuning jobs. From these, the training job that performs the best as measured
- * by the objective metric is returned as the overall best training job.Bayesian
. To randomly search, set it to Random
. For
+ * information about search strategies, see How
+ * Hyperparameter Tuning Works.OFF
):
+ *
+ */
+ TrainingJobEarlyStoppingType?: TrainingJobEarlyStoppingType | string;
+
+ /**
+ * registry/repository[:tag]
and registry/repository[@digest]
+ * image path formats. For more information, see Using Your Own Algorithms with Amazon
+ * SageMaker.Pipe
mode, Amazon SageMaker streams data directly
+ * from Amazon S3 to the container.File
mode, SageMaker
+ * downloads the training data from S3 to the provisioned ML storage volume, and mounts the
+ * directory to the Docker volume for the training container.FastFile
mode, SageMaker streams data directly
+ * from S3 to the container with no code changes, and provides file system access to
+ * the data. Users can author their training script to interact with these files as if
+ * they were stored on disk.FastFile
mode works best when the data is read sequentially.
+ * Augmented manifest files aren't supported.
+ * The startup time is lower when there are fewer files in the S3 bucket provided.TrainingImage
.InternalServerError
. RetryStrategy
is specified as part of
+ * the CreateTrainingJob
and CreateHyperParameterTuningJob
+ * requests. You can add the StoppingCondition
parameter to the request to
+ * limit the training time for the complete job.SecondaryStatus
is changed to STARTING
.Type
+ * parameter.Array Members
refers to
+ * the maximum number of hyperparameters for each range and also the maximum for the
+ * hyperparameter tuning job itself. That is, the sum of the number of hyperparameters
+ * for all the ranges can't exceed the maximum number specified.File
as the TrainingInputMode
in the
+ * algorithm specification. For distributed training algorithms, specify an instance count
+ * greater than 1.True
. Encryption provides greater security for distributed training,
+ * but training might take longer. How long it takes depends on the amount of communication
+ * between compute instances, especially if you use a deep learning algorithm in
+ * distributed training.True
) or
+ * not (False
).InternalServerError
.
+ *
+ */
+ WarmStartType: HyperParameterTuningJobWarmStartType | string | undefined;
+}
+
+export namespace HyperParameterTuningJobWarmStartConfig {
+ /**
+ * @internal
+ */
+ export const filterSensitiveLog = (obj: HyperParameterTuningJobWarmStartConfig): any => ({
+ ...obj,
+ });
+}
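As a usage sketch (not part of the generated file), a warm start configuration built against this shape might look like the following; the parent tuning job name is a placeholder:

import { HyperParameterTuningJobWarmStartConfig } from "@aws-sdk/client-sagemaker";

// With IDENTICAL_DATA_AND_ALGORITHM, the best training job is chosen across the
// new tuning job and its parents, as the surrounding documentation describes.
const warmStartConfig: HyperParameterTuningJobWarmStartConfig = {
  ParentHyperParameterTuningJobs: [
    { HyperParameterTuningJobName: "my-parent-tuning-job" },
  ],
  WarmStartType: "IdenticalDataAndAlgorithm",
};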
+
+export interface CreateHyperParameterTuningJobRequest {
+ /**
+ * IDENTICAL_DATA_AND_ALGORITHM
as the
+ * WarmStartType
value for the warm start configuration, the training job
+ * that performs the best in the new tuning job is compared to the best training jobs from
+ * the parent tuning jobs. From these, the training job that performs the best as measured
+ * by the objective metric is returned as the overall best training job.HyperParameterTuningJobObjective
parameter of HyperParameterTuningJobConfig.TrainingStartTime
and this time.
- * For successful jobs and stopped jobs, this is the time after model artifacts are
- * uploaded. For failed jobs, this is the time when SageMaker detects a job failure.
- *
- *
- *
- *
- *
- */
- ObjectiveStatus?: ObjectiveStatus | string;
-}
-
-export namespace HyperParameterTrainingJobSummary {
- /**
- * @internal
- */
- export const filterSensitiveLog = (obj: HyperParameterTrainingJobSummary): any => ({
- ...obj,
- });
-}
-
-export enum HyperParameterTuningJobStatus {
- COMPLETED = "Completed",
- FAILED = "Failed",
- IN_PROGRESS = "InProgress",
- STOPPED = "Stopped",
- STOPPING = "Stopping",
-}
-
-/**
- * WarmStartType
of IDENTICAL_DATA_AND_ALGORITHM
, this is the
- * TrainingJobSummary for the training job with the best objective
- * metric value of all training jobs launched by this tuning job and all parent jobs
- * specified for the warm start tuning job.HyperParameterTuningJobObjective
parameter of HyperParameterTuningJobConfig.TrainingStartTime
and this time.
+ * For successful jobs and stopped jobs, this is the time after model artifacts are
+ * uploaded. For failed jobs, this is the time when SageMaker detects a job failure.
+ *
+ *
+ *
+ *
+ *
+ */
+ ObjectiveStatus?: ObjectiveStatus | string;
}
-export namespace EnvironmentParameter {
+export namespace HyperParameterTrainingJobSummary {
/**
* @internal
*/
- export const filterSensitiveLog = (obj: EnvironmentParameter): any => ({
+ export const filterSensitiveLog = (obj: HyperParameterTrainingJobSummary): any => ({
...obj,
});
}
+export enum HyperParameterTuningJobStatus {
+ COMPLETED = "Completed",
+ FAILED = "Failed",
+ IN_PROGRESS = "InProgress",
+ STOPPED = "Stopped",
+ STOPPING = "Stopping",
+}
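For context, a small polling sketch against these terminal states using DescribeHyperParameterTuningJob; the one-minute delay is arbitrary:

import {
  SageMakerClient,
  DescribeHyperParameterTuningJobCommand,
} from "@aws-sdk/client-sagemaker";

const client = new SageMakerClient({ region: "us-east-1" });

// Poll a tuning job until it reaches Completed, Failed, or Stopped.
async function waitForTuningJob(name: string) {
  for (;;) {
    const job = await client.send(
      new DescribeHyperParameterTuningJobCommand({ HyperParameterTuningJobName: name })
    );
    const status = job.HyperParameterTuningJobStatus;
    if (status === "Completed" || status === "Failed" || status === "Stopped") {
      return job;
    }
    await new Promise((resolve) => setTimeout(resolve, 60_000)); // wait a minute between checks
  }
}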
+
/**
- * WarmStartType
of IDENTICAL_DATA_AND_ALGORITHM
, this is the
+ * TrainingJobSummary for the training job with the best objective
+ * metric value of all training jobs launched by this tuning job and all parent jobs
+ * specified for the warm start tuning job.
- *
- * {
- * "document-version": "2018-11-28"
- * "labels": [
- * {
- * "label": "label 1"
- * },
- * {
- * "label": "label 2"
- * },
- * ...
- * {
- * "label": "label n"
- * }
- * ]
- * }
- * True
, no inbound or outbound network calls can be made to or from the
- * model container.Failed
, the reason for the failure.
+ *
+ * {
+ * "document-version": "2018-11-28"
+ * "labels": [
+ * {
+ * "label": "label 1"
+ * },
+ * {
+ * "label": "label 2"
+ * },
+ * ...
+ * {
+ * "label": "label n"
+ * }
+ * ]
+ * }
+ * True
, no inbound or outbound network calls can be made to or from the
+ * model container.
- *
- */
- MonitoringType?: MonitoringType | string;
+export enum DetailedModelPackageStatus {
+ COMPLETED = "Completed",
+ FAILED = "Failed",
+ IN_PROGRESS = "InProgress",
+ NOT_STARTED = "NotStarted",
+}
+/**
+ * DATA_QUALITY
- The schedule is for a data quality monitoring
- * job.MODEL_QUALITY
- The schedule is for a model quality monitoring
- * job.MODEL_BIAS
- The schedule is for a bias monitoring job.MODEL_EXPLAINABILITY
- The schedule is for an explainability
- * monitoring job.Failed
, the reason for the failure.Failed
, the reason it failed.
+ *
*/
- PipelineExperimentConfig?: PipelineExperimentConfig;
+ MonitoringType?: MonitoringType | string;
/**
- * DATA_QUALITY
- The schedule is for a data quality monitoring
+ * job.MODEL_QUALITY
- The schedule is for a model quality monitoring
+ * job.MODEL_BIAS
- The schedule is for a bias monitoring job.MODEL_EXPLAINABILITY
- The schedule is for an explainability
+ * monitoring job.Failed
, the reason it failed.
- *
+ * AVAILABLE
- Stable state, ready to perform any operation. The most recent operation succeeded and completed.UNDER_CHANGE
- Transitive state. Operations performed might not have valid results. Wait for an AVAILABLE status before performing operations.TAINTED
- Stable state, ready to perform any operation. The stack has completed the requested operation but is not exactly what was requested. For example, a request to update to a new version failed and the stack rolled back to the current version.ERROR
- An unexpected error occurred. The provisioned product exists but the stack is not running. For example, CloudFormation received a parameter value that was not valid and could not launch the stack.PLAN_IN_PROGRESS
- Transitive state. The plan operations were performed to provision a new product, but resources have not yet been created. After reviewing the list of resources to be created, execute the plan. Wait for an AVAILABLE status before performing operations.Workteam
instance that contains information about the work team.
- *
- *
- *
- * Starting
- * - Starting the training job.Downloading
- An optional stage for algorithms that
- * support File
training input mode. It indicates that
- * data is being downloaded to the ML storage volumes.Training
- Training is in progress.Uploading
- Training is complete and the model
- * artifacts are being uploaded to the S3 location.
- *
- * Completed
- The training job has completed.
- *
- * Failed
- The training job has failed. The reason for
- * the failure is returned in the FailureReason
field of
- * DescribeTrainingJobResponse
.
- *
- * MaxRuntimeExceeded
- The job stopped because it
- * exceeded the maximum allowed runtime.Stopped
- The training job has stopped.
- *
- * Stopping
- Stopping the training job.
- *
- */
- Status: SecondaryStatus | string | undefined;
-
- /**
- * LaunchingMLInstances
- * PreparingTrainingStack
- * DownloadingTrainingImage
- *
- *
- *
- *
- *
- *
- * TrainingJobStatus
and SecondaryStatus
in DescribeTrainingJob, and StatusMessage
together. For
- * example, at the start of a training job, you might see the following:
- *
- */
- StatusMessage?: string;
-}
-
-export namespace SecondaryStatusTransition {
- /**
- * @internal
- */
- export const filterSensitiveLog = (obj: SecondaryStatusTransition): any => ({
- ...obj,
- });
-}
-
-export interface DescribeTrainingJobResponse {
- /**
- * TrainingJobStatus
- InProgressSecondaryStatus
- TrainingStatusMessage
- Downloading the training image
- *
- * InProgress
- The training is in progress.Completed
- The training job has completed.Failed
- The training job has failed. To see the reason for the
- * failure, see the FailureReason
field in the response to a
- * DescribeTrainingJobResponse
call.Stopping
- The training job is stopping.Stopped
- The training job has stopped.SecondaryStatus
. StatusMessage
- * under SecondaryStatusTransition.
- *
- *
- *
- *
- * Starting
- * - Starting the training job.Downloading
- An optional stage for algorithms that
- * support File
training input mode. It indicates that
- * data is being downloaded to the ML storage volumes.Training
- Training is in progress.Interrupted
- The job stopped because the managed
- * spot training instances were interrupted. Uploading
- Training is complete and the model
- * artifacts are being uploaded to the S3 location.
- *
- * Completed
- The training job has completed.
- *
- * Failed
- The training job has failed. The reason for
- * the failure is returned in the FailureReason
field of
- * DescribeTrainingJobResponse
.
- *
- * MaxRuntimeExceeded
- The job stopped because it
- * exceeded the maximum allowed runtime.MaxWaitTimeExceeded
- The job stopped because it
- * exceeded the maximum allowed wait time.Stopped
- The training job has stopped.
- *
- * Stopping
- Stopping the training job.SecondaryStatus
are subject to change.
- *
- */
- SecondaryStatus: SecondaryStatus | string | undefined;
-
- /**
- * LaunchingMLInstances
- * PreparingTraining
- * DownloadingTrainingImage
- * Channel
objects that describes each data input channel.
- * SIGTERM
signal, which delays
- * job termination for 120 seconds. Algorithms can use this 120-second window to save the
- * model artifacts, so the results of training are not lost. TrainingEndTime
. The start time in CloudWatch Logs might be later than this time.
- * The difference is due to the time it takes to download the training data and to the size
- * of the training container.TrainingStartTime
and this time.
- * For successful jobs and stopped jobs, this is the time after model artifacts are
- * uploaded. For failed jobs, this is the time when SageMaker detects a job failure.MetricData
objects that specify the names, values, and
- * dates and times that the training algorithm emitted to Amazon CloudWatch.True
. If you
- * enable network isolation for training jobs that are configured to use a VPC, SageMaker
- * downloads and uploads customer data and model artifacts through the specified VPC, but
- * the training container does not have network access.True
. Encryption provides greater security for distributed training,
- * but training might take longer. How long it takes depends on the amount of communication
- * between compute instances, especially if you use a deep learning algorithms in
- * distributed training.True
) or
- * not (False
).BillableTimeInSeconds
by the number of instances
- * (InstanceCount
) in your training cluster to get the total compute time
- * SageMaker bills you if you run distributed training. The formula is as follows:
- * BillableTimeInSeconds * InstanceCount
.(1 - BillableTimeInSeconds / TrainingTimeInSeconds) * 100
. For example,
- * if BillableTimeInSeconds
is 100 and TrainingTimeInSeconds
is
- * 500, the savings is 80%.DebugHookConfig
parameter,
- * see Use the SageMaker and Debugger Configuration API Operations to Create, Update, and Debug Your Training Job.
+ *
*
*/
- ExperimentConfig?: ExperimentConfig;
-
- /**
- * AVAILABLE
- Stable state, ready to perform any operation. The most recent operation succeeded and completed.UNDER_CHANGE
- Transitive state. Operations performed might not have valid results. Wait for an AVAILABLE status before performing operations.TAINTED
- Stable state, ready to perform any operation. The stack has completed the requested operation but is not exactly what was requested. For example, a request to update to a new version failed and the stack rolled back to the current version.ERROR
- An unexpected error occurred. The provisioned product exists but the stack is not running. For example, CloudFormation received a parameter value that was not valid and could not launch the stack.PLAN_IN_PROGRESS
- Transitive state. The plan operations were performed to provision a new product, but resources have not yet been created. After reviewing the list of resources to be created, execute the plan. Wait for an AVAILABLE status before performing operations.InternalServerError
.FailureReason
field.FailureReason
describes
- * why
- * it failed. A transform job creates a log file, which includes error
- * messages, and stores it
- * as
- * an Amazon S3 object. For more information, see Log Amazon SageMaker Events with
- * Amazon CloudWatch.SplitType
- * to
- * Line
, RecordIO
, or
- * TFRecord
.TransformEndTime
.TransformStartTime
.
- *
+ * DisplayName
isn't specified,
- * TrialName
is displayed.Workteam
instance that contains information about the work team.
+ *
+ *
+ *
+ * Starting
+ * - Starting the training job.Downloading
- An optional stage for algorithms that
+ * support File
training input mode. It indicates that
+ * data is being downloaded to the ML storage volumes.Training
- Training is in progress.Uploading
- Training is complete and the model
+ * artifacts are being uploaded to the S3 location.
+ *
+ * Completed
- The training job has completed.
+ *
+ * Failed
- The training job has failed. The reason for
+ * the failure is returned in the FailureReason
field of
+ * DescribeTrainingJobResponse
.
+ *
+ * MaxRuntimeExceeded
- The job stopped because it
+ * exceeded the maximum allowed runtime.Stopped
- The training job has stopped.
+ *
+ * Stopping
- Stopping the training job.
+ *
*/
- Count?: number;
+ Status: SecondaryStatus | string | undefined;
/**
- * LaunchingMLInstances
+ * PreparingTrainingStack
+ * DownloadingTrainingImage
+ *
+ *
+ *
+ *
+ *
+ *
+ * TrainingJobStatus
and SecondaryStatus
in DescribeTrainingJob, and StatusMessage
together. For
+ * example, at the start of a training job, you might see the following:
+ *
+ */
+ StatusMessage?: string;
}
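The doc text being moved here explains that a `SecondaryStatusTransition`'s `Status` and `StatusMessage` are meant to be read alongside the job-level `TrainingJobStatus` returned by `DescribeTrainingJob`. A minimal sketch of doing that with the SDK this patch regenerates; the region and job name are placeholders:

```ts
import { SageMakerClient, DescribeTrainingJobCommand } from "@aws-sdk/client-sagemaker";

const sagemaker = new SageMakerClient({ region: "us-east-1" }); // placeholder region

export const logTrainingStatus = async (trainingJobName: string): Promise<void> => {
  const { TrainingJobStatus, SecondaryStatus, SecondaryStatusTransitions } = await sagemaker.send(
    new DescribeTrainingJobCommand({ TrainingJobName: trainingJobName })
  );
  // The last transition carries the most recent human-readable StatusMessage.
  const latest = SecondaryStatusTransitions?.[SecondaryStatusTransitions.length - 1];
  console.log(`${TrainingJobStatus} / ${SecondaryStatus}: ${latest?.StatusMessage ?? "(no message yet)"}`);
};
```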
-export namespace TrialComponentMetricSummary {
+export namespace SecondaryStatusTransition {
/**
* @internal
*/
- export const filterSensitiveLog = (obj: TrialComponentMetricSummary): any => ({
+ export const filterSensitiveLog = (obj: SecondaryStatusTransition): any => ({
...obj,
});
}
-/**
- * TrainingJobStatus
- InProgressSecondaryStatus
- TrainingStatusMessage
- Downloading the training imageDisplayName
isn't specified,
- * TrialComponentName
is displayed.
+ *
+ * InProgress
- The training is in progress.Completed
- The training job has completed.Failed
- The training job has failed. To see the reason for the
+ * failure, see the FailureReason
field in the response to a
+ * DescribeTrainingJobResponse
call.Stopping
- The training job is stopping.Stopped
- The training job has stopped.SecondaryStatus
.
+ *
StatusMessage
+ * under SecondaryStatusTransition.
+ *
+ *
+ *
+ *
+ * Starting
+ * - Starting the training job.Downloading
- An optional stage for algorithms that
+ * support File
training input mode. It indicates that
+ * data is being downloaded to the ML storage volumes.Training
- Training is in progress.Interrupted
- The job stopped because the managed
+ * spot training instances were interrupted. Uploading
- Training is complete and the model
+ * artifacts are being uploaded to the S3 location.
+ *
+ * Completed
- The training job has completed.
+ *
+ * Failed
- The training job has failed. The reason for
+ * the failure is returned in the FailureReason
field of
+ * DescribeTrainingJobResponse
.
+ *
+ * MaxRuntimeExceeded
- The job stopped because it
+ * exceeded the maximum allowed runtime.MaxWaitTimeExceeded
- The job stopped because it
+ * exceeded the maximum allowed wait time.Stopped
- The training job has stopped.
+ *
+ * Stopping
- Stopping the training job.SecondaryStatus
are subject to change.
*
*/
- Status?: TrialComponentStatus;
+ SecondaryStatus: SecondaryStatus | string | undefined;
/**
- * LaunchingMLInstances
+ * PreparingTraining
+ * DownloadingTrainingImage
+ * Channel
objects that describes each data input channel.
+ * SIGTERM
signal, which delays
+ * job termination for 120 seconds. Algorithms can use this 120-second window to save the
+ * model artifacts, so the results of training are not lost. TrainingEndTime
. The start time in CloudWatch Logs might be later than this time.
+ * The difference is due to the time it takes to download the training data and to the size
+ * of the training container.TrainingStartTime
and this time.
+ * For successful jobs and stopped jobs, this is the time after model artifacts are
+ * uploaded. For failed jobs, this is the time when SageMaker detects a job failure.MetricData
objects that specify the names, values, and
+ * dates and times that the training algorithm emitted to Amazon CloudWatch.True
. If you
+ * enable network isolation for training jobs that are configured to use a VPC, SageMaker
+ * downloads and uploads customer data and model artifacts through the specified VPC, but
+ * the training container does not have network access.True
. Encryption provides greater security for distributed training,
+ * but training might take longer. How long it takes depends on the amount of communication
+ * between compute instances, especially if you use deep learning algorithms in
+ * distributed training.True
) or
+ * not (False
).BillableTimeInSeconds
by the number of instances
+ * (InstanceCount
) in your training cluster to get the total compute time
+ * SageMaker bills you if you run distributed training. The formula is as follows:
+ * BillableTimeInSeconds * InstanceCount
.(1 - BillableTimeInSeconds / TrainingTimeInSeconds) * 100
. For example,
+ * if BillableTimeInSeconds
is 100 and TrainingTimeInSeconds
is
+ * 500, the savings is 80%.DebugHookConfig
parameter,
+ * see Use the SageMaker and Debugger Configuration API Operations to Create, Update, and Debug Your Training Job.
+ *
*/
- UserProfileArn?: string;
+ ExperimentConfig?: ExperimentConfig;
/**
- * InternalServerError
.WorkforceName
is automatically set to default
when a
- * workforce is created and cannot be modified. FailureReason
field.FailureReason
describes
+ * why it failed. A transform job creates a log file, which includes error
+ * messages, and stores it as an Amazon S3 object. For more information, see Log Amazon SageMaker Events with
+ * Amazon CloudWatch.SplitType
+ * to
+ * Line
, RecordIO
, or
+ * TFRecord
.TransformEndTime
.TransformStartTime
.
+ *
*/
- FailureReason?: string;
+ ExperimentConfig?: ExperimentConfig;
}
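The billing fields documented in this response spell out the arithmetic: total billed compute is `BillableTimeInSeconds * InstanceCount`, and the managed spot saving is `(1 - BillableTimeInSeconds / TrainingTimeInSeconds) * 100`. A small sketch of that calculation; the sample numbers are the documented example values, not real job output:

```ts
// Inputs correspond to BillableTimeInSeconds, TrainingTimeInSeconds, and InstanceCount
// from a DescribeTrainingJob response.
const spotTrainingCostSummary = (
  billableTimeInSeconds: number,
  trainingTimeInSeconds: number,
  instanceCount: number
) => ({
  totalBilledSeconds: billableTimeInSeconds * instanceCount,
  savingsPercent: (1 - billableTimeInSeconds / trainingTimeInSeconds) * 100,
});

// Matches the documented example: 100 billable seconds out of 500 training seconds -> 80% savings.
console.log(spotTrainingCostSummary(100, 500, 2)); // { totalBilledSeconds: 200, savingsPercent: 80 }
```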
-export namespace Workforce {
+export namespace DescribeTransformJobResponse {
/**
* @internal
*/
- export const filterSensitiveLog = (obj: Workforce): any => ({
+ export const filterSensitiveLog = (obj: DescribeTransformJobResponse): any => ({
...obj,
});
}
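For the transform-job fields documented just above (`FailureReason`, `TransformStartTime`, `TransformEndTime`), here is a rough sketch of surfacing a failed run; the job name is a placeholder:

```ts
import { SageMakerClient, DescribeTransformJobCommand } from "@aws-sdk/client-sagemaker";

const sagemaker = new SageMakerClient({});

export const reportTransformJob = async (transformJobName: string): Promise<void> => {
  const job = await sagemaker.send(new DescribeTransformJobCommand({ TransformJobName: transformJobName }));
  if (job.TransformJobStatus === "Failed") {
    // Per the documentation above, the detailed error log is also written to an Amazon S3 object.
    console.error(`Transform job failed: ${job.FailureReason}`);
  } else {
    console.log(`${job.TransformJobStatus}; started ${job.TransformStartTime}, ended ${job.TransformEndTime}`);
  }
};
```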
-export interface DescribeWorkforceResponse {
+export interface DescribeTrialRequest {
/**
- * MemberDefinition
objects that contains objects that identify
- * the workers that make up the work team. CognitoMemberDefinition
. For workforces created using your own OIDC identity
- * provider (IdP) use OidcMemberDefinition
.DisplayName
isn't specified,
+ * TrialName
is displayed.Workteam
instance that contains information about the work team.
- * DisplayName
isn't specified,
+ * TrialComponentName
is displayed.
+ *
*/
- DeviceArn: string | undefined;
+ Status?: TrialComponentStatus;
/**
- * RStudioServerPro
Domain-level app.RStudioServerPro
Domain-level app.Domain
configuration settings to update.RStudioServerPro
Domain-level app settings to update.WorkforceName
is automatically set to default
when a
+ * workforce is created and cannot be modified. ContributedTo
,
- * Produced
, or DerivedFrom
.
- *
+ * PENDING
- The schedule is pending being created.FAILED
- The schedule failed.SCHEDULED
- The schedule was successfully created.STOPPED
- The schedule was stopped.MemberDefinition
objects that contains objects that identify
+ * the workers that make up the work team. CognitoMemberDefinition
. For workforces created using your own OIDC identity
+ * provider (IdP) use OidcMemberDefinition
.Workteam
instance that contains information about the work team.
+ *
- *
- * OutOfService
: Endpoint is not available to take incoming
- * requests.Creating
: CreateEndpoint is executing.Updating
: UpdateEndpoint or UpdateEndpointWeightsAndCapacities is executing.SystemUpdating
: Endpoint is undergoing maintenance and cannot be
- * updated or deleted or re-scaled until it has completed. This maintenance
- * operation does not change any customer-specified values such as VPC config, KMS
- * encryption, model, instance type, or instance count.RollingBack
: Endpoint fails to scale up or down or change its
- * variant weight and is in the process of rolling back to its previous
- * configuration. Once the rollback completes, endpoint returns to an
- * InService
status. This transitional status only applies to an
- * endpoint that has autoscaling enabled and is undergoing variant weight or
- * capacity changes as part of an UpdateEndpointWeightsAndCapacities call or when the UpdateEndpointWeightsAndCapacities operation is called
- * explicitly.InService
: Endpoint is available to process incoming
- * requests.Deleting
: DeleteEndpoint is executing.Failed
: Endpoint could not be created, updated, or re-scaled. Use
- * DescribeEndpointOutput$FailureReason for information about
- * the failure. DeleteEndpoint is the only operation that can be
- * performed on a failed endpoint.DisplayName
isn't specified,
- * ExperimentName
is displayed.ExperimentName
.DisplayName
isn't specified,
- * ExperimentName
is displayed.FeatureGroup
.FeatureGroup
.Feature
whose value uniquely identifies a
- * Record
defined in the FeatureGroup
- * FeatureDefinitions
.EventTime
of a Record in a
- * FeatureGroup
.EventTime
is point in time when a new event
- * occurs that corresponds to the creation or update of a Record
in
- * FeatureGroup
. All Records
in the FeatureGroup
- * must have a corresponding EventTime
.Feature
s. Each Feature
must include a
- * FeatureName
and a FeatureType
. FeatureType
s are Integral
, Fractional
and
- * String
. FeatureName
s cannot be any of the following: is_deleted
,
- * write_time
, api_invocation_time
.FeatureDefinition
s per
- * FeatureGroup
.FeatureGroup
was created.KMSKeyId
, for at rest data encryption. You can turn
- * OnlineStore
on or off by specifying the EnableOnlineStore
flag
- * at General Assembly; the default value is False
.OfflineStore
.OfflineStoreConfig
in a request to
- * CreateFeatureGroup
to create an OfflineStore
.OfflineStore
using at rest data encryption, specify Amazon Web Services Key
- * Management Service (KMS) key ID, or KMSKeyId
, in
- * S3StorageConfig
.FeatureGroup
status.OfflineStore
.FeatureGroup
failed to
- * be replicated in the OfflineStore
. This is
- * failure may be due to a failure to create a FeatureGroup
in
- * or delete a FeatureGroup
from the OfflineStore
.RStudioServerPro
Domain-level app.FeatureGroup
.RStudioServerPro
Domain-level app.FeatureGroup
.CreationTime
, FeatureGroup
values,
- * LastUpdatedTime
and EnableOnlineStorage
status of a
- * FeatureGroup
.Domain
configuration settings to update.FeatureGroup
.RStudioServerPro
Domain-level app settings to update.FeatureGroup
.FeatureGroup
.Creating
, Created
, CreateFail
,
- * Deleting
or DetailFail
. OfflineStore
has failed. Returns
- * either: Active
or Blocked
.ContributedTo
,
+ * Produced
, or DerivedFrom
.Value
, but not an Operator
, Amazon SageMaker uses the
- * equals operator.
- *
- */
-export interface Filter {
- /**
- * "Metrics.
, where
is
- * a metric name. For example, the following filter searches for training jobs
- * with an "accuracy"
metric greater than
- * "0.9"
:{
- * "Name": "Metrics.accuracy",
- * "Operator": "GreaterThan",
- * "Value": "0.9"
- * }
- * "HyperParameters.
. Decimal hyperparameter
- * values are treated as a decimal in a comparison if the specified
- * Value
is also a decimal value. If the specified
- * Value
is an integer, the decimal hyperparameter values are
- * treated as integers. For example, the following filter is satisfied by
- * training jobs with a "learning_rate"
hyperparameter that is
- * less than "0.5"
: {
- * "Name": "HyperParameters.learning_rate",
- * "Operator": "LessThan",
- * "Value": "0.5"
- * }
- * Tags.
.TrainingJobName
. For
- * valid property names, see SearchRecord.
- * You must specify a valid property for the resource.
- *
+/**
+ * Name
equals Value
.Name
doesn't equal Value
.Name
property exists.Name
property does not exist.Name
is greater than Value
.
- * Not supported for text properties.Name
is greater than or equal to Value
.
- * Not supported for text properties.Name
is less than Value
.
- * Not supported for text properties.Name
is less than or equal to Value
.
- * Not supported for text properties.Name
is one of the comma delimited strings in
- * Value
. Only supported for text properties.Name
contains the string Value
.
- * Only supported for text properties.SearchExpression
can include the Contains
operator
- * multiple times when the value of Name
is one of the following:
- *
- * Experiment.DisplayName
- * Experiment.ExperimentName
- * Experiment.Tags
- * Trial.DisplayName
- * Trial.TrialName
- * Trial.Tags
- * TrialComponent.DisplayName
- * TrialComponent.TrialComponentName
- * TrialComponent.Tags
- * TrialComponent.InputArtifacts
- * TrialComponent.OutputArtifacts
- * SearchExpression
can include only one Contains
operator
- * for all other values of Name
. In these cases, if you include multiple
- * Contains
operators in the SearchExpression
, the result is
- * the following error message: "'CONTAINS' operator usage limit of 1
- * exceeded.
"Name
and Operator
to determine which
- * resources satisfy the filter's condition. For numerical properties, Value
- * must be an integer or floating-point decimal. For timestamp properties,
- * Value
must be an ISO 8601 date-time string of the following format:
- * YYYY-mm-dd'T'HH:MM:SS
.Failed
.
+ *
*/
- ReportGenerated?: Date;
+ MonitoringScheduleStatus?: ScheduleStatus | string;
/**
- * PENDING
- The schedule is pending being created.FAILED
- The schedule failed.SCHEDULED
- The schedule was successfully created.STOPPED
- The schedule was stopped.SuggestionQuery
type. Specifies a hint for retrieving property
- * names that begin with the specified text.
+ *
+ * OutOfService
: Endpoint is not available to take incoming
+ * requests.Creating
: CreateEndpoint is executing.Updating
: UpdateEndpoint or UpdateEndpointWeightsAndCapacities is executing.SystemUpdating
: Endpoint is undergoing maintenance and cannot be
+ * updated or deleted or re-scaled until it has completed. This maintenance
+ * operation does not change any customer-specified values such as VPC config, KMS
+ * encryption, model, instance type, or instance count.RollingBack
: Endpoint fails to scale up or down or change its
+ * variant weight and is in the process of rolling back to its previous
+ * configuration. Once the rollback completes, endpoint returns to an
+ * InService
status. This transitional status only applies to an
+ * endpoint that has autoscaling enabled and is undergoing variant weight or
+ * capacity changes as part of an UpdateEndpointWeightsAndCapacities call or when the UpdateEndpointWeightsAndCapacities operation is called
+ * explicitly.InService
: Endpoint is available to process incoming
+ * requests.Deleting
: DeleteEndpoint is executing.Failed
: Endpoint could not be created, updated, or re-scaled. Use
+ * DescribeEndpointOutput$FailureReason for information about
+ * the failure. DeleteEndpoint is the only operation that can be
+ * performed on a failed endpoint.GetSearchSuggestions
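The endpoint status list above (OutOfService, Creating, Updating, SystemUpdating, RollingBack, InService, Deleting, Failed) implies the usual polling loop: wait while the status is transitional and surface `FailureReason` once it lands on `Failed`. A rough sketch with an assumed 30-second interval; the SDK also ships generated waiters such as `waitUntilEndpointInService` that cover the common case:

```ts
import { SageMakerClient, DescribeEndpointCommand } from "@aws-sdk/client-sagemaker";
import { setTimeout as sleep } from "node:timers/promises";

const sagemaker = new SageMakerClient({});

export const waitForInService = async (endpointName: string): Promise<void> => {
  for (;;) {
    const { EndpointStatus, FailureReason } = await sagemaker.send(
      new DescribeEndpointCommand({ EndpointName: endpointName })
    );
    if (EndpointStatus === "InService") return;
    if (EndpointStatus === "Failed") {
      throw new Error(FailureReason ?? "Endpoint entered Failed status");
    }
    await sleep(30_000); // Creating/Updating/SystemUpdating/RollingBack are transitional
  }
};
```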
call that specifies
- * a value in the PropertyNameQuery
field.Resource
that match a
- * SuggestionQuery
.DisplayName
isn't specified,
+ * ExperimentName
is displayed.AWSCURRENT
and must be in the following format:{"username": UserName, "password":
- * Password}
- * ExperimentName
.DisplayName
isn't specified,
+ * ExperimentName
is displayed.ImageVersion
.FeatureGroup
.FeatureGroup
.Feature
whose value uniquely identifies a
+ * Record
defined in the FeatureGroup
+ * FeatureDefinitions
.EventTime
of a Record in a
+ * FeatureGroup
.EventTime
is point in time when a new event
+ * occurs that corresponds to the creation or update of a Record
in
+ * FeatureGroup
. All Records
in the FeatureGroup
+ * must have a corresponding EventTime
.Feature
s. Each Feature
must include a
+ * FeatureName
and a FeatureType
. FeatureType
s are Integral
, Fractional
and
+ * String
. FeatureName
s cannot be any of the following: is_deleted
,
+ * write_time
, api_invocation_time
.FeatureDefinition
s per
+ * FeatureGroup
.FeatureGroup
was created.KMSKeyId
, for at rest data encryption. You can turn
+ * OnlineStore
on or off by specifying the EnableOnlineStore
flag
+ * at General Assembly; the default value is False
.OfflineStore
.OfflineStoreConfig
in a request to
+ * CreateFeatureGroup
to create an OfflineStore
.OfflineStore
using at rest data encryption, specify Amazon Web Services Key
+ * Management Service (KMS) key ID, or KMSKeyId
, in
+ * S3StorageConfig
.Image
. A version represents an existing container
- * image.FeatureGroup
status.OfflineStore
.FeatureGroup
failed to
+ * be replicated in the OfflineStore
. This
+ * failure may be due to a failure to create a FeatureGroup
in
+ * or delete a FeatureGroup
from the OfflineStore
.FeatureGroup
.FeatureGroup
.CreationTime
, FeatureGroup
values,
+ * LastUpdatedTime
and EnableOnlineStorage
status of a
+ * FeatureGroup
.FeatureGroup
.FeatureGroup
.FeatureGroup
.Creating
, Created
, CreateFail
,
+ * Deleting
or DetailFail
. OfflineStore
has failed. Returns
+ * either: Active
or Blocked
.Value
, but not an Operator
, Amazon SageMaker uses the
+ * equals operator.
+ *
*/
-export interface LabelCountersForWorkteam {
+export interface Filter {
/**
- * "Metrics.
, where
is
+ * a metric name. For example, the following filter searches for training jobs
+ * with an "accuracy"
metric greater than
+ * "0.9"
:{
+ * "Name": "Metrics.accuracy",
+ * "Operator": "GreaterThan",
+ * "Value": "0.9"
+ * }
+ * "HyperParameters.
. Decimal hyperparameter
+ * values are treated as a decimal in a comparison if the specified
+ * Value
is also a decimal value. If the specified
+ * Value
is an integer, the decimal hyperparameter values are
+ * treated as integers. For example, the following filter is satisfied by
+ * training jobs with a "learning_rate"
hyperparameter that is
+ * less than "0.5"
: {
+ * "Name": "HyperParameters.learning_rate",
+ * "Operator": "LessThan",
+ * "Value": "0.5"
+ * }
+ * Tags.
.TrainingJobName
. For
+ * valid property names, see SearchRecord.
+ * You must specify a valid property for the resource.
+ *
*/
- PendingHuman?: number;
+ Operator?: Operator | string;
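The `Filter` documentation above is easiest to read next to an actual Search call. Below is a minimal sketch using the documented `Metrics.accuracy` example; the resource type, operator, and threshold come from the doc text, everything else is a placeholder:

```ts
import { SageMakerClient, SearchCommand } from "@aws-sdk/client-sagemaker";

const sagemaker = new SageMakerClient({});

export const findAccurateTrainingJobs = async () => {
  // Mirrors the documented filter: training jobs with an "accuracy" metric greater than 0.9.
  const { Results } = await sagemaker.send(
    new SearchCommand({
      Resource: "TrainingJob",
      SearchExpression: {
        Filters: [{ Name: "Metrics.accuracy", Operator: "GreaterThan", Value: "0.9" }],
      },
    })
  );
  return Results ?? [];
};
```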
/**
- * Name
equals Value
.Name
doesn't equal Value
.Name
property exists.Name
property does not exist.Name
is greater than Value
.
+ * Not supported for text properties.Name
is greater than or equal to Value
.
+ * Not supported for text properties.Name
is less than Value
.
+ * Not supported for text properties.Name
is less than or equal to Value
.
+ * Not supported for text properties.Name
is one of the comma delimited strings in
+ * Value
. Only supported for text properties.Name
contains the string Value
.
+ * Only supported for text properties.SearchExpression
can include the Contains
operator
+ * multiple times when the value of Name
is one of the following:
+ *
+ * Experiment.DisplayName
+ * Experiment.ExperimentName
+ * Experiment.Tags
+ * Trial.DisplayName
+ * Trial.TrialName
+ * Trial.Tags
+ * TrialComponent.DisplayName
+ * TrialComponent.TrialComponentName
+ * TrialComponent.Tags
+ * TrialComponent.InputArtifacts
+ * TrialComponent.OutputArtifacts
+ * SearchExpression
can include only one Contains
operator
+ * for all other values of Name
. In these cases, if you include multiple
+ * Contains
operators in the SearchExpression
, the result is
+ * the following error message: "'CONTAINS' operator usage limit of 1
+ * exceeded.
"Name
and Operator
to determine which
+ * resources satisfy the filter's condition. For numerical properties, Value
+ * must be an integer or floating-point decimal. For timestamp properties,
+ * Value
must be an ISO 8601 date-time string of the following format:
+ * YYYY-mm-dd'T'HH:MM:SS
.LabelingJobStatus
field is Failed
, this field
- * contains a description of the error.Failed
.CreationTime
.Descending
.ListActions
didn't return the full set of actions,
- * the call returns a token for getting the next set of actions.ListAlgorithms
request was truncated, the
- * response includes a NextToken
. To retrieve the next set of algorithms, use
- * the token in the next request.CreationTime
.Ascending
.AlgorithmSummary
objects, each of which lists an
- * algorithm.SuggestionQuery
type. Specifies a hint for retrieving property
+ * names that begin with the specified text.ListImages
didn't return the full set of
- * AppImageConfigs, the call returns a token for getting the next set of AppImageConfigs.CreationTime
.GetSearchSuggestions
call that specifies
+ * a value in the PropertyNameQuery
field.Descending
.Resource
that match a
+ * SuggestionQuery
.AWSCURRENT
and must be in the following format:{"username": UserName, "password":
+ * Password}
+ * CreationTime
.Descending
.ListArtifacts
didn't return the full set of artifacts,
- * the call returns a token for getting the next set of artifacts.ImageVersion
.Image
. A version represents an existing container
+ * image.CreationTime
.Descending
.ListAssociations
didn't return the full set of associations,
- * the call returns a token for getting the next set of associations.Descending
.Name
.Ascending
.Descending
.AutoMLCandidates
.ListCodeRepositoriesOutput
request was truncated, the
- * response includes a NextToken
. To get the next set of Git repositories, use
- * the token in the next request.LabelingJobStatus
field is Failed
, this field
+ * contains a description of the error.Name
.Ascending
.
- *
+ * ListCodeRepositoriesOutput
request was truncated, the
- * response includes a NextToken
. To get the next set of Git repositories, use
- * the token in the next request.ListCompilationJobs
request was truncated,
- * the response includes a NextToken
. To retrieve the next set of model
- * compilation jobs, use the token in the next request.CreationTime
.Descending
.CreationTime
.ListActions
didn't return the full set of actions,
+ * the call returns a token for getting the next set of actions.Ascending
.NextToken
. To retrieve
- * the next set of model compilation jobs, use this token in the next request.CreationTime
.Descending
.ListAlgorithms
request was truncated, the
+ * response includes a NextToken
. To retrieve the next set of algorithms, use
+ * the token in the next request.ListContexts
didn't return the full set of contexts,
- * the call returns a token for getting the next set of contexts.CreationTime
.Ascending
.AlgorithmSummary
objects, each of which lists an
+ * algorithm.ListImages
didn't return the full set of
+ * AppImageConfigs, the call returns a token for getting the next set of AppImageConfigs.CreationTime
.Descending
.ListDataQualityJobDefinitions
request was
- * truncated, the response includes a NextToken
. To retrieve the next set of
- * transform jobs, use the token in the next request.>CreationTime
.Descending
.ListDataQualityJobDefinitions
request was
- * truncated, the response includes a NextToken
. To retrieve the next set of data
- * quality monitoring job definitions, use the token in the next request.CreationTime
.Descending
.ListArtifacts
didn't return the full set of artifacts,
+ * the call returns a token for getting the next set of artifacts.CreationTime
.Descending
.ListAssociations
didn't return the full set of associations,
+ * the call returns a token for getting the next set of associations.Descending
.Name
.CreationTime
.Descending
.ListEndpointConfig
request was
- * truncated, the response includes a NextToken
. To retrieve the next set of
- * endpoint configurations, use the token in the next request. Ascending
.Descending
.AutoMLCandidates
.CreationTime
.Descending
.ListEndpoints
request was truncated, the response
- * includes a NextToken
. To retrieve the next set of endpoints, use the token
- * in the next request.ListCodeRepositoriesOutput
request was truncated, the
+ * response includes a NextToken
. To get the next set of Git repositories, use
+ * the token in the next request.Name
.Ascending
.
+ *
*/
- Endpoints: EndpointSummary[] | undefined;
+ CodeRepositorySummaryList: CodeRepositorySummary[] | undefined;
/**
- * ListCodeRepositoriesOutput
request was truncated, the
+ * response includes a NextToken
. To get the next set of Git repositories, use
+ * the token in the next request.ListCompilationJobs
request was truncated,
+ * the response includes a NextToken
. To retrieve the next set of model
+ * compilation jobs, use the token in the next request.CreationTime
.Descending
.ListExperiments
didn't return the full set of
- * experiments, the call returns a token for getting the next set of experiments.CreationTime
.Ascending
.NextToken
. To retrieve
+ * the next set of model compilation jobs, use this token in the next request.FeatureGroup
s names. Filters
- * FeatureGroup
s by name. FeatureGroup
status. Filters by FeatureGroup
status. OfflineStore
status. Filters by OfflineStore
status. FeatureGroups
s created after a specific
- * date and time.FeatureGroups
s created before a specific
- * date and time.CreationTime
.Descending
.ListFeatureGroups
.ListContexts
didn't return the full set of contexts,
+ * the call returns a token for getting the next set of contexts.ListFeatureGroups
results.ListFeatureGroups
results.CreationTime
.Ascending
or Descending
order.Descending
.ListDataQualityJobDefinitions
request was
+ * truncated, the response includes a NextToken
. To retrieve the next set of
+ * transform jobs, use the token in the next request.>MaxResults
, then a NextToken
will be provided in the output that you can use to resume pagination.Ascending
or Descending
order.MaxResults
, then a NextToken
will be provided in the output that you can use to resume pagination.ListDataQualityJobDefinitions
request was
+ * truncated, the response includes a NextToken
. To retrieve the next set of data
+ * quality monitoring job definitions, use the token in the next request.ListHyperParameterTuningJobs
request was
- * truncated, the response includes a NextToken
. To retrieve the next set of
- * tuning jobs, use the token in the next request.Name
.Ascending
.ListHyperParameterTuningJobs
- * request returned.ListHyperParameterTuningJobs
request was truncated,
- * the response includes a NextToken
. To retrieve the next set of tuning jobs,
- * use the token in the next request.ListImages
didn't return the full set of images,
- * the call returns a token for getting the next set of images.CREATION_TIME
.DESCENDING
.ListImageVersions
didn't return the full set of
- * versions, the call returns a token for getting the next set of versions.CREATION_TIME
.NAME
, DEVICEFLEETNAME
, CREATIONTIME
, LASTMODIFIEDTIME
.DESCENDING
.ListInferenceRecommendationsJobsRequest
request
- * was truncated, the response includes a NextToken
. To retrieve the next set
- * of recommendations, use the token in the next request.CreationTime
.Descending
.ListEndpointConfig
request was
+ * truncated, the response includes a NextToken
. To retrieve the next set of
+ * endpoint configurations, use the token in the next request. ListLabelingJobs
request was truncated, the
- * response includes a NextToken
. To retrieve the next set of labeling jobs,
- * use the token in the next request.CreationTime
.Ascending
.LabelingJobSummary
objects, each describing a labeling
- * job.CreationTime
.Descending
.ListLabelingJobsForWorkteam
request was
- * truncated, the response includes a NextToken
. To retrieve the next set of
- * labeling jobs, use the token in the next request.ListEndpoints
request was truncated, the response
+ * includes a NextToken
. To retrieve the next set of endpoints, use the token
+ * in the next request.CreationTime
.Ascending
.LabelingJobSummary
objects, each describing a labeling
- * job.CreationTime
.CreationTime
.Ascending
.Descending
.ListExperiments
didn't return the full set of
+ * experiments, the call returns a token for getting the next set of experiments.FeatureGroup
s names. Filters
+ * FeatureGroup
s by name. Name
or CreationTime
field. The
- * default is CreationTime
.FeatureGroup
status. Filters by FeatureGroup
status. Ascending
or Descending
order.
- * The default is Descending
.OfflineStore
status. Filters by OfflineStore
status. FeatureGroups
s created after a specific
+ * date and time.FeatureGroups
s created before a specific
+ * date and time.ListFeatureGroups
.ListFeatureGroups
results.ListFeatureGroups
results.Name
or CreationTime
field. The
- * default is CreationTime
.Ascending
or Descending
order.
- * The default is Descending
.Ascending
or Descending
order.MaxResults
, then a NextToken
will be provided in the output that you can use to resume pagination.Ascending
or Descending
order.MaxResults
, then a NextToken
will be provided in the output that you can use to resume pagination.ListHyperParameterTuningJobs
request was
+ * truncated, the response includes a NextToken
. To retrieve the next set of
+ * tuning jobs, use the token in the next request.ListModelMetadataResponse
request was truncated,
- * the response includes a NextToken. To retrieve the next set of model metadata,
- * use the token in the next request.Name
.Ascending
.ListHyperParameterTuningJobs
+ * request returned.ListHyperParameterTuningJobs
request was truncated,
+ * the response includes a NextToken
. To retrieve the next set of tuning jobs,
+ * use the token in the next request.ListModelPackageGroups
request was
- * truncated, the response includes a NextToken
. To retrieve the next set of
- * model groups, use the token in the next request.ListImages
didn't return the full set of images,
+ * the call returns a token for getting the next set of images.CreationTime
.CREATION_TIME
.Ascending
.DESCENDING
.ListImageVersions
didn't return the full set of
+ * versions, the call returns a token for getting the next set of versions.CREATION_TIME
.
- *
+ * UNVERSIONED
- List only unversioined models.
- * This is the default value if no ModelPackageType
is specified.VERSIONED
- List only versioned models.BOTH
- List both versioned and unversioned models.DESCENDING
.ListModelPackages
request was truncated,
- * the response includes a NextToken
. To retrieve the next set of model
- * packages, use the token in the next request.CreationTime
.Ascending
.
- *
+ * APPROVED
- The model is approvedREJECTED
- The model is rejected.PENDING_MANUAL_APPROVAL
- The model is waiting for manual
- * approval.ListInferenceRecommendationsJobsRequest
request
+ * was truncated, the response includes a NextToken
. To retrieve the next set
+ * of recommendations, use the token in the next request.ModelPackageSummary
objects, each of which lists a model
- * package.CreationTime
.Descending
.ListModelQualityJobDefinitions
request was
- * truncated, the response includes a NextToken
. To retrieve the next set of
- * model quality monitoring job definitions, use the token in the next request.ListModelQualityJobDefinitions
.ListLabelingJobs
request was truncated, the
+ * response includes a NextToken
. To retrieve the next set of labeling jobs,
+ * use the token in the next request.CreationTime
.Ascending
.LabelingJobSummary
objects, each describing a labeling
+ * job.CreationTime
.Descending
.ListModels
request was truncated, the
- * response includes a NextToken
. To retrieve the next set of models, use the
- * token in the next request.ListLabelingJobsForWorkteam
request was
+ * truncated, the response includes a NextToken
. To retrieve the next set of
+ * labeling jobs, use the token in the next request.CreationTime
.Ascending
.LabelingJobSummary
objects, each describing a labeling
+ * job.ModelSummary
objects, each of which lists a
- * model.CreationTime
.Ascending
.Status
, CreationTime
,
- * ScheduledTime
field. The default is CreationTime
.Name
or CreationTime
field. The
+ * default is CreationTime
.Ascending
or Descending
order.
@@ -9499,106 +9501,69 @@ export interface ListMonitoringExecutionsRequest {
NextToken?: string;
/**
- * Status
, CreationTime
,
- * ScheduledTime
field. The default is CreationTime
.Name
or CreationTime
field. The
+ * default is CreationTime
.Ascending
or Descending
order.
@@ -9618,1391 +9583,1427 @@ export interface ListMonitoringSchedulesRequest {
MaxResults?: number;
/**
- * ListModelMetadataResponse
request was truncated,
+ * the response includes a NextToken. To retrieve the next set of model metadata,
+ * use the token in the next request.ListNotebookInstanceLifecycleConfigs
request was
- * truncated, the response includes a NextToken
. To get the next set of
- * lifecycle configurations, use the token in the next request.CreationTime
.ListModelPackageGroups
request was
+ * truncated, the response includes a NextToken
. To retrieve the next set of
+ * model groups, use the token in the next request.CreationTime
.Ascending
.NotebookInstanceLifecycleConfiguration
objects, each listing
- * a lifecycle configuration.ListNotebookInstances
is truncated, the
- * response includes a NextToken
. You can use this token in your subsequent
- * ListNotebookInstances
request to fetch the next set of notebook
- * instances. Name
.
+ *
*/
- CreationTimeBefore?: Date;
+ ModelPackageType?: ModelPackageType | string;
/**
- * UNVERSIONED
- List only unversioined models.
+ * This is the default value if no ModelPackageType
is specified.VERSIONED
- List only versioned models.BOTH
- List both versioned and unversioned models.ListModelPackages
request was truncated,
+ * the response includes a NextToken
. To retrieve the next set of model
+ * packages, use the token in the next request.CreationTime
.Ascending
.
+ *
+ */
+ ModelApprovalStatus?: ModelApprovalStatus | string;
+}
+
+export namespace ModelPackageSummary {
+ /**
+ * @internal
+ */
+ export const filterSensitiveLog = (obj: ModelPackageSummary): any => ({
+ ...obj,
+ });
+}
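Given the `ModelPackageType` (unversioned / versioned / both) and `ModelApprovalStatus` fields documented in this hunk, a hedged sketch of listing approved, versioned model packages; the model group name is a placeholder:

```ts
import { SageMakerClient, ListModelPackagesCommand } from "@aws-sdk/client-sagemaker";

const sagemaker = new SageMakerClient({});

export const listApprovedPackages = async () => {
  const { ModelPackageSummaryList } = await sagemaker.send(
    new ListModelPackagesCommand({
      ModelPackageGroupName: "my-model-group", // placeholder
      ModelPackageType: "Versioned",
      ModelApprovalStatus: "Approved",
      SortBy: "CreationTime",
      SortOrder: "Descending",
    })
  );
  return ModelPackageSummaryList ?? [];
};
```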
+
+export interface ListModelPackagesOutput {
+ /**
+ * APPROVED
- The model is approvedREJECTED
- The model is rejected.PENDING_MANUAL_APPROVAL
- The model is waiting for manual
+ * approval.ModelPackageSummary
objects, each of which lists a model
+ * package.CreationTime
.Descending
.ListModelQualityJobDefinitions
request was
+ * truncated, the response includes a NextToken
. To retrieve the next set of
+ * model quality monitoring job definitions, use the token in the next request.ListModelQualityJobDefinitions
.ListNotebookInstances
request was
- * truncated, SageMaker returns this token. To retrieve the next set of notebook instances, use
- * the token in the next request.NotebookInstanceSummary
objects, one for each notebook
- * instance.CreationTime
.Descending
.ListModels
request was truncated, the
+ * response includes a NextToken
. To retrieve the next set of models, use the
+ * token in the next request.CreatedTime
.ListPipelineExecutions
request was truncated,
- * the response includes a NextToken
. To retrieve the next set of pipeline executions, use the token in the next request.ModelSummary
objects, each of which lists a
+ * model.ListPipelineExecutions
request was truncated,
- * the response includes a NextToken
. To retrieve the next set of pipeline executions, use the token in the next request.Status
, CreationTime
,
+ * ScheduledTime
field. The default is CreationTime
.Ascending
or Descending
order.
+ * The default is Descending
.ListPipelineExecutionSteps
request was truncated,
- * the response includes a NextToken
. To retrieve the next set of pipeline execution steps, use the token in the next request.CreatedTime
.Status
, CreationTime
,
+ * ScheduledTime
field. The default is CreationTime
.Ascending
or Descending
order.
+ * The default is Descending
.False
, the previous baseline of the configured check type must be available.BaselineUsedForDriftCheckConstraints
and BaselineUsedForDriftCheckStatistics
.
- * If it is set to False
, the previous baseline of the configured check type must also be available.
- * These can be accessed through the BaselineUsedForDriftCheckConstraints
and
- * BaselineUsedForDriftCheckStatistics
properties.
- *
+ * BaselineUsedForDriftCheck
is set the same as
- * CalculatedBaseline
.
- *
+ * @internal
*/
- ClarifyCheck?: ClarifyCheckStepMetadata;
+ export const filterSensitiveLog = (obj: MonitoringScheduleSummary): any => ({
+ ...obj,
+ });
+}
+export interface ListMonitoringSchedulesResponse {
/**
- * BaselineUsedForDriftCheck
is set the same as
- * CalculatedBaseline
.ListNotebookInstanceLifecycleConfigs
request was
+ * truncated, the response includes a NextToken
. To get the next set of
+ * lifecycle configurations, use the token in the next request.CreationTime
.PipeLineExecutionStep
objects. Each
- * PipeLineExecutionStep
consists of StepName, StartTime, EndTime, StepStatus,
- * and Metadata. Metadata is an object with properties for each job that contains relevant
- * information about the job created by the step.ListPipelineExecutionSteps
request was truncated,
- * the response includes a NextToken
. To retrieve the next set of pipeline execution steps, use the token in the next request.ListPipelineParametersForExecution
request was truncated,
- * the response includes a NextToken
. To retrieve the next set of parameters, use the token in the next request.NotebookInstanceLifecycleConfiguration
objects, each listing
+ * a lifecycle configuration.ListNotebookInstances
is truncated, the
+ * response includes a NextToken
. You can use this token in your subsequent
+ * ListNotebookInstances
request to fetch the next set of notebook
+ * instances. Name
.ListPipelineParametersForExecution
request was truncated,
- * the response includes a NextToken
. To retrieve the next set of parameters, use the token in the next request.CreatedTime
.ListPipelines
request was truncated,
- * the response includes a NextToken
. To retrieve the next set of pipelines, use the token in the next request.PipelineSummary
objects matching the specified
- * filters. Each PipelineSummary
consists of PipelineArn, PipelineName,
- * ExperimentName, PipelineDescription, CreationTime, LastModifiedTime, LastRunTime, and
- * RoleArn. This list can be empty. ListPipelines
request was truncated,
- * the response includes a NextToken
. To retrieve the next set of pipelines, use the token in the next request.ListNotebookInstances
request was
+ * truncated, SageMaker returns this token. To retrieve the next set of notebook instances, use
+ * the token in the next request.NotebookInstanceSummary
objects, one for each notebook
+ * instance.CreationTime
.CreatedTime
.Ascending
.ListProcessingJobs
request was truncated,
- * the response includes a NextToken
. To retrieve the next set of processing
- * jobs, use the token in the next request.ListPipelineExecutions
request was truncated,
+ * the response includes a NextToken
. To retrieve the next set of pipeline executions, use the token in the next request.ProcessingJobSummary
objects, each listing a processing
- * job.ListPipelineExecutions
request was truncated,
+ * the response includes a NextToken
. To retrieve the next set of pipeline executions, use the token in the next request.ListPipelineExecutionSteps
request was truncated,
+ * the response includes a NextToken
. To retrieve the next set of pipeline execution steps, use the token in the next request.CreatedTime
.False
, the previous baseline of the configured check type must be available.BaselineUsedForDriftCheckConstraints
and BaselineUsedForDriftCheckStatistics
.
+ * If it is set to False
, the previous baseline of the configured check type must also be available.
+ * These can be accessed through the BaselineUsedForDriftCheckConstraints
and
+ * BaselineUsedForDriftCheckStatistics
properties.
+ *
+ */
+ QualityCheck?: QualityCheckStepMetadata;
+
+ /**
+ * BaselineUsedForDriftCheck
is set the same as
+ * CalculatedBaseline
.
+ *
+ */
+ ClarifyCheck?: ClarifyCheckStepMetadata;
+
+ /**
+ * BaselineUsedForDriftCheck
is set the same as
+ * CalculatedBaseline
.PipeLineExecutionStep
objects. Each
+ * PipeLineExecutionStep
consists of StepName, StartTime, EndTime, StepStatus,
+ * and Metadata. Metadata is an object with properties for each job that contains relevant
+ * information about the job created by the step.ListPipelineExecutionSteps
request was truncated,
+ * the response includes a NextToken
. To retrieve the next set of pipeline execution steps, use the token in the next request.ListPipelineParametersForExecution
request was truncated,
+ * the response includes a NextToken
. To retrieve the next set of parameters, use the token in the next request.ListPipelineParametersForExecution
request was truncated,
+ * the response includes a NextToken
. To retrieve the next set of parameters, use the token in the next request.CreatedTime
.ListPipelines
request was truncated,
+ * the response includes a NextToken
. To retrieve the next set of pipelines, use the token in the next request.PipelineSummary
objects matching the specified
+ * filters. Each PipelineSummary
consists of PipelineArn, PipelineName,
+ * ExperimentName, PipelineDescription, CreationTime, LastModifiedTime, LastRunTime, and
+ * RoleArn. This list can be empty. ListPipelines
request was truncated,
+ * the response includes a NextToken
. To retrieve the next set of pipelines, use the token in the next request.CreationTime
.Ascending
.ListProcessingJobs
request was truncated,
+ * the response includes a NextToken
. To retrieve the next set of processing
+ * jobs, use the token in the next request.ProcessingJobSummary
objects, each listing a processing
+ * job.NAME
, DEVICEFLEETNAME
, CREATIONTIME
, LASTMODIFIEDTIME
.AnomalyTimeRange
, which specifies the time range when
@@ -482,7 +632,7 @@ export enum AnomalySeverity {
}
/**
- * nextToken
value.nextToken
value.nextToken
value.ListInsights
to specify which insights to return.\n\t\tnextToken
value.JobRun
timeout in minutes. This is the maximum time that a job run can
- * consume resources before it is terminated and enters TIMEOUT
status. The default
- * is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.TIMEOUT
status. This value overrides the timeout value set in the parent job.G.1X
and 2 for G.2X
workers). This value may be different than the executionEngineRuntime
* MaxCapacity
as in the case of Auto Scaling jobs, as the number of executors running at a given time may be less than the MaxCapacity
. Therefore, it is possible that the value of DPUSeconds
is less than executionEngineRuntime
* MaxCapacity
.G.1X
, 2 for G.2X
, or 0.25 for G.025X
workers). This value may be different than the executionEngineRuntime
* MaxCapacity
as in the case of Auto Scaling jobs, as the number of executors running at a given time may be less than the MaxCapacity
. Therefore, it is possible that the value of DPUSeconds
is less than executionEngineRuntime
* MaxCapacity
.JobRun
timeout in minutes. This is the maximum time that a job run can
- * consume resources before it is terminated and enters TIMEOUT
status. The default
- * is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.TIMEOUT
status. This value overrides the timeout value set in the parent job.
Streaming jobs do not have a timeout. The default for non-streaming jobs is 2,880 minutes (48 hours).
*/ Timeout?: number; diff --git a/codegen/sdk-codegen/aws-models/glue.json b/codegen/sdk-codegen/aws-models/glue.json index 91c01e8a649b0..b2432b24918a5 100644 --- a/codegen/sdk-codegen/aws-models/glue.json +++ b/codegen/sdk-codegen/aws-models/glue.json @@ -16018,7 +16018,7 @@ "Timeout": { "target": "com.amazonaws.glue#Timeout", "traits": { - "smithy.api#documentation": "The JobRun
timeout in minutes. This is the maximum time that a job run can\n consume resources before it is terminated and enters TIMEOUT
status. The default\n is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
The JobRun
timeout in minutes. This is the maximum time that a job run can\n consume resources before it is terminated and enters TIMEOUT
status. This value overrides the timeout value set in the parent job.
Streaming jobs do not have a timeout. The default for non-streaming jobs is 2,880 minutes (48 hours).
" } }, "MaxCapacity": { @@ -16066,7 +16066,7 @@ "DPUSeconds": { "target": "com.amazonaws.glue#NullableDouble", "traits": { - "smithy.api#documentation": "This field populates only when an Auto Scaling job run completes, and represents the total time each executor ran during the lifecycle of a job run in seconds, multiplied by a DPU factor (1 for G.1X
and 2 for G.2X
workers). This value may be different than the executionEngineRuntime
* MaxCapacity
as in the case of Auto Scaling jobs, as the number of executors running at a given time may be less than the MaxCapacity
. Therefore, it is possible that the value of DPUSeconds
is less than executionEngineRuntime
* MaxCapacity
.
This field populates only for Auto Scaling job runs, and represents the total time each executor ran during the lifecycle of a job run in seconds, multiplied by a DPU factor (1 for G.1X
, 2 for G.2X
, or 0.25 for G.025X
workers). This value may be different than the executionEngineRuntime
* MaxCapacity
as in the case of Auto Scaling jobs, as the number of executors running at a given time may be less than the MaxCapacity
. Therefore, it is possible that the value of DPUSeconds
is less than executionEngineRuntime
* MaxCapacity
.
The JobRun
timeout in minutes. This is the maximum time that a job run can\n consume resources before it is terminated and enters TIMEOUT
status. The default\n is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
The JobRun
timeout in minutes. This is the maximum time that a job run can\n consume resources before it is terminated and enters TIMEOUT
status. This value overrides the timeout value set in the parent job.
Streaming jobs do not have a timeout. The default for non-streaming jobs is 2,880 minutes (48 hours).
" } }, "MaxCapacity": { From 6481e598bf2360dfe18714996867eeed605a6f82 Mon Sep 17 00:00:00 2001 From: awstoolsSageMaker Edge Manager dataplane service for communicating with active agents.
*/ export class SagemakerEdge extends SagemakerEdgeClient { + /** + *Use to get the active deployments from a device.
+ */ + public getDeployments( + args: GetDeploymentsCommandInput, + options?: __HttpHandlerOptions + ): PromiseUse to check if a device is registered with SageMaker Edge Manager.
*/ diff --git a/clients/client-sagemaker-edge/src/SagemakerEdgeClient.ts b/clients/client-sagemaker-edge/src/SagemakerEdgeClient.ts index cb2bd0a0a254e..74830c6e85e6f 100644 --- a/clients/client-sagemaker-edge/src/SagemakerEdgeClient.ts +++ b/clients/client-sagemaker-edge/src/SagemakerEdgeClient.ts @@ -53,6 +53,7 @@ import { UserAgent as __UserAgent, } from "@aws-sdk/types"; +import { GetDeploymentsCommandInput, GetDeploymentsCommandOutput } from "./commands/GetDeploymentsCommand"; import { GetDeviceRegistrationCommandInput, GetDeviceRegistrationCommandOutput, @@ -60,9 +61,15 @@ import { import { SendHeartbeatCommandInput, SendHeartbeatCommandOutput } from "./commands/SendHeartbeatCommand"; import { getRuntimeConfig as __getRuntimeConfig } from "./runtimeConfig"; -export type ServiceInputTypes = GetDeviceRegistrationCommandInput | SendHeartbeatCommandInput; +export type ServiceInputTypes = + | GetDeploymentsCommandInput + | GetDeviceRegistrationCommandInput + | SendHeartbeatCommandInput; -export type ServiceOutputTypes = GetDeviceRegistrationCommandOutput | SendHeartbeatCommandOutput; +export type ServiceOutputTypes = + | GetDeploymentsCommandOutput + | GetDeviceRegistrationCommandOutput + | SendHeartbeatCommandOutput; export interface ClientDefaults extends Partial<__SmithyResolvedConfiguration<__HttpHandlerOptions>> { /** diff --git a/clients/client-sagemaker-edge/src/commands/GetDeploymentsCommand.ts b/clients/client-sagemaker-edge/src/commands/GetDeploymentsCommand.ts new file mode 100644 index 0000000000000..ee63ab1e58a95 --- /dev/null +++ b/clients/client-sagemaker-edge/src/commands/GetDeploymentsCommand.ts @@ -0,0 +1,96 @@ +// smithy-typescript generated code +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { GetDeploymentsRequest, GetDeploymentsResult } from "../models/models_0"; +import { + deserializeAws_restJson1GetDeploymentsCommand, + serializeAws_restJson1GetDeploymentsCommand, +} from "../protocols/Aws_restJson1"; +import { SagemakerEdgeClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../SagemakerEdgeClient"; + +export interface GetDeploymentsCommandInput extends GetDeploymentsRequest {} +export interface GetDeploymentsCommandOutput extends GetDeploymentsResult, __MetadataBearer {} + +/** + *Use to get the active deployments from a device.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { SagemakerEdgeClient, GetDeploymentsCommand } from "@aws-sdk/client-sagemaker-edge"; // ES Modules import + * // const { SagemakerEdgeClient, GetDeploymentsCommand } = require("@aws-sdk/client-sagemaker-edge"); // CommonJS import + * const client = new SagemakerEdgeClient(config); + * const command = new GetDeploymentsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetDeploymentsCommandInput} for command's `input` shape. + * @see {@link GetDeploymentsCommandOutput} for command's `response` shape. + * @see {@link SagemakerEdgeClientResolvedConfig | config} for SagemakerEdgeClient's `config` shape. + * + */ +export class GetDeploymentsCommand extends $Command< + GetDeploymentsCommandInput, + GetDeploymentsCommandOutput, + SagemakerEdgeClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetDeploymentsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackThe unique name of the device you want to get the registration status from.
+ *The unique name of the device you want to get the configuration of active deployments from.
*/ DeviceName: string | undefined; @@ -15,39 +15,144 @@ export interface GetDeviceRegistrationRequest { DeviceFleetName: string | undefined; } -export namespace GetDeviceRegistrationRequest { +export namespace GetDeploymentsRequest { /** * @internal */ - export const filterSensitiveLog = (obj: GetDeviceRegistrationRequest): any => ({ + export const filterSensitiveLog = (obj: GetDeploymentsRequest): any => ({ ...obj, }); } -export interface GetDeviceRegistrationResult { +export enum ChecksumType { + Sha1 = "SHA1", +} + +/** + *Information about the checksum of a model deployed on a device.
+ */ +export interface Checksum { /** - *Describes if the device is currently registered with SageMaker Edge Manager.
+ *The type of the checksum.
*/ - DeviceRegistration?: string; + Type?: ChecksumType | string; /** - *The amount of time, in seconds, that the registration status is stored on the device’s cache before it is refreshed.
+ *The checksum of the model.
*/ - CacheTTL?: string; + Sum?: string; } -export namespace GetDeviceRegistrationResult { +export namespace Checksum { /** * @internal */ - export const filterSensitiveLog = (obj: GetDeviceRegistrationResult): any => ({ + export const filterSensitiveLog = (obj: Checksum): any => ({ + ...obj, + }); +} + +export enum ModelState { + Deploy = "DEPLOY", + Undeploy = "UNDEPLOY", +} + +/** + * + */ +export interface Definition { + /** + *The unique model handle.
+ */ + ModelHandle?: string; + + /** + *The absolute S3 location of the model.
+ */ + S3Url?: string; + + /** + *The checksum information of the model.
+ */ + Checksum?: Checksum; + + /** + *The desired state of the model.
+ */ + State?: ModelState | string; +} + +export namespace Definition { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Definition): any => ({ + ...obj, + }); +} + +export enum FailureHandlingPolicy { + DoNothing = "DO_NOTHING", + RollbackOnFailure = "ROLLBACK_ON_FAILURE", +} + +export enum DeploymentType { + Model = "Model", +} + +/** + *Information about a deployment on an edge device that is registered with SageMaker Edge Manager.
+ */ +export interface EdgeDeployment { + /** + *The name and unique ID of the deployment.
+ */ + DeploymentName?: string; + + /** + *The type of the deployment.
+ */ + Type?: DeploymentType | string; + + /** + *<p>Determines whether to roll back to the previous configuration if the deployment fails.</p>
+ */ + FailureHandlingPolicy?: FailureHandlingPolicy | string; + + /** + *Returns a list of Definition objects.
+ */ + Definitions?: Definition[]; +} + +export namespace EdgeDeployment { + /** + * @internal + */ + export const filterSensitiveLog = (obj: EdgeDeployment): any => ({ + ...obj, + }); +} + +export interface GetDeploymentsResult { + /** + *Returns a list of the configurations of the active deployments on the device.
+ */ + Deployments?: EdgeDeployment[]; +} + +export namespace GetDeploymentsResult { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetDeploymentsResult): any => ({ ...obj, }); } /** *An internal failure occurred. Try your request again. If the problem - * persists, contact AWS customer support.
+ * persists, contact Amazon Web Services customer support. */ export class InternalServiceException extends __BaseException { readonly name: "InternalServiceException" = "InternalServiceException"; @@ -67,6 +172,48 @@ export class InternalServiceException extends __BaseException { } } +export interface GetDeviceRegistrationRequest { + /** + *The unique name of the device you want to get the registration status from.
+ */ + DeviceName: string | undefined; + + /** + *The name of the fleet that the device belongs to.
+ */ + DeviceFleetName: string | undefined; +} + +export namespace GetDeviceRegistrationRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetDeviceRegistrationRequest): any => ({ + ...obj, + }); +} + +export interface GetDeviceRegistrationResult { + /** + *Describes if the device is currently registered with SageMaker Edge Manager.
+ */ + DeviceRegistration?: string; + + /** + *The amount of time, in seconds, that the registration status is stored on the device’s cache before it is refreshed.
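A device agent typically confirms its registration (and honors the returned CacheTTL) before it starts polling for deployments or sending heartbeats. A minimal sketch with placeholder names, assuming the same client setup as above:

import { SagemakerEdgeClient, GetDeviceRegistrationCommand } from "@aws-sdk/client-sagemaker-edge";

const client = new SagemakerEdgeClient({ region: "us-west-2" });

async function checkRegistration() {
  const { DeviceRegistration, CacheTTL } = await client.send(
    new GetDeviceRegistrationCommand({ DeviceName: "my-edge-device", DeviceFleetName: "my-device-fleet" })
  );
  // DeviceRegistration is a status string (for example, whether the device is registered);
  // CacheTTL is how long, in seconds, the agent may cache this answer before asking again.
  console.log(DeviceRegistration, CacheTTL);
}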
+ */ + CacheTTL?: string; +} + +export namespace GetDeviceRegistrationResult { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetDeviceRegistrationResult): any => ({ + ...obj, + }); +} + /** *Information required for edge device metrics.
*/ @@ -101,6 +248,109 @@ export namespace EdgeMetric { }); } +export enum DeploymentStatus { + Fail = "FAIL", + Success = "SUCCESS", +} + +/** + * + */ +export interface DeploymentModel { + /** + *The unique handle of the model.
+ */ + ModelHandle?: string; + + /** + *The name of the model.
+ */ + ModelName?: string; + + /** + *The version of the model.
+ */ + ModelVersion?: string; + + /** + *The desired state of the model.
+ */ + DesiredState?: ModelState | string; + + /** + *Returns the current state of the model.
+ */ + State?: ModelState | string; + + /** + *Returns the deployment status of the model.
+ */ + Status?: DeploymentStatus | string; + + /** + *Returns the error message for the deployment status result.
+ */ + StatusReason?: string; + + /** + *Returns the error message if there is a rollback.
+ */ + RollbackFailureReason?: string; +} + +export namespace DeploymentModel { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeploymentModel): any => ({ + ...obj, + }); +} + +/** + *Information about the result of a deployment on an edge device that is registered with SageMaker Edge Manager.
+ */ +export interface DeploymentResult { + /** + *The name and unique ID of the deployment.
+ */ + DeploymentName?: string; + + /** + *Returns the bucket error code.
+ */ + DeploymentStatus?: string; + + /** + *Returns the detailed error message.
+ */ + DeploymentStatusMessage?: string; + + /** + *The timestamp of when the deployment was started on the agent.
+ */ + DeploymentStartTime?: Date; + + /** + *<p>The timestamp of when the deployment ended and the agent received the deployment results.</p>
+ */ + DeploymentEndTime?: Date; + + /** + *Returns a list of models deployed on the agent.
+ */ + DeploymentModels?: DeploymentModel[]; +} + +export namespace DeploymentResult { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeploymentResult): any => ({ + ...obj, + }); +} + /** *Information about a model deployed on an edge device that is registered with SageMaker Edge Manager.
*/ @@ -165,6 +415,11 @@ export interface SendHeartbeatRequest { *The name of the fleet that the device belongs to.
*/ DeviceFleetName: string | undefined; + + /** + *Returns the result of a deployment on the device.
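After acting on a deployment, an agent can report the outcome back through the existing heartbeat. A hedged sketch, assuming the same client as above; the agent version, names, and timestamps are illustrative values only:

import { SagemakerEdgeClient, SendHeartbeatCommand } from "@aws-sdk/client-sagemaker-edge";

const client = new SagemakerEdgeClient({ region: "us-west-2" });

async function reportDeploymentResult() {
  await client.send(
    new SendHeartbeatCommand({
      AgentVersion: "1.0",
      DeviceName: "my-edge-device",
      DeviceFleetName: "my-device-fleet",
      DeploymentResult: {
        DeploymentName: "deployment-1234",
        DeploymentStatus: "SUCCESS",
        DeploymentStartTime: new Date(Date.now() - 60_000),
        DeploymentEndTime: new Date(),
        DeploymentModels: [
          {
            ModelHandle: "model-1",
            ModelName: "demo-model",
            ModelVersion: "1.0",
            DesiredState: "DEPLOY",
            State: "DEPLOY",
            Status: "SUCCESS",
          },
        ],
      },
    })
  );
}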
+ */ + DeploymentResult?: DeploymentResult; } export namespace SendHeartbeatRequest { diff --git a/clients/client-sagemaker-edge/src/protocols/Aws_restJson1.ts b/clients/client-sagemaker-edge/src/protocols/Aws_restJson1.ts index 9708ee997581d..2b3e569406c96 100644 --- a/clients/client-sagemaker-edge/src/protocols/Aws_restJson1.ts +++ b/clients/client-sagemaker-edge/src/protocols/Aws_restJson1.ts @@ -13,14 +13,49 @@ import { SerdeContext as __SerdeContext, } from "@aws-sdk/types"; +import { GetDeploymentsCommandInput, GetDeploymentsCommandOutput } from "../commands/GetDeploymentsCommand"; import { GetDeviceRegistrationCommandInput, GetDeviceRegistrationCommandOutput, } from "../commands/GetDeviceRegistrationCommand"; import { SendHeartbeatCommandInput, SendHeartbeatCommandOutput } from "../commands/SendHeartbeatCommand"; -import { EdgeMetric, InternalServiceException, Model } from "../models/models_0"; +import { + Checksum, + Definition, + DeploymentModel, + DeploymentResult, + EdgeDeployment, + EdgeMetric, + InternalServiceException, + Model, +} from "../models/models_0"; import { SagemakerEdgeServiceException as __BaseException } from "../models/SagemakerEdgeServiceException"; +export const serializeAws_restJson1GetDeploymentsCommand = async ( + input: GetDeploymentsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/GetDeployments"; + let body: any; + body = JSON.stringify({ + ...(input.DeviceFleetName != null && { DeviceFleetName: input.DeviceFleetName }), + ...(input.DeviceName != null && { DeviceName: input.DeviceName }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + export const serializeAws_restJson1GetDeviceRegistrationCommand = async ( input: GetDeviceRegistrationCommandInput, context: __SerdeContext @@ -59,6 +94,9 @@ export const serializeAws_restJson1SendHeartbeatCommand = async ( body = JSON.stringify({ ...(input.AgentMetrics != null && { AgentMetrics: serializeAws_restJson1EdgeMetrics(input.AgentMetrics, context) }), ...(input.AgentVersion != null && { AgentVersion: input.AgentVersion }), + ...(input.DeploymentResult != null && { + DeploymentResult: serializeAws_restJson1DeploymentResult(input.DeploymentResult, context), + }), ...(input.DeviceFleetName != null && { DeviceFleetName: input.DeviceFleetName }), ...(input.DeviceName != null && { DeviceName: input.DeviceName }), ...(input.Models != null && { Models: serializeAws_restJson1Models(input.Models, context) }), @@ -74,6 +112,51 @@ export const serializeAws_restJson1SendHeartbeatCommand = async ( }); }; +export const deserializeAws_restJson1GetDeploymentsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): PromiseSageMaker Edge Manager dataplane service for communicating with active agents.
", "smithy.api#title": "Amazon Sagemaker Edge Manager" - } + }, + "version": "2020-09-23", + "operations": [ + { + "target": "com.amazonaws.sagemakeredge#GetDeployments" + }, + { + "target": "com.amazonaws.sagemakeredge#GetDeviceRegistration" + }, + { + "target": "com.amazonaws.sagemakeredge#SendHeartbeat" + } + ] }, "com.amazonaws.sagemakeredge#CacheTTLSeconds": { "type": "string", @@ -65,6 +68,217 @@ } } }, + "com.amazonaws.sagemakeredge#Checksum": { + "type": "structure", + "members": { + "Type": { + "target": "com.amazonaws.sagemakeredge#ChecksumType", + "traits": { + "smithy.api#documentation": "The type of the checksum.
" + } + }, + "Sum": { + "target": "com.amazonaws.sagemakeredge#ChecksumString", + "traits": { + "smithy.api#documentation": "The checksum of the model.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Information about the checksum of a model deployed on a device.
" + } + }, + "com.amazonaws.sagemakeredge#ChecksumString": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 63 + }, + "smithy.api#pattern": "^[a-z0-9](-*[a-z0-9])*$" + } + }, + "com.amazonaws.sagemakeredge#ChecksumType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "SHA1", + "name": "Sha1" + } + ] + } + }, + "com.amazonaws.sagemakeredge#Definition": { + "type": "structure", + "members": { + "ModelHandle": { + "target": "com.amazonaws.sagemakeredge#EntityName", + "traits": { + "smithy.api#documentation": "The unique model handle.
" + } + }, + "S3Url": { + "target": "com.amazonaws.sagemakeredge#S3Uri", + "traits": { + "smithy.api#documentation": "The absolute S3 location of the model.
" + } + }, + "Checksum": { + "target": "com.amazonaws.sagemakeredge#Checksum", + "traits": { + "smithy.api#documentation": "The checksum information of the model.
" + } + }, + "State": { + "target": "com.amazonaws.sagemakeredge#ModelState", + "traits": { + "smithy.api#documentation": "The desired state of the model.
" + } + } + }, + "traits": { + "smithy.api#documentation": "" + } + }, + "com.amazonaws.sagemakeredge#Definitions": { + "type": "list", + "member": { + "target": "com.amazonaws.sagemakeredge#Definition" + } + }, + "com.amazonaws.sagemakeredge#DeploymentModel": { + "type": "structure", + "members": { + "ModelHandle": { + "target": "com.amazonaws.sagemakeredge#EntityName", + "traits": { + "smithy.api#documentation": "The unique handle of the model.
" + } + }, + "ModelName": { + "target": "com.amazonaws.sagemakeredge#ModelName", + "traits": { + "smithy.api#documentation": "The name of the model.
" + } + }, + "ModelVersion": { + "target": "com.amazonaws.sagemakeredge#Version", + "traits": { + "smithy.api#documentation": "The version of the model.
" + } + }, + "DesiredState": { + "target": "com.amazonaws.sagemakeredge#ModelState", + "traits": { + "smithy.api#documentation": "The desired state of the model.
" + } + }, + "State": { + "target": "com.amazonaws.sagemakeredge#ModelState", + "traits": { + "smithy.api#documentation": "Returns the current state of the model.
" + } + }, + "Status": { + "target": "com.amazonaws.sagemakeredge#DeploymentStatus", + "traits": { + "smithy.api#documentation": "Returns the deployment status of the model.
" + } + }, + "StatusReason": { + "target": "com.amazonaws.sagemakeredge#String", + "traits": { + "smithy.api#documentation": "Returns the error message for the deployment status result.
" + } + }, + "RollbackFailureReason": { + "target": "com.amazonaws.sagemakeredge#String", + "traits": { + "smithy.api#documentation": "Returns the error message if there is a rollback.
" + } + } + }, + "traits": { + "smithy.api#documentation": "" + } + }, + "com.amazonaws.sagemakeredge#DeploymentModels": { + "type": "list", + "member": { + "target": "com.amazonaws.sagemakeredge#DeploymentModel" + } + }, + "com.amazonaws.sagemakeredge#DeploymentResult": { + "type": "structure", + "members": { + "DeploymentName": { + "target": "com.amazonaws.sagemakeredge#EntityName", + "traits": { + "smithy.api#documentation": "The name and unique ID of the deployment.
" + } + }, + "DeploymentStatus": { + "target": "com.amazonaws.sagemakeredge#EntityName", + "traits": { + "smithy.api#documentation": "Returns the bucket error code.
" + } + }, + "DeploymentStatusMessage": { + "target": "com.amazonaws.sagemakeredge#String", + "traits": { + "smithy.api#documentation": "Returns the detailed error message.
" + } + }, + "DeploymentStartTime": { + "target": "com.amazonaws.sagemakeredge#Timestamp", + "traits": { + "smithy.api#documentation": "The timestamp of when the deployment was started on the agent.
" + } + }, + "DeploymentEndTime": { + "target": "com.amazonaws.sagemakeredge#Timestamp", + "traits": { + "smithy.api#documentation": "The timestamp of when the deployment was ended, and the agent got the deployment results.
" + } + }, + "DeploymentModels": { + "target": "com.amazonaws.sagemakeredge#DeploymentModels", + "traits": { + "smithy.api#documentation": "Returns a list of models deployed on the agent.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Information about the result of a deployment on an edge device that is registered with SageMaker Edge Manager.
" + } + }, + "com.amazonaws.sagemakeredge#DeploymentStatus": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "SUCCESS", + "name": "Success" + }, + { + "value": "FAIL", + "name": "Fail" + } + ] + } + }, + "com.amazonaws.sagemakeredge#DeploymentType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "Model", + "name": "Model" + } + ] + } + }, "com.amazonaws.sagemakeredge#DeviceFleetName": { "type": "string", "traits": { @@ -104,6 +318,44 @@ "smithy.api#pattern": "^[a-zA-Z0-9](-*[a-zA-Z0-9\\/])*$" } }, + "com.amazonaws.sagemakeredge#EdgeDeployment": { + "type": "structure", + "members": { + "DeploymentName": { + "target": "com.amazonaws.sagemakeredge#EntityName", + "traits": { + "smithy.api#documentation": "The name and unique ID of the deployment.
" + } + }, + "Type": { + "target": "com.amazonaws.sagemakeredge#DeploymentType", + "traits": { + "smithy.api#documentation": "The type of the deployment.
" + } + }, + "FailureHandlingPolicy": { + "target": "com.amazonaws.sagemakeredge#FailureHandlingPolicy", + "traits": { + "smithy.api#documentation": "Determines whether to rollback to previous configuration if deployment fails.
" + } + }, + "Definitions": { + "target": "com.amazonaws.sagemakeredge#Definitions", + "traits": { + "smithy.api#documentation": "Returns a list of Definition objects.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Information about a deployment on an edge device that is registered with SageMaker Edge Manager.
" + } + }, + "com.amazonaws.sagemakeredge#EdgeDeployments": { + "type": "list", + "member": { + "target": "com.amazonaws.sagemakeredge#EdgeDeployment" + } + }, "com.amazonaws.sagemakeredge#EdgeMetric": { "type": "structure", "members": { @@ -142,9 +394,86 @@ "target": "com.amazonaws.sagemakeredge#EdgeMetric" } }, + "com.amazonaws.sagemakeredge#EntityName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 63 + }, + "smithy.api#pattern": "^[a-zA-Z0-9](-*[a-zA-Z0-9])*$" + } + }, "com.amazonaws.sagemakeredge#ErrorMessage": { "type": "string" }, + "com.amazonaws.sagemakeredge#FailureHandlingPolicy": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "ROLLBACK_ON_FAILURE", + "name": "RollbackOnFailure" + }, + { + "value": "DO_NOTHING", + "name": "DoNothing" + } + ] + } + }, + "com.amazonaws.sagemakeredge#GetDeployments": { + "type": "operation", + "input": { + "target": "com.amazonaws.sagemakeredge#GetDeploymentsRequest" + }, + "output": { + "target": "com.amazonaws.sagemakeredge#GetDeploymentsResult" + }, + "errors": [ + { + "target": "com.amazonaws.sagemakeredge#InternalServiceException" + } + ], + "traits": { + "smithy.api#documentation": "Use to get the active deployments from a device.
", + "smithy.api#http": { + "method": "POST", + "uri": "/GetDeployments", + "code": 200 + } + } + }, + "com.amazonaws.sagemakeredge#GetDeploymentsRequest": { + "type": "structure", + "members": { + "DeviceName": { + "target": "com.amazonaws.sagemakeredge#DeviceName", + "traits": { + "smithy.api#documentation": "The unique name of the device you want to get the configuration of active deployments from.
", + "smithy.api#required": {} + } + }, + "DeviceFleetName": { + "target": "com.amazonaws.sagemakeredge#DeviceFleetName", + "traits": { + "smithy.api#documentation": "The name of the fleet that the device belongs to.
", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.sagemakeredge#GetDeploymentsResult": { + "type": "structure", + "members": { + "Deployments": { + "target": "com.amazonaws.sagemakeredge#EdgeDeployments", + "traits": { + "smithy.api#documentation": "Returns a list of the configurations of the active deployments on the device.
" + } + } + } + }, "com.amazonaws.sagemakeredge#GetDeviceRegistration": { "type": "operation", "input": { @@ -211,7 +540,7 @@ } }, "traits": { - "smithy.api#documentation": "An internal failure occurred. Try your request again. If the problem \n persists, contact AWS customer support.
", + "smithy.api#documentation": "An internal failure occurred. Try your request again. If the problem \n persists, contact Amazon Web Services customer support.
", "smithy.api#error": "client" } }, @@ -273,17 +602,45 @@ "smithy.api#pattern": "^[a-zA-Z0-9](-*[a-zA-Z0-9])*$" } }, + "com.amazonaws.sagemakeredge#ModelState": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "DEPLOY", + "name": "Deploy" + }, + { + "value": "UNDEPLOY", + "name": "Undeploy" + } + ] + } + }, "com.amazonaws.sagemakeredge#Models": { "type": "list", "member": { "target": "com.amazonaws.sagemakeredge#Model" } }, + "com.amazonaws.sagemakeredge#S3Uri": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 1024 + }, + "smithy.api#pattern": "^s3://([^/]+)/?(.*)$" + } + }, "com.amazonaws.sagemakeredge#SendHeartbeat": { "type": "operation", "input": { "target": "com.amazonaws.sagemakeredge#SendHeartbeatRequest" }, + "output": { + "target": "smithy.api#Unit" + }, "errors": [ { "target": "com.amazonaws.sagemakeredge#InternalServiceException" @@ -333,9 +690,18 @@ "smithy.api#documentation": "The name of the fleet that the device belongs to.
", "smithy.api#required": {} } + }, + "DeploymentResult": { + "target": "com.amazonaws.sagemakeredge#DeploymentResult", + "traits": { + "smithy.api#documentation": "Returns the result of a deployment on the device.
" + } } } }, + "com.amazonaws.sagemakeredge#String": { + "type": "string" + }, "com.amazonaws.sagemakeredge#Timestamp": { "type": "timestamp" }, @@ -349,7 +715,7 @@ "min": 1, "max": 64 }, - "smithy.api#pattern": "[a-zA-Z0-9\\ \\_\\.]+" + "smithy.api#pattern": "^[a-zA-Z0-9\\ \\_\\.]+$" } } } From 559d71c013f09729128e24ae32a8e0f1090c8520 Mon Sep 17 00:00:00 2001 From: awstoolsAttaches the specified policy to the specified principal (certificate or other * credential).
*- * Note: This action is deprecated. Please use AttachPolicy instead.
+ * Note: This action is deprecated and works as + * expected for backward compatibility, but we won't add enhancements. Use AttachPolicy instead. *Requires permission to access the AttachPrincipalPolicy action.
*/ public attachPrincipalPolicy( diff --git a/clients/client-iot/src/commands/AttachPrincipalPolicyCommand.ts b/clients/client-iot/src/commands/AttachPrincipalPolicyCommand.ts index 8726d82600320..603940ea7676a 100644 --- a/clients/client-iot/src/commands/AttachPrincipalPolicyCommand.ts +++ b/clients/client-iot/src/commands/AttachPrincipalPolicyCommand.ts @@ -28,7 +28,8 @@ export interface AttachPrincipalPolicyCommandOutput extends __MetadataBearer {} *Attaches the specified policy to the specified principal (certificate or other * credential).
*- * Note: This action is deprecated. Please use AttachPolicy instead.
+ * Note: This action is deprecated and works as + * expected for backward compatibility, but we won't add enhancements. Use AttachPolicy instead. *Requires permission to access the AttachPrincipalPolicy action.
* @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-iot/src/commands/ListThingGroupsForThingCommand.ts b/clients/client-iot/src/commands/ListThingGroupsForThingCommand.ts index ef010b34bb60f..bd513d1b7f893 100644 --- a/clients/client-iot/src/commands/ListThingGroupsForThingCommand.ts +++ b/clients/client-iot/src/commands/ListThingGroupsForThingCommand.ts @@ -13,7 +13,8 @@ import { } from "@aws-sdk/types"; import { IoTClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../IoTClient"; -import { ListThingGroupsForThingRequest, ListThingGroupsForThingResponse } from "../models/models_1"; +import { ListThingGroupsForThingRequest } from "../models/models_1"; +import { ListThingGroupsForThingResponse } from "../models/models_2"; import { deserializeAws_restJson1ListThingGroupsForThingCommand, serializeAws_restJson1ListThingGroupsForThingCommand, diff --git a/clients/client-iot/src/models/models_1.ts b/clients/client-iot/src/models/models_1.ts index bce1915d84561..4abf0e237ff7e 100644 --- a/clients/client-iot/src/models/models_1.ts +++ b/clients/client-iot/src/models/models_1.ts @@ -4181,6 +4181,30 @@ export enum DeviceDefenderIndexingMode { VIOLATIONS = "VIOLATIONS", } +/** + *Provides additional filters for specific data sources. Named shadow is the only data source that currently supports and requires a filter.
+ * To add named shadows to your fleet indexing configuration, set namedShadowIndexingMode
to be ON
and
+ * specify your shadow names in filter
.
The shadow names that you select to index. The default maximum number of shadow names for indexing is 10. To increase + * the limit, see Amazon Web Services IoT Device Management + * Quotas in the Amazon Web Services General Reference. + *
+ */ + namedShadowNames?: string[]; +} + +export namespace IndexingFilter { + /** + * @internal + */ + export const filterSensitiveLog = (obj: IndexingFilter): any => ({ + ...obj, + }); +} + export enum NamedShadowIndexingMode { OFF = "OFF", ON = "ON", @@ -4277,6 +4301,13 @@ export interface ThingIndexingConfiguration { *Contains custom field names and their data type.
*/ customFields?: Field[]; + + /** + *Provides additional filters for specific data sources. Named shadow is the only data source that currently supports and requires a filter.
+ * To add named shadows to your fleet indexing configuration, set namedShadowIndexingMode
to be ON
and
+ * specify your shadow names in filter
.
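Putting the two settings together, a hedged sketch of opting named shadows into fleet indexing via UpdateIndexingConfiguration; the shadow names are hypothetical, and once indexed they can be queried with SearchIndex:

import { IoTClient, UpdateIndexingConfigurationCommand } from "@aws-sdk/client-iot";

const client = new IoTClient({ region: "us-east-1" });

async function enableNamedShadowIndexing() {
  await client.send(
    new UpdateIndexingConfigurationCommand({
      thingIndexingConfiguration: {
        thingIndexingMode: "REGISTRY_AND_SHADOW",
        namedShadowIndexingMode: "ON",
        // Only the named shadows listed here are indexed; the default limit is 10 names.
        filter: { namedShadowNames: ["thermostat-config", "firmware-status"] },
      },
    })
  );
}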
The thing groups.
- */ - thingGroups?: GroupNameAndArn[]; - - /** - *The token to use to get the next set of results, or null if there are no additional results.
- */ - nextToken?: string; -} - -export namespace ListThingGroupsForThingResponse { - /** - * @internal - */ - export const filterSensitiveLog = (obj: ListThingGroupsForThingResponse): any => ({ - ...obj, - }); -} diff --git a/clients/client-iot/src/models/models_2.ts b/clients/client-iot/src/models/models_2.ts index 2f65553f98bb6..2e3cd056246d8 100644 --- a/clients/client-iot/src/models/models_2.ts +++ b/clients/client-iot/src/models/models_2.ts @@ -49,6 +49,7 @@ import { Configuration, DetectMitigationActionsTaskTarget, DomainConfigurationStatus, + GroupNameAndArn, LogTargetType, RegistrationConfig, Status, @@ -58,6 +59,27 @@ import { ViolationEventOccurrenceRange, } from "./models_1"; +export interface ListThingGroupsForThingResponse { + /** + *The thing groups.
+ */ + thingGroups?: GroupNameAndArn[]; + + /** + *The token to use to get the next set of results, or null if there are no additional results.
+ */ + nextToken?: string; +} + +export namespace ListThingGroupsForThingResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListThingGroupsForThingResponse): any => ({ + ...obj, + }); +} + /** *The input for the ListThingPrincipal operation.
*/ diff --git a/clients/client-iot/src/protocols/Aws_restJson1.ts b/clients/client-iot/src/protocols/Aws_restJson1.ts index 64c4ced0aab26..630e41d22bffe 100644 --- a/clients/client-iot/src/protocols/Aws_restJson1.ts +++ b/clients/client-iot/src/protocols/Aws_restJson1.ts @@ -837,6 +837,7 @@ import { Field, FleetMetricNameAndArn, GroupNameAndArn, + IndexingFilter, InternalServerException, Job, JobExecution, @@ -24326,6 +24327,14 @@ const serializeAws_restJson1HttpUrlDestinationConfiguration = ( }; }; +const serializeAws_restJson1IndexingFilter = (input: IndexingFilter, context: __SerdeContext): any => { + return { + ...(input.namedShadowNames != null && { + namedShadowNames: serializeAws_restJson1NamedShadowNamesFilter(input.namedShadowNames, context), + }), + }; +}; + const serializeAws_restJson1IotAnalyticsAction = (input: IotAnalyticsAction, context: __SerdeContext): any => { return { ...(input.batchMode != null && { batchMode: input.batchMode }), @@ -24523,6 +24532,17 @@ const serializeAws_restJson1MqttContext = (input: MqttContext, context: __SerdeC }; }; +const serializeAws_restJson1NamedShadowNamesFilter = (input: string[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + const serializeAws_restJson1NumberList = (input: number[], context: __SerdeContext): any => { return input .filter((e: any) => e != null) @@ -25095,6 +25115,7 @@ const serializeAws_restJson1ThingIndexingConfiguration = ( return { ...(input.customFields != null && { customFields: serializeAws_restJson1Fields(input.customFields, context) }), ...(input.deviceDefenderIndexingMode != null && { deviceDefenderIndexingMode: input.deviceDefenderIndexingMode }), + ...(input.filter != null && { filter: serializeAws_restJson1IndexingFilter(input.filter, context) }), ...(input.managedFields != null && { managedFields: serializeAws_restJson1Fields(input.managedFields, context) }), ...(input.namedShadowIndexingMode != null && { namedShadowIndexingMode: input.namedShadowIndexingMode }), ...(input.thingConnectivityIndexingMode != null && { @@ -26777,6 +26798,15 @@ const deserializeAws_restJson1ImplicitDeny = (output: any, context: __SerdeConte } as any; }; +const deserializeAws_restJson1IndexingFilter = (output: any, context: __SerdeContext): IndexingFilter => { + return { + namedShadowNames: + output.namedShadowNames != null + ? deserializeAws_restJson1NamedShadowNamesFilter(output.namedShadowNames, context) + : undefined, + } as any; +}; + const deserializeAws_restJson1IndexNamesList = (output: any, context: __SerdeContext): string[] => { const retVal = (output || []) .filter((e: any) => e != null) @@ -27353,6 +27383,18 @@ const deserializeAws_restJson1MitigationActionParams = ( } as any; }; +const deserializeAws_restJson1NamedShadowNamesFilter = (output: any, context: __SerdeContext): string[] => { + const retVal = (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return __expectString(entry) as any; + }); + return retVal; +}; + const deserializeAws_restJson1NonCompliantResource = (output: any, context: __SerdeContext): NonCompliantResource => { return { additionalInfo: @@ -28581,6 +28623,7 @@ const deserializeAws_restJson1ThingIndexingConfiguration = ( customFields: output.customFields != null ? 
deserializeAws_restJson1Fields(output.customFields, context) : undefined, deviceDefenderIndexingMode: __expectString(output.deviceDefenderIndexingMode), + filter: output.filter != null ? deserializeAws_restJson1IndexingFilter(output.filter, context) : undefined, managedFields: output.managedFields != null ? deserializeAws_restJson1Fields(output.managedFields, context) : undefined, namedShadowIndexingMode: __expectString(output.namedShadowIndexingMode), diff --git a/codegen/sdk-codegen/aws-models/iot.json b/codegen/sdk-codegen/aws-models/iot.json index 36cabcaa02df2..5819e250b6a4e 100644 --- a/codegen/sdk-codegen/aws-models/iot.json +++ b/codegen/sdk-codegen/aws-models/iot.json @@ -1802,7 +1802,7 @@ ], "traits": { "smithy.api#deprecated": {}, - "smithy.api#documentation": "Attaches the specified policy to the specified principal (certificate or other\n credential).
\n\n Note: This action is deprecated. Please use AttachPolicy instead.
\nRequires permission to access the AttachPrincipalPolicy action.
", + "smithy.api#documentation": "Attaches the specified policy to the specified principal (certificate or other\n credential).
\n\n Note: This action is deprecated and works as\n expected for backward compatibility, but we won't add enhancements. Use AttachPolicy instead.
\nRequires permission to access the AttachPrincipalPolicy action.
", "smithy.api#http": { "method": "PUT", "uri": "/principal-policies/{policyName}", @@ -15615,6 +15615,20 @@ ] } }, + "com.amazonaws.iot#IndexingFilter": { + "type": "structure", + "members": { + "namedShadowNames": { + "target": "com.amazonaws.iot#NamedShadowNamesFilter", + "traits": { + "smithy.api#documentation": "The shadow names that you select to index. The default maximum number of shadow names for indexing is 10. To increase \n the limit, see Amazon Web Services IoT Device Management \n Quotas in the Amazon Web Services General Reference.\n
" + } + } + }, + "traits": { + "smithy.api#documentation": "Provides additional filters for specific data sources. Named shadow is the only data source that currently supports and requires a filter.\n To add named shadows to your fleet indexing configuration, set namedShadowIndexingMode
to be ON
and \n specify your shadow names in filter
.
Contains custom field names and their data type.
" } + }, + "filter": { + "target": "com.amazonaws.iot#IndexingFilter", + "traits": { + "smithy.api#documentation": "Provides additional filters for specific data sources. Named shadow is the only data source that currently supports and requires a filter.\n To add named shadows to your fleet indexing configuration, set namedShadowIndexingMode
to be ON
and \n specify your shadow names in filter
.
To see an up-to-date list of your ACM Private CA quotas, or to request a quota increase, log into your Amazon Web Services account and visit the Service Quotas console.
diff --git a/clients/client-acm-pca/src/ACMPCA.ts b/clients/client-acm-pca/src/ACMPCA.ts index 6531ecdc08ccb..c478e5b669cfd 100644 --- a/clients/client-acm-pca/src/ACMPCA.ts +++ b/clients/client-acm-pca/src/ACMPCA.ts @@ -120,7 +120,6 @@ import { * request because the request exceeds the operation's quota for the number of requests per * second. When a request is throttled, ACM Private CA returns a ThrottlingException error. ACM Private CA does not guarantee a minimum request * rate for APIs. - * *To see an up-to-date list of your ACM Private CA quotas, or to request a quota increase, * log into your Amazon Web Services account and visit the Service Quotas * console.
diff --git a/clients/client-acm-pca/src/ACMPCAClient.ts b/clients/client-acm-pca/src/ACMPCAClient.ts index 177eef6c20351..d4ec18c8d9bef 100644 --- a/clients/client-acm-pca/src/ACMPCAClient.ts +++ b/clients/client-acm-pca/src/ACMPCAClient.ts @@ -334,7 +334,6 @@ export interface ACMPCAClientResolvedConfig extends ACMPCAClientResolvedConfigTy * request because the request exceeds the operation's quota for the number of requests per * second. When a request is throttled, ACM Private CA returns a ThrottlingException error. ACM Private CA does not guarantee a minimum request * rate for APIs. - * *To see an up-to-date list of your ACM Private CA quotas, or to request a quota increase, * log into your Amazon Web Services account and visit the Service Quotas * console.
diff --git a/clients/client-acm-pca/src/models/models_0.ts b/clients/client-acm-pca/src/models/models_0.ts index 31453c3b2c72a..4dc3caedf3c79 100644 --- a/clients/client-acm-pca/src/models/models_0.ts +++ b/clients/client-acm-pca/src/models/models_0.ts @@ -8,17 +8,14 @@ import { ACMPCAServiceException as __BaseException } from "./ACMPCAServiceExcept */ export interface CustomAttribute { /** - *Specifies the object identifier (OID) of the attribute type of - * the - * relative distinguished name - * (RDN).
+ *Specifies the object identifier (OID) of the attribute type of the relative + * distinguished name (RDN).
*/ ObjectIdentifier: string | undefined; /** * - *Specifies the attribute value of relative distinguished name - * (RDN).
+ *Specifies the attribute value of relative distinguished name (RDN).
*/ Value: string | undefined; } @@ -125,16 +122,9 @@ export interface ASN1Subject { /** * - *Contains a sequence of one or more X.500 relative distinguished - * names - * (RDNs), - * each of which consists of an object identifier (OID) and - * a - * value. For more information, see NIST’s definition of - * Object - * Identifier - * (OID).
- * + *Contains a sequence of one or more X.500 relative distinguished names (RDNs), each of + * which consists of an object identifier (OID) and a value. For more information, see + * NIST’s definition of Object Identifier (OID).
*Custom attributes cannot be used in combination with standard attributes.
*A CRL is typically updated approximately 30 minutes after a certificate * is revoked. If for any reason a CRL update fails, ACM Private CA makes further attempts * every 15 minutes.
- * *CRLs contain the following fields:
*Specifies a - * cryptographic key management compliance standard used for handling CA keys.
+ *Specifies a cryptographic key management compliance standard used for handling CA + * keys.
*Default: FIPS_140_2_LEVEL_3_OR_HIGHER
- *Note: FIPS_140_2_LEVEL_3_OR_HIGHER
is not supported in Region
- * ap-northeast-3. When creating a CA in the ap-northeast-3, you must provide
+ *
+ * Note:
+ * FIPS_140_2_LEVEL_3_OR_HIGHER
is not supported in the following
+ * Regions:
ap-northeast-3
+ *ap-southeast-3
+ *When creating a CA in these Regions, you must provide
* FIPS_140_2_LEVEL_2_OR_HIGHER
as the argument for
* KeyStorageSecurityStandard
. Failure to do this results in an
* InvalidArgsException
with the message, "A certificate authority cannot
@@ -1819,8 +1818,7 @@ export namespace PolicyInformation {
/**
*
Specifies the X.509 extension information for a - * certificate.
+ *Specifies the X.509 extension information for a certificate.
*Extensions present in Specifies the object identifier (OID) of the X.509 extension. For more information,
- * see the
- * Global OID reference
- * database.
+ * Specifies the object identifier (OID) of the X.509 extension. For more information, see the
+ * Global OID reference database.
* Specifies the base64-encoded value of the X.509
- * extension. Specifies the base64-encoded value of the X.509 extension. Specifies the critical flag of
- * the
- * X.509
- * extension. Specifies the critical flag of the X.509 extension. Contains a sequence of one or more X.509 extensions, each of which consists of an
- * object identifier (OID), a base64-encoded
- * value,
- * and the
- * critical flag.
- * For
- * more information, see the Global OID reference
+ * object identifier (OID), a base64-encoded value, and the critical flag. For more
+ * information, see the Global OID reference
* database.
* The OID value of a CustomExtension must not
- * match the OID of a predefined extension. This is the Certificate Manager Private Certificate Authority (PCA) API Reference. It provides descriptions,\n\t\t\tsyntax, and usage examples for each of the actions and data types involved in creating\n\t\t\tand managing a private certificate authority (CA) for your organization. The documentation for each action shows the API request parameters and the JSON\n\t\t\tresponse. Alternatively, you can use one of the Amazon Web Services SDKs to access an API that is\n\t\t\ttailored to the programming language or platform that you prefer. For more information,\n\t\t\tsee Amazon Web Services SDKs. Each ACM Private CA API operation has a quota that determines the number of times the\n\t\t\toperation can be called per second. ACM Private CA throttles API requests at different rates\n\t\t\tdepending on the operation. Throttling means that ACM Private CA rejects an otherwise valid\n\t\t\trequest because the request exceeds the operation's quota for the number of requests per\n\t\t\tsecond. When a request is throttled, ACM Private CA returns a ThrottlingException error. ACM Private CA does not guarantee a minimum request\n\t\t\trate for APIs. To see an up-to-date list of your ACM Private CA quotas, or to request a quota increase,\n\t\t\tlog into your Amazon Web Services account and visit the Service Quotas\n\t\t\tconsole. This is the Certificate Manager Private Certificate Authority (PCA) API Reference. It provides descriptions,\n\t\t\tsyntax, and usage examples for each of the actions and data types involved in creating\n\t\t\tand managing a private certificate authority (CA) for your organization. The documentation for each action shows the API request parameters and the JSON\n\t\t\tresponse. Alternatively, you can use one of the Amazon Web Services SDKs to access an API that is\n\t\t\ttailored to the programming language or platform that you prefer. For more information,\n\t\t\tsee Amazon Web Services SDKs. Each ACM Private CA API operation has a quota that determines the number of times the\n\t\t\toperation can be called per second. ACM Private CA throttles API requests at different rates\n\t\t\tdepending on the operation. Throttling means that ACM Private CA rejects an otherwise valid\n\t\t\trequest because the request exceeds the operation's quota for the number of requests per\n\t\t\tsecond. When a request is throttled, ACM Private CA returns a ThrottlingException error. ACM Private CA does not guarantee a minimum request\n\t\t\trate for APIs. To see an up-to-date list of your ACM Private CA quotas, or to request a quota increase,\n\t\t\tlog into your Amazon Web Services account and visit the Service Quotas\n\t\t\tconsole. Contains a sequence of one or more X.500 relative distinguished\n\t\t\tnames\n\t\t\t(RDNs),\n\t\t\teach of which consists of an object identifier (OID) and\n\t\t\ta\n\t\t\tvalue. For more information, see NIST’s definition of \n\t\t\tObject\n\t\t\t\tIdentifier\n\t\t\t\t(OID). Custom attributes cannot be used in combination with standard attributes. Contains a sequence of one or more X.500 relative distinguished names (RDNs), each of\n\t\t\twhich consists of an object identifier (OID) and a value. For more information, see\n\t\t\tNIST’s definition of Object Identifier (OID). Custom attributes cannot be used in combination with standard attributes. Specifies a\n\t\t\tcryptographic key management compliance standard used for handling CA keys. 
Default: FIPS_140_2_LEVEL_3_OR_HIGHER Note: Specifies a cryptographic key management compliance standard used for handling CA\n\t\t\tkeys. Default: FIPS_140_2_LEVEL_3_OR_HIGHER \n Note:\n\t\t\t ap-northeast-3 ap-southeast-3 When creating a CA in these Regions, you must provide\n\t\t\t\t Contains configuration information for a certificate revocation list (CRL). Your\n\t\t\tprivate certificate authority (CA) creates base CRLs. Delta CRLs are not supported. You\n\t\t\tcan enable CRLs for your new or an existing private CA by setting the Enabled parameter to ACM Private CA assets that are stored in Amazon S3 can be protected with encryption. \n For more information, see Encrypting Your\n\t\t\tCRLs. Your private CA uses the value in the ExpirationInDays parameter to calculate the nextUpdate field in the CRL. The CRL is refreshed prior to a\n\t\t\tcertificate's expiration date or when a certificate is revoked. When a certificate is\n\t\t\trevoked, it appears in the CRL until the certificate expires, and then in one additional\n\t\t\tCRL after expiration, and it always appears in the audit report. A CRL is typically updated approximately 30 minutes after a certificate \n\tis revoked. If for any reason a CRL update fails, ACM Private CA makes further attempts \n\tevery 15 minutes. CRLs contain the following fields: \n Version: The current version number defined\n\t\t\t\t\tin RFC 5280 is V2. The integer value is 0x1. \n Signature Algorithm: The name of the\n\t\t\t\t\talgorithm used to sign the CRL. \n Issuer: The X.500 distinguished name of your\n\t\t\t\t\tprivate CA that issued the CRL. \n Last Update: The issue date and time of this\n\t\t\t\t\tCRL. \n Next Update: The day and time by which the\n\t\t\t\t\tnext CRL will be issued. \n Revoked Certificates: List of revoked\n\t\t\t\t\tcertificates. Each list item contains the following information. \n Serial Number: The serial number, in\n\t\t\t\t\t\t\thexadecimal format, of the revoked certificate. \n Revocation Date: Date and time the\n\t\t\t\t\t\t\tcertificate was revoked. \n CRL Entry Extensions: Optional\n\t\t\t\t\t\t\textensions for the CRL entry. \n X509v3 CRL Reason Code:\n\t\t\t\t\t\t\t\t\tReason the certificate was revoked. \n CRL Extensions: Optional extensions for the\n\t\t\t\t\tCRL. \n X509v3 Authority Key Identifier:\n\t\t\t\t\t\t\tIdentifies the public key associated with the private key used to sign\n\t\t\t\t\t\t\tthe certificate. \n X509v3 CRL Number:: Decimal sequence\n\t\t\t\t\t\t\tnumber for the CRL. \n Signature Algorithm: Algorithm used by your\n\t\t\t\t\tprivate CA to sign the CRL. \n Signature Value: Signature computed over the\n\t\t\t\t\tCRL. Certificate revocation lists created by ACM Private CA are DER-encoded. You can use the\n\t\t\tfollowing OpenSSL command to list a CRL. \n For more information, see Planning a certificate revocation list\n\t\t\t\t(CRL) in the Certificate Manager Private Certificate Authority (PCA) User Guide\n Contains configuration information for a certificate revocation list (CRL). Your\n\t\t\tprivate certificate authority (CA) creates base CRLs. Delta CRLs are not supported. You\n\t\t\tcan enable CRLs for your new or an existing private CA by setting the Enabled parameter to ACM Private CA assets that are stored in Amazon S3 can be protected with encryption. \n For more information, see Encrypting Your\n\t\t\tCRLs. Your private CA uses the value in the ExpirationInDays parameter to calculate the nextUpdate field in the CRL. 
The CRL is refreshed prior to a\n\t\t\tcertificate's expiration date or when a certificate is revoked. When a certificate is\n\t\t\trevoked, it appears in the CRL until the certificate expires, and then in one additional\n\t\t\tCRL after expiration, and it always appears in the audit report. A CRL is typically updated approximately 30 minutes after a certificate \n\tis revoked. If for any reason a CRL update fails, ACM Private CA makes further attempts \n\tevery 15 minutes. CRLs contain the following fields: \n Version: The current version number defined\n\t\t\t\t\tin RFC 5280 is V2. The integer value is 0x1. \n Signature Algorithm: The name of the\n\t\t\t\t\talgorithm used to sign the CRL. \n Issuer: The X.500 distinguished name of your\n\t\t\t\t\tprivate CA that issued the CRL. \n Last Update: The issue date and time of this\n\t\t\t\t\tCRL. \n Next Update: The day and time by which the\n\t\t\t\t\tnext CRL will be issued. \n Revoked Certificates: List of revoked\n\t\t\t\t\tcertificates. Each list item contains the following information. \n Serial Number: The serial number, in\n\t\t\t\t\t\t\thexadecimal format, of the revoked certificate. \n Revocation Date: Date and time the\n\t\t\t\t\t\t\tcertificate was revoked. \n CRL Entry Extensions: Optional\n\t\t\t\t\t\t\textensions for the CRL entry. \n X509v3 CRL Reason Code:\n\t\t\t\t\t\t\t\t\tReason the certificate was revoked. \n CRL Extensions: Optional extensions for the\n\t\t\t\t\tCRL. \n X509v3 Authority Key Identifier:\n\t\t\t\t\t\t\tIdentifies the public key associated with the private key used to sign\n\t\t\t\t\t\t\tthe certificate. \n X509v3 CRL Number:: Decimal sequence\n\t\t\t\t\t\t\tnumber for the CRL. \n Signature Algorithm: Algorithm used by your\n\t\t\t\t\tprivate CA to sign the CRL. \n Signature Value: Signature computed over the\n\t\t\t\t\tCRL. Certificate revocation lists created by ACM Private CA are DER-encoded. You can use the\n\t\t\tfollowing OpenSSL command to list a CRL. \n For more information, see Planning a certificate revocation list\n\t\t\t\t(CRL) in the Certificate Manager Private Certificate Authority (PCA) User Guide\n Specifies the object identifier (OID) of the attribute type of\n\t\t\tthe\n\t\t\trelative distinguished name\n\t\t\t(RDN). Specifies the object identifier (OID) of the attribute type of the relative\n\t\t\tdistinguished name (RDN). Specifies the attribute value of relative distinguished name\n\t\t\t(RDN). Specifies the attribute value of relative distinguished name (RDN). Specifies the object identifier (OID) of the X.509 extension. For more information,\n\t\t\tsee the\n\t\t\t\tGlobal OID reference\n\t\t\t\tdatabase.\n\t\t Specifies the object identifier (OID) of the X.509 extension. For more information, see the\n\t\t\t\tGlobal OID reference database.\n\t\t Specifies the base64-encoded value of the X.509\n\t\t\textension. Specifies the base64-encoded value of the X.509 extension. Specifies the critical flag of\n\t\t\tthe\n\t\t\tX.509\n\t\t\textension. Specifies the critical flag of the X.509 extension. Specifies the X.509 extension information for a\n\t\t\tcertificate. Extensions present in Specifies the X.509 extension information for a certificate. 
Extensions present in Contains a sequence of one or more X.509 extensions, each of which consists of an\n\t\t\tobject identifier (OID), a base64-encoded\n\t\t\tvalue,\n\t\t\tand the\n\t\t\tcritical flag.\n\t\t\tFor\n\t\t\tmore information, see the Global OID reference\n\t\t\t\tdatabase.\n The OID value of a CustomExtension must not\n\t\t\t\tmatch the OID of a predefined extension. Contains a sequence of one or more X.509 extensions, each of which consists of an\n\t\t\tobject identifier (OID), a base64-encoded value, and the critical flag. For more\n\t\t\tinformation, see the Global OID reference\n\t\t\t\tdatabase.\n This API operation is in preview release for IoT SiteWise and is subject to change.
+ * We recommend that you use this operation only with test data, and not in production environments. Defines a job to ingest data to IoT SiteWise from Amazon S3. For more information,
+ * see Create a bulk import job (CLI)
+ * in the Amazon Simple Storage Service User Guide. You must enable IoT SiteWise to export data to Amazon S3 before you create a bulk import job.
+ * For more information about how to configure storage settings,
+ * see PutStorageConfiguration. Creates a dashboard in an IoT SiteWise Monitor project. This API operation is in preview release for IoT SiteWise and is subject to change.
+ * We recommend that you use this operation only with test data, and not in production environments. Retrieves information about a bulk import job request. For more information,
+ * see Describe a bulk import job (CLI)
+ * in the Amazon Simple Storage Service User Guide. Retrieves information about a dashboard. This API operation is in preview release for IoT SiteWise and is subject to change.
+ * We recommend that you use this operation only with test data, and not in production environments. Retrieves a paginated list of bulk import job requests. For more information,
+ * see List bulk import jobs (CLI)
+ * in the Amazon Simple Storage Service User Guide. Retrieves a paginated list of dashboards for an IoT SiteWise Monitor project. This API operation is in preview release for IoT SiteWise and is subject to change.
+ * We recommend that you use this operation only with test data, and not in production environments. Defines a job to ingest data to IoT SiteWise from Amazon S3. For more information,
+ * see Create a bulk import job (CLI)
+ * in the Amazon Simple Storage Service User Guide. You must enable IoT SiteWise to export data to Amazon S3 before you create a bulk import job.
+ * For more information about how to configure storage settings,
+ * see PutStorageConfiguration. This API operation is in preview release for IoT SiteWise and is subject to change.
+ * We recommend that you use this operation only with test data, and not in production environments. Retrieves information about a bulk import job request. For more information,
+ * see Describe a bulk import job (CLI)
+ * in the Amazon Simple Storage Service User Guide. This API operation is in preview release for IoT SiteWise and is subject to change.
+ * We recommend that you use this operation only with test data, and not in production environments. Retrieves a paginated list of bulk import job requests. For more information,
+ * see List bulk import jobs (CLI)
+ * in the Amazon Simple Storage Service User Guide. The Amazon S3 destination where errors associated with the job creation request are saved. The name of the Amazon S3 bucket to which errors associated with the bulk import job are sent. Amazon S3 uses the prefix as a folder name to organize data in the bucket.
+ * Each Amazon S3 object has a key that is its unique identifier in the bucket.
+ * Each object in a bucket has exactly one key. The prefix must end with a forward slash (/).
+ * For more information, see Organizing objects using prefixes
+ * in the Amazon Simple Storage Service User Guide. The file in Amazon S3 where your data is saved. The name of the Amazon S3 bucket from which data is imported. The key of the Amazon S3 object that contains your data. Each object has a key that is a
+ * unique identifier. Each object has exactly one key. The version ID to identify a specific version of the Amazon S3 object that contains your data. A .csv file. The column names specified in the .csv file. The file format of the data. The .csv file format. Contains the configuration information of a job, such as the file format used to save data in Amazon S3. The file format of the data in Amazon S3. The unique name that helps identify the job request. The ARN of the IAM role that allows IoT SiteWise to read Amazon S3 data. The files in the specified Amazon S3 bucket that contain your data. The Amazon S3 destination where errors associated with the job creation request are saved. Contains the configuration information of a job, such as the file format used to save data in Amazon S3. The ID of the job. The unique name that helps identify the job request. The status of the bulk import job can be one of following values.
+ *
+ *
+ *
+ *
+ *
+ * The ID of the project in which to create the dashboard. The ID of the job. The ID of the job. The unique name that helps identify the job request. The status of the bulk import job can be one of following values.
+ *
+ *
+ *
+ *
+ *
+ * The ARN of the IAM role that allows IoT SiteWise to read Amazon S3 data. The files in the specified Amazon S3 bucket that contain your data. The Amazon S3 destination where errors associated with the job creation request are saved. Contains the configuration information of a job, such as the file format used to save data in Amazon S3. The date the job was created, in Unix epoch TIME. The date the job was last updated, in Unix epoch time. The ID of the dashboard. The token to be used for the next set of paginated results. The maximum number of results to return for each paginated request. You can use a filter to select the bulk import jobs that you want to retrieve. Contains a job summary information. The ID of the job. The unique name that helps identify the job request. The status of the bulk import job can be one of following values.
+ *
+ *
+ *
+ *
+ *
+ * One or more job summaries to list. The token for the next set of results, or null if there are no additional results. The ID of the project. This API operation is in preview release for IoT SiteWise and is subject to change. \n We recommend that you use this operation only with test data, and not in production environments. Defines a job to ingest data to IoT SiteWise from Amazon S3. For more information, \n see Create a bulk import job (CLI) \n in the Amazon Simple Storage Service User Guide. You must enable IoT SiteWise to export data to Amazon S3 before you create a bulk import job. \n For more information about how to configure storage settings, \n see PutStorageConfiguration. The unique name that helps identify the job request. The ARN of the IAM role that allows IoT SiteWise to read Amazon S3 data. The files in the specified Amazon S3 bucket that contain your data. The Amazon S3 destination where errors associated with the job creation request are saved. Contains the configuration information of a job, such as the file format used to save data in Amazon S3. The ID of the job. The unique name that helps identify the job request. The status of the bulk import job can be one of following values. \n \n \n \n \n \n The column names specified in the .csv file. A .csv file. This API operation is in preview release for IoT SiteWise and is subject to change. \n We recommend that you use this operation only with test data, and not in production environments. Retrieves information about a bulk import job request. For more information, \n see Describe a bulk import job (CLI) \n in the Amazon Simple Storage Service User Guide. The ID of the job. The ID of the job. The unique name that helps identify the job request. The status of the bulk import job can be one of following values. \n \n \n \n \n \n The ARN of the IAM role that allows IoT SiteWise to read Amazon S3 data. The files in the specified Amazon S3 bucket that contain your data. The Amazon S3 destination where errors associated with the job creation request are saved. Contains the configuration information of a job, such as the file format used to save data in Amazon S3. The date the job was created, in Unix epoch TIME. The date the job was last updated, in Unix epoch time. The name of the Amazon S3 bucket to which errors associated with the bulk import job are sent. Amazon S3 uses the prefix as a folder name to organize data in the bucket. \n Each Amazon S3 object has a key that is its unique identifier in the bucket. \n Each object in a bucket has exactly one key. The prefix must end with a forward slash (/). \n For more information, see Organizing objects using prefixes \n in the Amazon Simple Storage Service User Guide. The Amazon S3 destination where errors associated with the job creation request are saved. The name of the Amazon S3 bucket from which data is imported. The key of the Amazon S3 object that contains your data. Each object has a key that is a\n unique identifier. Each object has exactly one key. The version ID to identify a specific version of the Amazon S3 object that contains your data. The file in Amazon S3 where your data is saved. The .csv file format. The file format of the data. The file format of the data in Amazon S3. Contains the configuration information of a job, such as the file format used to save data in Amazon S3. The ID of the job. The unique name that helps identify the job request. The status of the bulk import job can be one of following values. 
\n \n \n \n \n \n Contains a job summary information. This API operation is in preview release for IoT SiteWise and is subject to change. \n We recommend that you use this operation only with test data, and not in production environments. Retrieves a paginated list of bulk import job requests. For more information, \n see List bulk import jobs (CLI) \n in the Amazon Simple Storage Service User Guide. The token to be used for the next set of paginated results. The maximum number of results to return for each paginated request. You can use a filter to select the bulk import jobs that you want to retrieve. One or more job summaries to list. The token for the next set of results, or null if there are no additional results. The time stamp of the last update to the alarm state. Tracks the timestamp of any state update, even if The state value for the alarm.
+ * The timestamp
+ * of the last change
+ * to the alarm's
+ * When the value is
+ * Captures the reason for action suppression.
+ *
+ * Actions will be suppressed
+ * if the suppressor alarm is
+ * in the
+ * The maximum time
+ * in seconds
+ * that the composite alarm waits
+ * for the suppressor alarm
+ * to go
+ * into the
+ *
+ * The maximum time
+ * in seconds
+ * that the composite alarm waits
+ * after suppressor alarm goes out
+ * of the
+ * CustomExtensions
follow the
* ApiPassthrough
* template
@@ -1829,27 +1827,21 @@ export namespace PolicyInformation {
export interface CustomExtension {
/**
*
- * FIPS_140_2_LEVEL_3_OR_HIGHER
is not supported in Region\n\t\t\tap-northeast-3. When creating a CA in the ap-northeast-3, you must provide\n\t\t\t\tFIPS_140_2_LEVEL_2_OR_HIGHER
as the argument for\n\t\t\t\tKeyStorageSecurityStandard
. Failure to do this results in an\n\t\t\t\tInvalidArgsException
with the message, \"A certificate authority cannot\n\t\t\tbe created in this region with the specified security standard.\"FIPS_140_2_LEVEL_3_OR_HIGHER
is not supported in the following\n\t\t\tRegions:\n
\n\t\t FIPS_140_2_LEVEL_2_OR_HIGHER
as the argument for\n\t\t\t\tKeyStorageSecurityStandard
. Failure to do this results in an\n\t\t\t\tInvalidArgsException
with the message, \"A certificate authority cannot\n\t\t\tbe created in this region with the specified security standard.\"true
. Your private CA\n\t\t\twrites CRLs to an S3 bucket that you specify in the S3BucketName parameter. You can hide the name of your bucket by\n\t\t\tspecifying a value for the CustomCname parameter. Your\n\t\t\tprivate CA copies the CNAME or the S3 bucket name to the CRL\n\t\t\t\tDistribution Points extension of each certificate it issues. Your S3\n\t\t\tbucket policy must give write permission to ACM Private CA. \n
\n\t\t \n
\n\t\t\t \n
\n\t\t\t\t\t \n
\n\t\t\t openssl crl -inform DER -text -in crl_path\n\t\t\t-noout
\n true
. Your private CA\n\t\t\twrites CRLs to an S3 bucket that you specify in the S3BucketName parameter. You can hide the name of your bucket by\n\t\t\tspecifying a value for the CustomCname parameter. Your\n\t\t\tprivate CA copies the CNAME or the S3 bucket name to the CRL\n\t\t\t\tDistribution Points extension of each certificate it issues. Your S3\n\t\t\tbucket policy must give write permission to ACM Private CA. \n
\n\t\t \n
\n\t\t\t \n
\n\t\t\t\t\t \n
\n\t\t\t openssl crl -inform DER -text -in crl_path\n\t\t\t-noout
\n CustomExtensions
follow the\n\t\t\t\tApiPassthrough
\n\t\t\t template\n\t\t\t\trules. CustomExtensions
follow the\n\t\t\t\tApiPassthrough
\n\t\t\t template\n\t\t\t\trules.
+ *
+ */
+ jobStatus: JobStatus | string | undefined;
+}
+
+export namespace CreateBulkImportJobResponse {
+ /**
+ * @internal
+ */
+ export const filterSensitiveLog = (obj: CreateBulkImportJobResponse): any => ({
+ ...obj,
+ });
+}
+
export interface CreateDashboardRequest {
/**
 * PENDING – IoT SiteWise is waiting for the current bulk import job to finish.
 * CANCELLED – The bulk import job has been canceled.
 * RUNNING – IoT SiteWise is processing your request to import your data from Amazon S3.
 * COMPLETED – IoT SiteWise successfully completed your request to import data from Amazon S3.
 * FAILED – IoT SiteWise couldn't process your request to import data from Amazon S3.
 *   You can use logs saved in the specified error report location in Amazon S3 to troubleshoot issues.
 * COMPLETED_WITH_FAILURES – IoT SiteWise completed your request to import data from Amazon S3 with errors.
 *   You can use logs saved in the specified error report location in Amazon S3 to troubleshoot issues.
+ */
+ jobStatus: JobStatus | string | undefined;
+
+ /**
+ * PENDING – IoT SiteWise is waiting for the current bulk import job to finish.
+ * CANCELLED – The bulk import job has been canceled.
+ * RUNNING – IoT SiteWise is processing your request to import your data from Amazon S3.
+ * COMPLETED – IoT SiteWise successfully completed your request to import data from Amazon S3.
+ * FAILED – IoT SiteWise couldn't process your request to import data from Amazon S3.
+ *   You can use logs saved in the specified error report location in Amazon S3 to troubleshoot issues.
+ * COMPLETED_WITH_FAILURES – IoT SiteWise completed your request to import data from Amazon S3 with errors.
+ *   You can use logs saved in the specified error report location in Amazon S3 to troubleshoot issues.
+ */
+ status: JobStatus | string | undefined;
+}
+
+export namespace JobSummary {
+ /**
+ * @internal
+ */
+ export const filterSensitiveLog = (obj: JobSummary): any => ({
+ ...obj,
+ });
+}
+
+export interface ListBulkImportJobsResponse {
+ /**
+ * PENDING – IoT SiteWise is waiting for the current bulk import job to finish.
+ * CANCELLED – The bulk import job has been canceled.
+ * RUNNING – IoT SiteWise is processing your request to import your data from Amazon S3.
+ * COMPLETED – IoT SiteWise successfully completed your request to import data from Amazon S3.
+ * FAILED – IoT SiteWise couldn't process your request to import data from Amazon S3.
+ *   You can use logs saved in the specified error report location in Amazon S3 to troubleshoot issues.
+ * COMPLETED_WITH_FAILURES – IoT SiteWise completed your request to import data from Amazon S3 with errors.
+ *   You can use logs saved in the specified error report location in Amazon S3 to troubleshoot issues.
",
+ "smithy.api#required": {}
+ }
+ }
+ }
+ },
"com.amazonaws.iotsitewise#CreateDashboard": {
"type": "operation",
"input": {
@@ -3663,6 +3835,20 @@
}
}
},
+ "com.amazonaws.iotsitewise#Csv": {
+ "type": "structure",
+ "members": {
+ "columnNames": {
+ "target": "com.amazonaws.iotsitewise#ColumnNames",
+ "traits": {
+ "smithy.api#documentation": "PENDING
– IoT SiteWise is waiting for the current bulk import job to finish.CANCELLED
– The bulk import job has been canceled.RUNNING
– IoT SiteWise is processing your request to import your data from Amazon S3.COMPLETED
– IoT SiteWise successfully completed your request to import data from Amazon S3.FAILED
– IoT SiteWise couldn't process your request to import data from Amazon S3. \n You can use logs saved in the specified error report location in Amazon S3 to troubleshoot issues.COMPLETED_WITH_FAILURES
– IoT SiteWise completed your request to import data from Amazon S3 with errors.\n You can use logs saved in the specified error report location in Amazon S3 to troubleshoot issues.\n
",
+ "smithy.api#required": {}
+ }
+ },
+ "jobRoleArn": {
+ "target": "com.amazonaws.iotsitewise#ARN",
+ "traits": {
+ "smithy.api#documentation": "PENDING
– IoT SiteWise is waiting for the current bulk import job to finish.CANCELLED
– The bulk import job has been canceled.RUNNING
– IoT SiteWise is processing your request to import your data from Amazon S3.COMPLETED
– IoT SiteWise successfully completed your request to import data from Amazon S3.FAILED
– IoT SiteWise couldn't process your request to import data from Amazon S3. \n You can use logs saved in the specified error report location in Amazon S3 to troubleshoot issues.COMPLETED_WITH_FAILURES
– IoT SiteWise completed your request to import data from Amazon S3 with errors.\n You can use logs saved in the specified error report location in Amazon S3 to troubleshoot issues.\n
",
+ "smithy.api#required": {}
+ }
+ }
+ },
+ "traits": {
+ "smithy.api#documentation": "PENDING
– IoT SiteWise is waiting for the current bulk import job to finish.CANCELLED
– The bulk import job has been canceled.RUNNING
– IoT SiteWise is processing your request to import your data from Amazon S3.COMPLETED
– IoT SiteWise successfully completed your request to import data from Amazon S3.FAILED
– IoT SiteWise couldn't process your request to import data from Amazon S3. \n You can use logs saved in the specified error report location in Amazon S3 to troubleshoot issues.COMPLETED_WITH_FAILURES
– IoT SiteWise completed your request to import data from Amazon S3 with errors.\n You can use logs saved in the specified error report location in Amazon S3 to troubleshoot issues.StateValue
doesn't change.StateValue
.
+ * ALARM
,
+ * it means
+ * that the actions are suppressed
+ * because the suppressor alarm is
+ * in ALARM
+ * When the value is WaitPeriod
,
+ * it means that
+ * the actions are suppressed
+ * because the composite alarm is waiting
+ * for the suppressor alarm
+ * to go
+ * into the ALARM
state.
+ * The maximum waiting time is as specified
+ * in ActionsSuppressorWaitPeriod
.
+ * After this time,
+ * the composite alarm performs its actions.
+ * When the value is ExtensionPeriod
,
+ * it means
+ * that the actions are suppressed
+ * because the composite alarm is waiting
+ * after the suppressor alarm went out
+ * of the ALARM
state.
+ * The maximum waiting time is as specified
+ * in ActionsSuppressorExtensionPeriod
.
+ * After this time,
+ * the composite alarm performs its actions.
+ * ALARM
state.
+ * ActionsSuppressor
can be an AlarmName or an Amazon Resource Name (ARN)
+ * from an existing alarm.
+ * ALARM
state.
+ * After this time,
+ * the composite alarm performs its actions.
+ * WaitPeriod
+ * is required only
+ * when ActionsSuppressor
is specified.
+ * ALARM
state.
+ * After this time,
+ * the composite alarm performs its actions.
+ * ExtensionPeriod
+ * is required only
+ * when ActionsSuppressor
is specified.
+ *
+ * Actions will be suppressed
+ * if the suppressor alarm is
+ * in the ALARM
state.
+ * ActionsSuppressor
can be an AlarmName or an Amazon Resource Name (ARN)
+ * from an existing alarm.
+ *
+ * The maximum time
+ * in seconds
+ * that the composite alarm waits
+ * for the suppressor alarm
+ * to go
+ * into the ALARM
state.
+ * After this time,
+ * the composite alarm performs its actions.
+ *
+ * WaitPeriod
+ * is required only
+ * when ActionsSuppressor
is specified.
+ *
+ * The maximum time
+ * in seconds
+ * that the composite alarm waits
+ * after suppressor alarm goes out
+ * of the ALARM
state.
+ * After this time,
+ * the composite alarm performs its actions.
+ *
+ * ExtensionPeriod
+ * is required only
+ * when ActionsSuppressor
is specified.
+ *
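A sketch, not part of the patch, of setting the new suppression parameters on PutCompositeAlarm with the v3 CloudWatch client. Alarm names, the alarm rule, and the wait periods are placeholders.

```ts
import { CloudWatchClient, PutCompositeAlarmCommand } from "@aws-sdk/client-cloudwatch";

const client = new CloudWatchClient({ region: "us-east-1" });

// Suppress the composite alarm's actions while the (hypothetical) maintenance alarm is in ALARM.
await client.send(
  new PutCompositeAlarmCommand({
    AlarmName: "service-health",                        // placeholder composite alarm
    AlarmRule: "ALARM(cpu-high) OR ALARM(errors-high)", // placeholder rule
    ActionsSuppressor: "maintenance-window",            // AlarmName or ARN of the suppressor alarm
    ActionsSuppressorWaitPeriod: 120,                   // seconds to wait for the suppressor to go into ALARM
    ActionsSuppressorExtensionPeriod: 180,              // seconds to wait after the suppressor leaves ALARM
  })
);
```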
The time stamp of the last update to the alarm state.
" + "smithy.api#documentation": "Tracks the timestamp of any state update, even if StateValue
doesn't change.
The state value for the alarm.
" } + }, + "StateTransitionedTimestamp": { + "target": "com.amazonaws.cloudwatch#Timestamp", + "traits": { + "smithy.api#documentation": "\n\t\t\tThe timestamp \n\t\t\tof the last change \n\t\t\tto the alarm's StateValue
.\n\t\t
\n\t\t\tWhen the value is ALARM
, \n\t\t\tit means \n\t\t\tthat the actions are suppressed\n\t\t\tbecause the suppressor alarm is \n\t\t\tin ALARM
\n\t\t\tWhen the value is WaitPeriod
, \n\t\t\tit means that \n\t\t\tthe actions are suppressed \n\t\t\tbecause the composite alarm is waiting\n\t\t\tfor the suppressor alarm \n\t\t\tto go \n\t\t\tinto \n\t\t\tinto the ALARM
state. \n\t\t\tThe maximum waiting time is as specified\n\t\t\tin ActionsSuppressorWaitPeriod
. \n\t\t\tAfter this time, \n\t\t\tthe composite alarm performs its actions.\n\t\t\tWhen the value is ExtensionPeriod
,\n\t\t\tit means \n\t\t\tthat the actions are suppressed\n\t\t\tbecause the composite alarm is waiting \n\t\t\tafter the suppressor alarm went out\n\t\t\tof the ALARM
state. \n\t\t\tThe maximum waiting time is as specified \n\t\t\tin ActionsSuppressorExtensionPeriod
. \n\t\t\tAfter this time,\n\t\t\tthe composite alarm performs its actions.\n\t\t
\n\t\t\tCaptures the reason for action suppression.\n\t\t
" + } + }, + "ActionsSuppressor": { + "target": "com.amazonaws.cloudwatch#AlarmArn", + "traits": { + "smithy.api#documentation": "\n\t\t\tActions will be suppressed \n\t\t\tif the suppressor alarm is \n\t\t\tin the ALARM
state. \n\t\t\tActionsSuppressor
can be an AlarmName or an Amazon Resource Name (ARN) \n\t\t\tfrom an existing alarm. \n\t\t
\n\t\t\tThe maximum time \n\t\t\tin seconds \n\t\t\tthat the composite alarm waits\n\t\t\tfor the suppressor alarm \n\t\t\tto go \n\t\t\tinto the ALARM
state. \n\t\t\tAfter this time, \n\t\t\tthe composite alarm performs its actions.\n\t\t
\n\t\t\t\t WaitPeriod
\n\t\t\t\tis required only \n\t\t\t\twhen ActionsSuppressor
is specified.\n\t\t\t
\n\t\t\tThe maximum time \n\t\t\tin seconds\n\t\t\tthat the composite alarm waits \n\t\t\tafter suppressor alarm goes out \n\t\t\tof the ALARM
state. \n\t\t\tAfter this time, \n\t\t\tthe composite alarm performs its actions.\n\t\t
\n\t\t\t\t ExtensionPeriod
\n\t\t\t\tis required only \n\t\t\t\twhen ActionsSuppressor
is specified.\n\t\t\t
A list of key-value pairs to associate with the composite alarm. You can associate as many as 50 tags with an alarm.
\n\t\tTags can help you organize and categorize your\n\t\t\tresources. You can also use them to scope user permissions, by granting a user permission to access or change only resources with\n\t\t\tcertain tag values.
" } + }, + "ActionsSuppressor": { + "target": "com.amazonaws.cloudwatch#AlarmArn", + "traits": { + "smithy.api#documentation": "\n\t\t\tActions will be suppressed \n\t\t\tif the suppressor alarm is \n\t\t\tin the ALARM
state.\n\t\t\tActionsSuppressor
can be an AlarmName or an Amazon Resource Name (ARN) \n\t\t\tfrom an existing alarm.\n\t\t
\n\t\t\tThe maximum time \n\t\t\tin seconds \n\t\t\tthat the composite alarm waits\n\t\t\tfor the suppressor alarm \n\t\t\tto go \n\t\t\tinto the ALARM
state. \n\t\t\tAfter this time, \n\t\t\tthe composite alarm performs its actions.\n\t\t
\n\t\t\t\t WaitPeriod
\n\t\t\t\tis required only \n\t\t\t\twhen ActionsSuppressor
is specified.\n\t\t\t
\n\t\t\tThe maximum time \n\t\t\tin seconds\n\t\t\tthat the composite alarm waits \n\t\t\tafter suppressor alarm goes out \n\t\t\tof the ALARM
state. \n\t\t\tAfter this time, \n\t\t\tthe composite alarm performs its actions.\n\t\t
\n\t\t\t\t ExtensionPeriod
\n\t\t\t\tis required only \n\t\t\t\twhen ActionsSuppressor
is specified.\n\t\t\t
The instance is currently unavailable. Wait a few minutes and try again.
+ */ +export class EC2InstanceUnavailableException extends __BaseException { + readonly name: "EC2InstanceUnavailableException" = "EC2InstanceUnavailableException"; + readonly $fault: "server" = "server"; + Message?: string; + /** + * @internal + */ + constructor(opts: __ExceptionOptionTypeOne of the parameters is not valid.
*/ diff --git a/clients/client-ec2-instance-connect/src/protocols/Aws_json1_1.ts b/clients/client-ec2-instance-connect/src/protocols/Aws_json1_1.ts index 287c7db997dae..c677abb234266 100644 --- a/clients/client-ec2-instance-connect/src/protocols/Aws_json1_1.ts +++ b/clients/client-ec2-instance-connect/src/protocols/Aws_json1_1.ts @@ -23,6 +23,7 @@ import { EC2InstanceNotFoundException, EC2InstanceStateInvalidException, EC2InstanceTypeInvalidException, + EC2InstanceUnavailableException, InvalidArgsException, SendSerialConsoleSSHPublicKeyRequest, SendSerialConsoleSSHPublicKeyResponse, @@ -101,6 +102,9 @@ const deserializeAws_json1_1SendSerialConsoleSSHPublicKeyCommandError = async ( case "EC2InstanceTypeInvalidException": case "com.amazonaws.ec2instanceconnect#EC2InstanceTypeInvalidException": throw await deserializeAws_json1_1EC2InstanceTypeInvalidExceptionResponse(parsedOutput, context); + case "EC2InstanceUnavailableException": + case "com.amazonaws.ec2instanceconnect#EC2InstanceUnavailableException": + throw await deserializeAws_json1_1EC2InstanceUnavailableExceptionResponse(parsedOutput, context); case "InvalidArgsException": case "com.amazonaws.ec2instanceconnect#InvalidArgsException": throw await deserializeAws_json1_1InvalidArgsExceptionResponse(parsedOutput, context); @@ -169,6 +173,9 @@ const deserializeAws_json1_1SendSSHPublicKeyCommandError = async ( case "EC2InstanceStateInvalidException": case "com.amazonaws.ec2instanceconnect#EC2InstanceStateInvalidException": throw await deserializeAws_json1_1EC2InstanceStateInvalidExceptionResponse(parsedOutput, context); + case "EC2InstanceUnavailableException": + case "com.amazonaws.ec2instanceconnect#EC2InstanceUnavailableException": + throw await deserializeAws_json1_1EC2InstanceUnavailableExceptionResponse(parsedOutput, context); case "InvalidArgsException": case "com.amazonaws.ec2instanceconnect#InvalidArgsException": throw await deserializeAws_json1_1InvalidArgsExceptionResponse(parsedOutput, context); @@ -243,6 +250,19 @@ const deserializeAws_json1_1EC2InstanceTypeInvalidExceptionResponse = async ( return __decorateServiceException(exception, body); }; +const deserializeAws_json1_1EC2InstanceUnavailableExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): PromiseThe instance is currently unavailable. Wait a few minutes and try again.
", + "smithy.api#error": "server", + "smithy.api#httpError": 503 + } + }, "com.amazonaws.ec2instanceconnect#InstanceId": { "type": "string", "traits": { @@ -201,6 +218,9 @@ { "target": "com.amazonaws.ec2instanceconnect#EC2InstanceStateInvalidException" }, + { + "target": "com.amazonaws.ec2instanceconnect#EC2InstanceUnavailableException" + }, { "target": "com.amazonaws.ec2instanceconnect#InvalidArgsException" }, @@ -285,6 +305,9 @@ { "target": "com.amazonaws.ec2instanceconnect#EC2InstanceTypeInvalidException" }, + { + "target": "com.amazonaws.ec2instanceconnect#EC2InstanceUnavailableException" + }, { "target": "com.amazonaws.ec2instanceconnect#InvalidArgsException" }, From 6fe9621621e896fa31ad115c3865a35f00b12d3a Mon Sep 17 00:00:00 2001 From: awstoolsReturns query execution runtime statistics related to a single execution of a query if you + * have access to the workgroup in which the query ran. The query execution runtime statistics + * is returned only when QueryExecutionStatus$State is in a SUCCEEDED + * or FAILED state.
+ */ + public getQueryRuntimeStatistics( + args: GetQueryRuntimeStatisticsCommandInput, + options?: __HttpHandlerOptions + ): PromiseReturns table metadata for the specified catalog, database, and table.
*/ diff --git a/clients/client-athena/src/AthenaClient.ts b/clients/client-athena/src/AthenaClient.ts index 00fe431ef2d17..0254e0b16fd99 100644 --- a/clients/client-athena/src/AthenaClient.ts +++ b/clients/client-athena/src/AthenaClient.ts @@ -85,6 +85,10 @@ import { } from "./commands/GetPreparedStatementCommand"; import { GetQueryExecutionCommandInput, GetQueryExecutionCommandOutput } from "./commands/GetQueryExecutionCommand"; import { GetQueryResultsCommandInput, GetQueryResultsCommandOutput } from "./commands/GetQueryResultsCommand"; +import { + GetQueryRuntimeStatisticsCommandInput, + GetQueryRuntimeStatisticsCommandOutput, +} from "./commands/GetQueryRuntimeStatisticsCommand"; import { GetTableMetadataCommandInput, GetTableMetadataCommandOutput } from "./commands/GetTableMetadataCommand"; import { GetWorkGroupCommandInput, GetWorkGroupCommandOutput } from "./commands/GetWorkGroupCommand"; import { ListDatabasesCommandInput, ListDatabasesCommandOutput } from "./commands/ListDatabasesCommand"; @@ -139,6 +143,7 @@ export type ServiceInputTypes = | GetPreparedStatementCommandInput | GetQueryExecutionCommandInput | GetQueryResultsCommandInput + | GetQueryRuntimeStatisticsCommandInput | GetTableMetadataCommandInput | GetWorkGroupCommandInput | ListDataCatalogsCommandInput @@ -177,6 +182,7 @@ export type ServiceOutputTypes = | GetPreparedStatementCommandOutput | GetQueryExecutionCommandOutput | GetQueryResultsCommandOutput + | GetQueryRuntimeStatisticsCommandOutput | GetTableMetadataCommandOutput | GetWorkGroupCommandOutput | ListDataCatalogsCommandOutput diff --git a/clients/client-athena/src/commands/GetQueryRuntimeStatisticsCommand.ts b/clients/client-athena/src/commands/GetQueryRuntimeStatisticsCommand.ts new file mode 100644 index 0000000000000..1d8a4ebd78864 --- /dev/null +++ b/clients/client-athena/src/commands/GetQueryRuntimeStatisticsCommand.ts @@ -0,0 +1,102 @@ +// smithy-typescript generated code +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { AthenaClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../AthenaClient"; +import { GetQueryRuntimeStatisticsInput, GetQueryRuntimeStatisticsOutput } from "../models/models_0"; +import { + deserializeAws_json1_1GetQueryRuntimeStatisticsCommand, + serializeAws_json1_1GetQueryRuntimeStatisticsCommand, +} from "../protocols/Aws_json1_1"; + +export interface GetQueryRuntimeStatisticsCommandInput extends GetQueryRuntimeStatisticsInput {} +export interface GetQueryRuntimeStatisticsCommandOutput extends GetQueryRuntimeStatisticsOutput, __MetadataBearer {} + +/** + *Returns query execution runtime statistics related to a single execution of a query if you + * have access to the workgroup in which the query ran. The query execution runtime statistics + * is returned only when QueryExecutionStatus$State is in a SUCCEEDED + * or FAILED state.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { AthenaClient, GetQueryRuntimeStatisticsCommand } from "@aws-sdk/client-athena"; // ES Modules import + * // const { AthenaClient, GetQueryRuntimeStatisticsCommand } = require("@aws-sdk/client-athena"); // CommonJS import + * const client = new AthenaClient(config); + * const command = new GetQueryRuntimeStatisticsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetQueryRuntimeStatisticsCommandInput} for command's `input` shape. + * @see {@link GetQueryRuntimeStatisticsCommandOutput} for command's `response` shape. + * @see {@link AthenaClientResolvedConfig | config} for AthenaClient's `config` shape. + * + */ +export class GetQueryRuntimeStatisticsCommand extends $Command< + GetQueryRuntimeStatisticsCommandInput, + GetQueryRuntimeStatisticsCommandOutput, + AthenaClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetQueryRuntimeStatisticsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackThe unique ID of the query execution.
+ */ + QueryExecutionId: string | undefined; +} + +export namespace GetQueryRuntimeStatisticsInput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetQueryRuntimeStatisticsInput): any => ({ + ...obj, + }); +} + +/** + *Statistics such as input rows and bytes read by the query, rows and bytes output by the query, + * and the number of rows written by the query.
+ */ +export interface QueryRuntimeStatisticsRows { + /** + *The number of rows read to execute the query.
+ */ + InputRows?: number; + + /** + *The number of bytes read to execute the query.
+ */ + InputBytes?: number; + + /** + *The number of bytes returned by the query.
+ */ + OutputBytes?: number; + + /** + *The number of rows returned by the query.
+ */ + OutputRows?: number; +} + +export namespace QueryRuntimeStatisticsRows { + /** + * @internal + */ + export const filterSensitiveLog = (obj: QueryRuntimeStatisticsRows): any => ({ + ...obj, + }); +} + +/** + *Timeline statistics such as query queue time, planning time, execution time, service processing + * time, and total execution time.
+ */ +export interface QueryRuntimeStatisticsTimeline { + /** + *The number of milliseconds that the query was in your query queue waiting for + * resources. Note that if transient errors occur, Athena might automatically + * add the query back to the queue.
+ */ + QueryQueueTimeInMillis?: number; + + /** + *The number of milliseconds that Athena took to plan the query processing + * flow. This includes the time spent retrieving table partitions from the data source. + * Note that because the query engine performs the query planning, query planning time is a + * subset of engine processing time.
+ */ + QueryPlanningTimeInMillis?: number; + + /** + *The number of milliseconds that the query took to execute.
+ */ + EngineExecutionTimeInMillis?: number; + + /** + *The number of milliseconds that Athena took to finalize and publish the + * query results after the query engine finished running the query.
+ */ + ServiceProcessingTimeInMillis?: number; + + /** + *The number of milliseconds that Athena took to run the query.
+ */ + TotalExecutionTimeInMillis?: number; +} + +export namespace QueryRuntimeStatisticsTimeline { + /** + * @internal + */ + export const filterSensitiveLog = (obj: QueryRuntimeStatisticsTimeline): any => ({ + ...obj, + }); +} + export interface GetTableMetadataInput { /** *The name of the data catalog that contains the database and table metadata to @@ -3198,3 +3295,147 @@ export namespace UpdateWorkGroupOutput { ...obj, }); } + +/** + *
Stage plan information such as name, identifier, sub plans, and remote sources.
+ */ +export interface QueryStagePlanNode { + /** + *Name of the query stage plan that describes the operation this stage is performing as part of + * query execution.
+ */ + Name?: string; + + /** + *Information about the operation this query stage plan node is performing.
+ */ + Identifier?: string; + + /** + *Stage plan information such as name, identifier, sub plans, and remote sources of child plan nodes/
+ */ + Children?: QueryStagePlanNode[]; + + /** + *Source plan node IDs.
+ */ + RemoteSources?: string[]; +} + +export namespace QueryStagePlanNode { + /** + * @internal + */ + export const filterSensitiveLog = (obj: QueryStagePlanNode): any => ({ + ...obj, + }); +} + +/** + *Stage statistics such as input and output rows and bytes, execution time and stage state. This + * information also includes substages and the query stage plan.
+ */ +export interface QueryStage { + /** + *The identifier for a stage.
+ */ + StageId?: number; + + /** + *State of the stage after query execution.
+ */ + State?: string; + + /** + *The number of bytes output from the stage after execution.
+ */ + OutputBytes?: number; + + /** + *The number of rows output from the stage after execution.
+ */ + OutputRows?: number; + + /** + *The number of bytes input into the stage for execution.
+ */ + InputBytes?: number; + + /** + *The number of rows input into the stage for execution.
+ */ + InputRows?: number; + + /** + *Time taken to execute this stage.
+ */ + ExecutionTime?: number; + + /** + *Stage plan information such as name, identifier, sub plans, and source stages.
+ */ + QueryStagePlan?: QueryStagePlanNode; + + /** + *List of sub query stages that form this stage execution plan.
+ */ + SubStages?: QueryStage[]; +} + +export namespace QueryStage { + /** + * @internal + */ + export const filterSensitiveLog = (obj: QueryStage): any => ({ + ...obj, + }); +} + +/** + *The query execution timeline, statistics on input and output rows and bytes, and the different + * query stages that form the query execution plan.
+ */ +export interface QueryRuntimeStatistics { + /** + *Timeline statistics such as query queue time, planning time, execution time, service processing + * time, and total execution time.
+ */ + Timeline?: QueryRuntimeStatisticsTimeline; + + /** + *Statistics such as input rows and bytes read by the query, rows and bytes output by the query, + * and the number of rows written by the query.
+ */ + Rows?: QueryRuntimeStatisticsRows; + + /** + *Stage statistics such as input and output rows and bytes, execution time, and stage state. This + * information also includes substages and the query stage plan.
+ */ + OutputStage?: QueryStage; +} + +export namespace QueryRuntimeStatistics { + /** + * @internal + */ + export const filterSensitiveLog = (obj: QueryRuntimeStatistics): any => ({ + ...obj, + }); +} + +export interface GetQueryRuntimeStatisticsOutput { + /** + *Runtime statistics about the query execution.
+ */ + QueryRuntimeStatistics?: QueryRuntimeStatistics; +} + +export namespace GetQueryRuntimeStatisticsOutput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetQueryRuntimeStatisticsOutput): any => ({ + ...obj, + }); +} diff --git a/clients/client-athena/src/pagination/ListEngineVersionsPaginator.ts b/clients/client-athena/src/pagination/ListEngineVersionsPaginator.ts new file mode 100644 index 0000000000000..52f6913a5ba9b --- /dev/null +++ b/clients/client-athena/src/pagination/ListEngineVersionsPaginator.ts @@ -0,0 +1,61 @@ +// smithy-typescript generated code +import { Paginator } from "@aws-sdk/types"; + +import { Athena } from "../Athena"; +import { AthenaClient } from "../AthenaClient"; +import { + ListEngineVersionsCommand, + ListEngineVersionsCommandInput, + ListEngineVersionsCommandOutput, +} from "../commands/ListEngineVersionsCommand"; +import { AthenaPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: AthenaClient, + input: ListEngineVersionsCommandInput, + ...args: any +): PromiseReturns query execution runtime statistics related to a single execution of a query if you\n have access to the workgroup in which the query ran. The query execution runtime statistics\n is returned only when QueryExecutionStatus$State is in a SUCCEEDED\n or FAILED state.
" + } + }, + "com.amazonaws.athena#GetQueryRuntimeStatisticsInput": { + "type": "structure", + "members": { + "QueryExecutionId": { + "target": "com.amazonaws.athena#QueryExecutionId", + "traits": { + "smithy.api#documentation": "The unique ID of the query execution.
", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.athena#GetQueryRuntimeStatisticsOutput": { + "type": "structure", + "members": { + "QueryRuntimeStatistics": { + "target": "com.amazonaws.athena#QueryRuntimeStatistics", + "traits": { + "smithy.api#documentation": "Runtime statistics about the query execution.
" + } + } + } + }, "com.amazonaws.athena#GetTableMetadata": { "type": "operation", "input": { @@ -1848,7 +1894,12 @@ } ], "traits": { - "smithy.api#documentation": "Returns a list of engine versions that are available to choose from, including the\n Auto option.
" + "smithy.api#documentation": "Returns a list of engine versions that are available to choose from, including the\n Auto option.
", + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "MaxResults" + } } }, "com.amazonaws.athena#ListEngineVersionsInput": { @@ -2834,6 +2885,202 @@ "smithy.api#documentation": "The completion date, current state, submission time, and state change reason (if\n applicable) for the query execution.
" } }, + "com.amazonaws.athena#QueryRuntimeStatistics": { + "type": "structure", + "members": { + "Timeline": { + "target": "com.amazonaws.athena#QueryRuntimeStatisticsTimeline" + }, + "Rows": { + "target": "com.amazonaws.athena#QueryRuntimeStatisticsRows" + }, + "OutputStage": { + "target": "com.amazonaws.athena#QueryStage", + "traits": { + "smithy.api#documentation": "Stage statistics such as input and output rows and bytes, execution time, and stage state. This \n information also includes substages and the query stage plan.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The query execution timeline, statistics on input and output rows and bytes, and the different\n query stages that form the query execution plan.
" + } + }, + "com.amazonaws.athena#QueryRuntimeStatisticsRows": { + "type": "structure", + "members": { + "InputRows": { + "target": "com.amazonaws.athena#Long", + "traits": { + "smithy.api#documentation": "The number of rows read to execute the query.
" + } + }, + "InputBytes": { + "target": "com.amazonaws.athena#Long", + "traits": { + "smithy.api#documentation": "The number of bytes read to execute the query.
" + } + }, + "OutputBytes": { + "target": "com.amazonaws.athena#Long", + "traits": { + "smithy.api#documentation": "The number of bytes returned by the query.
" + } + }, + "OutputRows": { + "target": "com.amazonaws.athena#Long", + "traits": { + "smithy.api#documentation": "The number of rows returned by the query.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Statistics such as input rows and bytes read by the query, rows and bytes output by the query,\n and the number of rows written by the query.
" + } + }, + "com.amazonaws.athena#QueryRuntimeStatisticsTimeline": { + "type": "structure", + "members": { + "QueryQueueTimeInMillis": { + "target": "com.amazonaws.athena#Long", + "traits": { + "smithy.api#documentation": "The number of milliseconds that the query was in your query queue waiting for\n resources. Note that if transient errors occur, Athena might automatically\n add the query back to the queue.
" + } + }, + "QueryPlanningTimeInMillis": { + "target": "com.amazonaws.athena#Long", + "traits": { + "smithy.api#documentation": "The number of milliseconds that Athena took to plan the query processing\n flow. This includes the time spent retrieving table partitions from the data source.\n Note that because the query engine performs the query planning, query planning time is a\n subset of engine processing time.
" + } + }, + "EngineExecutionTimeInMillis": { + "target": "com.amazonaws.athena#Long", + "traits": { + "smithy.api#documentation": "The number of milliseconds that the query took to execute.
" + } + }, + "ServiceProcessingTimeInMillis": { + "target": "com.amazonaws.athena#Long", + "traits": { + "smithy.api#documentation": "The number of milliseconds that Athena took to finalize and publish the\n query results after the query engine finished running the query.
" + } + }, + "TotalExecutionTimeInMillis": { + "target": "com.amazonaws.athena#Long", + "traits": { + "smithy.api#documentation": "The number of milliseconds that Athena took to run the query.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Timeline statistics such as query queue time, planning time, execution time, service processing\n time, and total execution time.
" + } + }, + "com.amazonaws.athena#QueryStage": { + "type": "structure", + "members": { + "StageId": { + "target": "com.amazonaws.athena#Long", + "traits": { + "smithy.api#documentation": "The identifier for a stage.
" + } + }, + "State": { + "target": "com.amazonaws.athena#String", + "traits": { + "smithy.api#documentation": "State of the stage after query execution.
" + } + }, + "OutputBytes": { + "target": "com.amazonaws.athena#Long", + "traits": { + "smithy.api#documentation": "The number of bytes output from the stage after execution.
" + } + }, + "OutputRows": { + "target": "com.amazonaws.athena#Long", + "traits": { + "smithy.api#documentation": "The number of rows output from the stage after execution.
" + } + }, + "InputBytes": { + "target": "com.amazonaws.athena#Long", + "traits": { + "smithy.api#documentation": "The number of bytes input into the stage for execution.
" + } + }, + "InputRows": { + "target": "com.amazonaws.athena#Long", + "traits": { + "smithy.api#documentation": "The number of rows input into the stage for execution.
" + } + }, + "ExecutionTime": { + "target": "com.amazonaws.athena#Long", + "traits": { + "smithy.api#documentation": "Time taken to execute this stage.
" + } + }, + "QueryStagePlan": { + "target": "com.amazonaws.athena#QueryStagePlanNode", + "traits": { + "smithy.api#documentation": "Stage plan information such as name, identifier, sub plans, and source stages.
" + } + }, + "SubStages": { + "target": "com.amazonaws.athena#QueryStages", + "traits": { + "smithy.api#documentation": "List of sub query stages that form this stage execution plan.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Stage statistics such as input and output rows and bytes, execution time and stage state. This \n information also includes substages and the query stage plan.
" + } + }, + "com.amazonaws.athena#QueryStagePlanNode": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.athena#String", + "traits": { + "smithy.api#documentation": "Name of the query stage plan that describes the operation this stage is performing as part of\n query execution.
" + } + }, + "Identifier": { + "target": "com.amazonaws.athena#String", + "traits": { + "smithy.api#documentation": "Information about the operation this query stage plan node is performing.
" + } + }, + "Children": { + "target": "com.amazonaws.athena#QueryStagePlanNodes", + "traits": { + "smithy.api#documentation": "Stage plan information such as name, identifier, sub plans, and remote sources of child plan nodes/
" + } + }, + "RemoteSources": { + "target": "com.amazonaws.athena#StringList", + "traits": { + "smithy.api#documentation": "Source plan node IDs.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Stage plan information such as name, identifier, sub plans, and remote sources.
" + } + }, + "com.amazonaws.athena#QueryStagePlanNodes": { + "type": "list", + "member": { + "target": "com.amazonaws.athena#QueryStagePlanNode" + } + }, + "com.amazonaws.athena#QueryStages": { + "type": "list", + "member": { + "target": "com.amazonaws.athena#QueryStage" + } + }, "com.amazonaws.athena#QueryString": { "type": "string", "traits": { @@ -3165,6 +3412,12 @@ "com.amazonaws.athena#String": { "type": "string" }, + "com.amazonaws.athena#StringList": { + "type": "list", + "member": { + "target": "com.amazonaws.athena#String" + } + }, "com.amazonaws.athena#TableMetadata": { "type": "structure", "members": { From 3c22d7a670a19b887b04249ac5b47633efe83046 Mon Sep 17 00:00:00 2001 From: awstoolsEndpointType
value, include "mysql"
, "oracle"
,
* "postgres"
, "mariadb"
, "aurora"
,
* "aurora-postgresql"
, "opensearch"
, "redshift"
, "s3"
,
- * "db2"
, "azuredb"
, "sybase"
, "dynamodb"
, "mongodb"
,
+ * "db2"
, db2-zos
, "azuredb"
, "sybase"
, "dynamodb"
, "mongodb"
,
* "kinesis"
, "kafka"
, "elasticsearch"
, "docdb"
,
- * "sqlserver"
, and "neptune"
.
+ * "sqlserver"
, "neptune"
, and babelfish
.
*/
EngineName: string | undefined;
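To illustrate the expanded EngineName list above, a sketch, not part of the patch, of creating a Babelfish target endpoint with the v3 DMS client. The identifier, server, credentials, and port are placeholders.

```ts
import {
  DatabaseMigrationServiceClient,
  CreateEndpointCommand,
} from "@aws-sdk/client-database-migration-service";

const client = new DatabaseMigrationServiceClient({ region: "us-east-1" });

// "babelfish" (and "db2-zos") are now documented EngineName values.
const { Endpoint } = await client.send(
  new CreateEndpointCommand({
    EndpointIdentifier: "babelfish-target",  // placeholder identifier
    EndpointType: "target",
    EngineName: "babelfish",
    ServerName: "babelfish.example.com",     // placeholder host
    Port: 1433,
    DatabaseName: "babelfish_db",
    Username: "admin",                       // placeholder credentials
    Password: "placeholder-password",
  })
);

console.log(Endpoint?.EndpointArn);
```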
@@ -8885,7 +8885,7 @@ export interface TableStatistics {
/**
* The state of the tables described.
*Valid states: Table does not exist | Before load | Full load | Table completed | Table - * cancelled | Table error | Table all | Table updates | Table is being reloaded
+ * cancelled | Table error | Table is being reloaded */ TableState?: string; diff --git a/codegen/sdk-codegen/aws-models/database-migration-service.json b/codegen/sdk-codegen/aws-models/database-migration-service.json index c3fc817717640..0db86e9d0b13b 100644 --- a/codegen/sdk-codegen/aws-models/database-migration-service.json +++ b/codegen/sdk-codegen/aws-models/database-migration-service.json @@ -958,7 +958,7 @@ "EngineName": { "target": "com.amazonaws.databasemigrationservice#String", "traits": { - "smithy.api#documentation": "The type of engine for the endpoint. Valid values, depending on the\n EndpointType
value, include \"mysql\"
, \"oracle\"
,\n \"postgres\"
, \"mariadb\"
, \"aurora\"
, \n \"aurora-postgresql\"
, \"opensearch\"
, \"redshift\"
, \"s3\"
,\n \"db2\"
, \"azuredb\"
, \"sybase\"
, \"dynamodb\"
, \"mongodb\"
,\n \"kinesis\"
, \"kafka\"
, \"elasticsearch\"
, \"docdb\"
,\n \"sqlserver\"
, and \"neptune\"
.
The type of engine for the endpoint. Valid values, depending on the\n EndpointType
value, include \"mysql\"
, \"oracle\"
,\n \"postgres\"
, \"mariadb\"
, \"aurora\"
, \n \"aurora-postgresql\"
, \"opensearch\"
, \"redshift\"
, \"s3\"
,\n \"db2\"
, db2-zos
, \"azuredb\"
, \"sybase\"
, \"dynamodb\"
, \"mongodb\"
,\n \"kinesis\"
, \"kafka\"
, \"elasticsearch\"
, \"docdb\"
,\n \"sqlserver\"
, \"neptune\"
, and babelfish
.
The state of the tables described.
\nValid states: Table does not exist | Before load | Full load | Table completed | Table\n cancelled | Table error | Table all | Table updates | Table is being reloaded
" + "smithy.api#documentation": "The state of the tables described.
\nValid states: Table does not exist | Before load | Full load | Table completed | Table\n cancelled | Table error | Table is being reloaded
" } }, "ValidationPendingRecords": { From de0697ad65ae98358ea534d9c2c90eadf4111bce Mon Sep 17 00:00:00 2001 From: awstoolsYou can use this to re-configure your existing document level access - * control without indexing all of your documents again. For example, your - * index contains top-secret company documents that only certain employees - * or users should access. One of these users leaves the company or switches - * to a team that should be blocked from access to top-secret documents. - * Your documents in your index still give this user access to top-secret - * documents due to the user having access at the time your documents were - * indexed. You can create a specific access control configuration for this - * user with deny access. You can later update the access control - * configuration to allow access in the case the user returns to the company - * and re-joins the 'top-secret' team. You can re-configure access control - * for your documents circumstances change.
+ *You can use this to re-configure your existing document level access control without + * indexing all of your documents again. For example, your index contains top-secret + * company documents that only certain employees or users should access. One of these users + * leaves the company or switches to a team that should be blocked from accessing + * top-secret documents. The user still has access to top-secret documents because the user + * had access when your documents were previously indexed. You + * can create a specific access control configuration for the user with deny + * access. You can later update the access control configuration to allow access if the + * user returns to the company and re-joins the 'top-secret' team. You can re-configure + * access control for your documents as circumstances change.
*To apply your access control configuration to certain documents, you call
* the BatchPutDocument
* API with the AccessControlConfigurationId
included in the
@@ -2176,12 +2174,12 @@ export class Kendra extends KendraClient {
* suddenly returns to their previous team and should no longer have access to top secret
* documents. You can update the access control configuration to re-configure access
* control for your documents as circumstances change.
You call the BatchPutDocument
- * API to apply the updated access control configuration, with the
- * AccessControlConfigurationId
included in the
+ *
You call the BatchPutDocument API to
+ * apply the updated access control configuration, with the
+ * AccessControlConfigurationId
included in the
* Document
* object. If you use an S3 bucket as a data source, you synchronize your data source to
- * apply the the AccessControlConfigurationId
in the .metadata.json
file.
+ * apply the AccessControlConfigurationId
in the .metadata.json
file.
* Amazon Kendra currently only supports access control configuration for S3 data
* sources and documents indexed using the BatchPutDocument
API.
You can use this to re-configure your existing document level access - * control without indexing all of your documents again. For example, your - * index contains top-secret company documents that only certain employees - * or users should access. One of these users leaves the company or switches - * to a team that should be blocked from access to top-secret documents. - * Your documents in your index still give this user access to top-secret - * documents due to the user having access at the time your documents were - * indexed. You can create a specific access control configuration for this - * user with deny access. You can later update the access control - * configuration to allow access in the case the user returns to the company - * and re-joins the 'top-secret' team. You can re-configure access control - * for your documents circumstances change.
+ *You can use this to re-configure your existing document level access control without + * indexing all of your documents again. For example, your index contains top-secret + * company documents that only certain employees or users should access. One of these users + * leaves the company or switches to a team that should be blocked from accessing + * top-secret documents. The user still has access to top-secret documents because the user + * had access when your documents were previously indexed. You + * can create a specific access control configuration for the user with deny + * access. You can later update the access control configuration to allow access if the + * user returns to the company and re-joins the 'top-secret' team. You can re-configure + * access control for your documents as circumstances change.
*To apply your access control configuration to certain documents, you call
* the BatchPutDocument
* API with the AccessControlConfigurationId
included in the
diff --git a/clients/client-kendra/src/commands/QueryCommand.ts b/clients/client-kendra/src/commands/QueryCommand.ts
index a43409f1e7746..7211c37bc04b5 100644
--- a/clients/client-kendra/src/commands/QueryCommand.ts
+++ b/clients/client-kendra/src/commands/QueryCommand.ts
@@ -13,8 +13,7 @@ import {
} from "@aws-sdk/types";
import { KendraClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../KendraClient";
-import { QueryResult } from "../models/models_0";
-import { QueryRequest } from "../models/models_1";
+import { QueryRequest, QueryResult } from "../models/models_1";
import { deserializeAws_json1_1QueryCommand, serializeAws_json1_1QueryCommand } from "../protocols/Aws_json1_1";
export interface QueryCommandInput extends QueryRequest {}
diff --git a/clients/client-kendra/src/commands/UpdateAccessControlConfigurationCommand.ts b/clients/client-kendra/src/commands/UpdateAccessControlConfigurationCommand.ts
index 2248110a4e519..825a9425d5ed9 100644
--- a/clients/client-kendra/src/commands/UpdateAccessControlConfigurationCommand.ts
+++ b/clients/client-kendra/src/commands/UpdateAccessControlConfigurationCommand.ts
@@ -37,12 +37,12 @@ export interface UpdateAccessControlConfigurationCommandOutput
* suddenly returns to their previous team and should no longer have access to top secret
* documents. You can update the access control configuration to re-configure access
* control for your documents as circumstances change.
You call the BatchPutDocument
- * API to apply the updated access control configuration, with the
- * AccessControlConfigurationId
included in the
+ *
You call the BatchPutDocument API to
+ * apply the updated access control configuration, with the
+ * AccessControlConfigurationId
included in the
* Document
* object. If you use an S3 bucket as a data source, you synchronize your data source to
- * apply the the AccessControlConfigurationId
in the .metadata.json
file.
+ * apply the AccessControlConfigurationId
in the .metadata.json
file.
* Amazon Kendra currently only supports access control configuration for S3 data
* sources and documents indexed using the BatchPutDocument
API.
You can also provide OAuth authentication credentials of user name, + * password, client ID, and client secret. For more information, see + * Authentication + * for a SharePoint data source.
*/ SecretArn: string | undefined; @@ -4207,6 +4216,14 @@ export interface SharePointConfiguration { * this to connect to SharePoint. */ SslCertificateS3Path?: S3Path; + + /** + *Whether you want to connect to SharePoint using basic authentication of + * user name and password, or OAuth authentication of user name, password, + * client ID, and client secret. You can use OAuth authentication for + * SharePoint Online.
+ */ + AuthenticationType?: SharePointOnlineAuthenticationType | string; } export namespace SharePointConfiguration { @@ -5906,7 +5923,8 @@ export interface DescribeAccessControlConfigurationRequest { IndexId: string | undefined; /** - *The identifier of the access control configuration you want to get information on.
+ *The identifier of the access control configuration + * you want to get information on.
*/ Id: string | undefined; } @@ -5945,8 +5963,8 @@ export interface DescribeAccessControlConfigurationResponse { AccessControlList?: Principal[]; /** - *The list of principal lists that - * define the hierarchy for which documents users should have access to.
+ *The list of principal + * lists that define the hierarchy for which documents users should have access to.
*/ HierarchicalAccessControlList?: HierarchicalPrincipal[]; } @@ -7654,10 +7672,10 @@ export interface ListAccessControlConfigurationsRequest { IndexId: string | undefined; /** - *If the previous response was incomplete (because there is more data - * to retrieve), Amazon Kendra returns a pagination token in the response. - * You can use this pagination token to retrieve the next set of access - * control configurations.
+ *If the previous response was incomplete (because + * there's more data to retrieve), Amazon Kendra returns a pagination token + * in the response. You can use this pagination token to retrieve the next set + * of access control configurations.
*/ NextToken?: string; @@ -7678,9 +7696,9 @@ export namespace ListAccessControlConfigurationsRequest { export interface ListAccessControlConfigurationsResponse { /** - *If the response is truncated, Amazon Kendra returns this token - * that you can use in the subsequent request to retrieve the next set of - * access control configurations.
+ *If the response is truncated, Amazon Kendra returns this + * token, which you can use in the subsequent request to retrieve the + * next set of access control configurations.
*/ NextToken?: string; @@ -10328,53 +10346,3 @@ export namespace AttributeFilter { ...obj, }); } - -export interface QueryResult { - /** - *The unique identifier for the search. You use QueryId
- * to identify the search when using the feedback API.
The results of the search.
- */ - ResultItems?: QueryResultItem[]; - - /** - *Contains the facet results. A FacetResult
contains the
- * counts for each attribute key that was specified in the
- * Facets
input parameter.
The total number of items found by the search; however, you can only - * retrieve up to 100 items. For example, if the search found 192 items, - * you can only retrieve the first 100 of the items.
- */ - TotalNumberOfResults?: number; - - /** - *A list of warning codes and their messages on problems with your query.
- *Amazon Kendra currently only supports one type of warning, which is a warning - * on invalid syntax used in the query. For examples of invalid query syntax, - * see Searching - * with advanced query syntax.
- */ - Warnings?: Warning[]; - - /** - *A list of information related to suggested spell corrections for a query.
- */ - SpellCorrectedQueries?: SpellCorrectedQuery[]; -} - -export namespace QueryResult { - /** - * @internal - */ - export const filterSensitiveLog = (obj: QueryResult): any => ({ - ...obj, - }); -} diff --git a/clients/client-kendra/src/models/models_1.ts b/clients/client-kendra/src/models/models_1.ts index e6379c5c9fc40..9dfdd7ad900b7 100644 --- a/clients/client-kendra/src/models/models_1.ts +++ b/clients/client-kendra/src/models/models_1.ts @@ -3,12 +3,66 @@ import { AttributeFilter, DocumentRelevanceConfiguration, Facet, + FacetResult, + QueryResultItem, QueryResultType, SortingConfiguration, + SpellCorrectedQuery, SpellCorrectionConfiguration, UserContext, + Warning, } from "./models_0"; +export interface QueryResult { + /** + *The unique identifier for the search. You use QueryId
+ * to identify the search when using the feedback API.
The results of the search.
+ */ + ResultItems?: QueryResultItem[]; + + /** + *Contains the facet results. A FacetResult
contains the
+ * counts for each attribute key that was specified in the
+ * Facets
input parameter.
The total number of items found by the search; however, you can only + * retrieve up to 100 items. For example, if the search found 192 items, + * you can only retrieve the first 100 of the items.
+ */ + TotalNumberOfResults?: number; + + /** + *A list of warning codes and their messages on problems with your query.
+ *Amazon Kendra currently only supports one type of warning, which is a warning + * on invalid syntax used in the query. For examples of invalid query syntax, + * see Searching + * with advanced query syntax.
+ */ + Warnings?: Warning[]; + + /** + *A list of information related to suggested spell corrections for a query.
+ */ + SpellCorrectedQueries?: SpellCorrectedQuery[]; +} + +export namespace QueryResult { + /** + * @internal + */ + export const filterSensitiveLog = (obj: QueryResult): any => ({ + ...obj, + }); +} + export interface QueryRequest { /** *The unique identifier of the index to search. The identifier is diff --git a/clients/client-kendra/src/protocols/Aws_json1_1.ts b/clients/client-kendra/src/protocols/Aws_json1_1.ts index 2e07cef676f0b..53736bc9acc89 100644 --- a/clients/client-kendra/src/protocols/Aws_json1_1.ts +++ b/clients/client-kendra/src/protocols/Aws_json1_1.ts @@ -345,7 +345,6 @@ import { Principal, ProxyConfiguration, PutPrincipalMappingRequest, - QueryResult, QueryResultItem, QuerySuggestionsBlockListSummary, QuipConfiguration, @@ -420,7 +419,7 @@ import { WebCrawlerConfiguration, WorkDocsConfiguration, } from "../models/models_0"; -import { QueryRequest } from "../models/models_1"; +import { QueryRequest, QueryResult } from "../models/models_1"; export const serializeAws_json1_1AssociateEntitiesToExperienceCommand = async ( input: AssociateEntitiesToExperienceCommandInput, @@ -7290,6 +7289,7 @@ const serializeAws_json1_1ServiceNowServiceCatalogConfiguration = ( const serializeAws_json1_1SharePointConfiguration = (input: SharePointConfiguration, context: __SerdeContext): any => { return { + ...(input.AuthenticationType != null && { AuthenticationType: input.AuthenticationType }), ...(input.CrawlAttachments != null && { CrawlAttachments: input.CrawlAttachments }), ...(input.DisableLocalGroups != null && { DisableLocalGroups: input.DisableLocalGroups }), ...(input.DocumentTitleFieldName != null && { DocumentTitleFieldName: input.DocumentTitleFieldName }), @@ -10761,6 +10761,7 @@ const deserializeAws_json1_1SharePointConfiguration = ( context: __SerdeContext ): SharePointConfiguration => { return { + AuthenticationType: __expectString(output.AuthenticationType), CrawlAttachments: __expectBoolean(output.CrawlAttachments), DisableLocalGroups: __expectBoolean(output.DisableLocalGroups), DocumentTitleFieldName: __expectString(output.DocumentTitleFieldName), diff --git a/codegen/sdk-codegen/aws-models/kendra.json b/codegen/sdk-codegen/aws-models/kendra.json index 332f0c7b4b792..8ba98de8f3432 100644 --- a/codegen/sdk-codegen/aws-models/kendra.json +++ b/codegen/sdk-codegen/aws-models/kendra.json @@ -2115,7 +2115,7 @@ } ], "traits": { - "smithy.api#documentation": "
Creates an access configuration for your documents. This includes \n user and group access information for your documents. This is useful \n for user context filtering, where search results are filtered based \n on the user or their group access to documents.
\nYou can use this to re-configure your existing document level access \n control without indexing all of your documents again. For example, your \n index contains top-secret company documents that only certain employees \n or users should access. One of these users leaves the company or switches \n to a team that should be blocked from access to top-secret documents. \n Your documents in your index still give this user access to top-secret \n documents due to the user having access at the time your documents were \n indexed. You can create a specific access control configuration for this \n user with deny access. You can later update the access control \n configuration to allow access in the case the user returns to the company \n and re-joins the 'top-secret' team. You can re-configure access control \n for your documents circumstances change.
\nTo apply your access control configuration to certain documents, you call \n the BatchPutDocument \n API with the AccessControlConfigurationId
included in the \n Document \n object. If you use an S3 bucket as a data source, you update the \n .metadata.json
with the AccessControlConfigurationId
\n and synchronize your data source. Amazon Kendra currently only supports \n access control configuration for S3 data sources and documents indexed using the \n BatchPutDocument
API.
Creates an access configuration for your documents. This includes \n user and group access information for your documents. This is useful \n for user context filtering, where search results are filtered based \n on the user or their group access to documents.
\nYou can use this to re-configure your existing document level access control without\n indexing all of your documents again. For example, your index contains top-secret\n company documents that only certain employees or users should access. One of these users\n leaves the company or switches to a team that should be blocked from accessing \n top-secret documents. The user still has access to top-secret documents because the user \n had access when your documents were previously indexed. You \n can create a specific access control configuration for the user with deny \n access. You can later update the access control configuration to allow access if the \n user returns to the company and re-joins the 'top-secret' team. You can re-configure \n access control for your documents as circumstances change.
\nTo apply your access control configuration to certain documents, you call \n the BatchPutDocument \n API with the AccessControlConfigurationId
included in the \n Document \n object. If you use an S3 bucket as a data source, you update the \n .metadata.json
with the AccessControlConfigurationId
\n and synchronize your data source. Amazon Kendra currently only supports \n access control configuration for S3 data sources and documents indexed using the \n BatchPutDocument
API.
The identifier of the access control configuration you want to get information on.
", + "smithy.api#documentation": "The identifier of the access control configuration\n you want to get information on.
", "smithy.api#required": {} } } @@ -4042,7 +4042,7 @@ "HierarchicalAccessControlList": { "target": "com.amazonaws.kendra#HierarchicalPrincipalList", "traits": { - "smithy.api#documentation": "The list of principal lists that \n define the hierarchy for which documents users should have access to.
" + "smithy.api#documentation": "The list of principal \n lists that define the hierarchy for which documents users should have access to.
" } } } @@ -7706,7 +7706,7 @@ "NextToken": { "target": "com.amazonaws.kendra#String", "traits": { - "smithy.api#documentation": "If the previous response was incomplete (because there is more data \n to retrieve), Amazon Kendra returns a pagination token in the response. \n You can use this pagination token to retrieve the next set of access \n control configurations.
" + "smithy.api#documentation": "If the previous response was incomplete (because\n there's more data to retrieve), Amazon Kendra returns a pagination token \n in the response. You can use this pagination token to retrieve the next set \n of access control configurations.
" } }, "MaxResults": { @@ -7723,7 +7723,7 @@ "NextToken": { "target": "com.amazonaws.kendra#String", "traits": { - "smithy.api#documentation": "If the response is truncated, Amazon Kendra returns this token \n that you can use in the subsequent request to retrieve the next set of \n access control configurations.
" + "smithy.api#documentation": "If the response is truncated, Amazon Kendra returns this\n token, which you can use in the subsequent request to retrieve the \n next set of access control configurations.
" } }, "AccessControlConfigurations": { @@ -10921,7 +10921,7 @@ "SecretArn": { "target": "com.amazonaws.kendra#SecretArn", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of an Secrets Manager \n secret that contains the user name and password required to \n connect to the SharePoint instance. \n If you use SharePoint Server, you also need to provide the sever \n domain name as part of the credentials. For\n more information, see Using a\n Microsoft SharePoint Data Source.
", + "smithy.api#documentation": "The Amazon Resource Name (ARN) of an Secrets Manager \n secret that contains the user name and password required to \n connect to the SharePoint instance. \n If you use SharePoint Server, you also need to provide the sever \n domain name as part of the credentials. For\n more information, see Using a\n Microsoft SharePoint Data Source.
\nYou can also provide OAuth authentication credentials of user name, \n password, client ID, and client secret. For more information, see \n Authentication \n for a SharePoint data source.
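A hedged sketch of what an OAuth-based SharePoint Online data source might look like with the v3 client (CreateDataSource is an existing Kendra API; the index ID, role and secret ARNs, and site URL below are placeholders):

import { KendraClient, CreateDataSourceCommand } from "@aws-sdk/client-kendra";

const kendra = new KendraClient({ region: "us-east-1" });

// The secret referenced here would hold the user name, password,
// client ID, and client secret used for OAuth.
await kendra.send(
  new CreateDataSourceCommand({
    IndexId: "example-index-id",
    Name: "sharepoint-online",
    Type: "SHAREPOINT",
    RoleArn: "arn:aws:iam::123456789012:role/example-kendra-ds-role",
    Configuration: {
      SharePointConfiguration: {
        SharePointVersion: "SHAREPOINT_ONLINE",
        Urls: ["https://example.sharepoint.com/sites/docs"],
        SecretArn: "arn:aws:secretsmanager:us-east-1:123456789012:secret:example-sp-oauth",
        AuthenticationType: "OAUTH2",
      },
    },
  })
);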
", "smithy.api#required": {} } }, @@ -10978,12 +10978,33 @@ "traits": { "smithy.api#documentation": "The path to the SSL certificate stored in an Amazon S3 bucket. You use \n this to connect to SharePoint.
" } + }, + "AuthenticationType": { + "target": "com.amazonaws.kendra#SharePointOnlineAuthenticationType", + "traits": { + "smithy.api#documentation": "Whether you want to connect to SharePoint using basic authentication of \n user name and password, or OAuth authentication of user name, password, \n client ID, and client secret. You can use OAuth authentication for \n SharePoint Online.
" + } } }, "traits": { "smithy.api#documentation": "Provides the configuration information to connect to Microsoft\n SharePoint as your data source.
" } }, + "com.amazonaws.kendra#SharePointOnlineAuthenticationType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "HTTP_BASIC", + "name": "HTTP_BASIC" + }, + { + "value": "OAUTH2", + "name": "OAUTH2" + } + ] + } + }, "com.amazonaws.kendra#SharePointUrlList": { "type": "list", "member": { @@ -12128,7 +12149,7 @@ } ], "traits": { - "smithy.api#documentation": "Updates an access control configuration for your documents in an index. This \n includes user and group access information for your documents. This is useful \n for user context filtering, where search results are filtered based on the user \n or their group access to documents.
\nYou can update an access control configuration you created without indexing all \n of your documents again. For example, your index contains top-secret company \n documents that only certain employees or users should access. You created an 'allow' \n access control configuration for one user who recently joined the 'top-secret' team, \n switching from a team with 'deny' access to top-secret documents. However, the user \n suddenly returns to their previous team and should no longer have access to top secret \n documents. You can update the access control configuration to re-configure access \n control for your documents as circumstances change.
\nYou call the BatchPutDocument \n API to apply the updated access control configuration, with the \n AccessControlConfigurationId
included in the \n Document \n object. If you use an S3 bucket as a data source, you synchronize your data source to \n apply the the AccessControlConfigurationId
in the .metadata.json
file. \n Amazon Kendra currently only supports access control configuration for S3 data \n sources and documents indexed using the BatchPutDocument
API.
Updates an access control configuration for your documents in an index. This \n includes user and group access information for your documents. This is useful \n for user context filtering, where search results are filtered based on the user \n or their group access to documents.
\nYou can update an access control configuration you created without indexing all \n of your documents again. For example, your index contains top-secret company \n documents that only certain employees or users should access. You created an 'allow' \n access control configuration for one user who recently joined the 'top-secret' team, \n switching from a team with 'deny' access to top-secret documents. However, the user \n suddenly returns to their previous team and should no longer have access to top secret \n documents. You can update the access control configuration to re-configure access \n control for your documents as circumstances change.
\nYou call the BatchPutDocument API to\n apply the updated access control configuration, with the\n AccessControlConfigurationId
included in the \n Document\n object. If you use an S3 bucket as a data source, you synchronize your data source to\n apply the AccessControlConfigurationId
in the .metadata.json
file. \n Amazon Kendra currently only supports access control configuration for S3 data\n sources and documents indexed using the BatchPutDocument
API.
This action only applies to Aurora MySQL DB clusters.
+ *This action applies only to Aurora MySQL DB clusters.
*Copies a snapshot of a DB cluster.
*To copy a DB cluster snapshot from a shared manual DB cluster snapshot, SourceDBClusterSnapshotIdentifier
* must be the Amazon Resource Name (ARN) of the shared DB cluster snapshot.
You can copy an encrypted DB cluster snapshot from another Amazon Web Services Region. In that case, the Amazon Web Services Region where you call the CopyDBClusterSnapshot
action
- * is the destination Amazon Web Services Region for the encrypted DB cluster snapshot to be copied to. To copy an encrypted DB cluster snapshot from another Amazon Web Services Region,
- * you must provide the following values:
You can copy an encrypted DB cluster snapshot from another Amazon Web Services Region. In that case,
+ * the Amazon Web Services Region where you call the CopyDBClusterSnapshot
operation is the
+ * destination Amazon Web Services Region for the encrypted DB cluster snapshot to be copied to. To copy
+ * an encrypted DB cluster snapshot from another Amazon Web Services Region, you must provide the
+ * following values:
@@ -1062,49 +1064,14 @@ export class RDS extends RDSClient { *
- * PreSignedUrl
- A URL that contains a Signature Version 4 signed request for the
- * CopyDBClusterSnapshot
action to be called in the source Amazon Web Services Region where the DB cluster snapshot is copied from.
- * The pre-signed URL must be a valid request for the CopyDBClusterSnapshot
API action that can be executed in the
- * source Amazon Web Services Region that contains the encrypted DB cluster snapshot to be copied.
The pre-signed URL request must contain the following parameter values:
- *
- * KmsKeyId
- The Amazon Web Services KMS key identifier for the KMS key to use to encrypt the copy of the DB
- * cluster snapshot in the destination Amazon Web Services Region. This is the same identifier for both the CopyDBClusterSnapshot
- * action that is called in the destination Amazon Web Services Region, and the action contained in the pre-signed URL.
- * DestinationRegion
- The name of the Amazon Web Services Region that the DB cluster snapshot is to be created in.
- * SourceDBClusterSnapshotIdentifier
- The DB cluster snapshot identifier for the encrypted DB cluster
- * snapshot to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source Amazon Web Services Region. For example,
- * if you are copying an encrypted DB cluster snapshot from the us-west-2 Amazon Web Services Region, then your SourceDBClusterSnapshotIdentifier
- * looks like the following example: arn:aws:rds:us-west-2:123456789012:cluster-snapshot:aurora-cluster1-snapshot-20161115
.
To learn how to generate a Signature Version 4 signed request, see - * - * Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and - * - * Signature Version 4 Signing Process.
- *If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion
(or --source-region
for the CLI)
- * instead of specifying PreSignedUrl
manually. Specifying SourceRegion
autogenerates a pre-signed URL that is a valid
- * request for the operation that can be executed in the source Amazon Web Services Region.
* TargetDBClusterSnapshotIdentifier
- The identifier for the new copy of the DB cluster snapshot in the destination Amazon Web Services Region.
- * SourceDBClusterSnapshotIdentifier
- The DB cluster snapshot identifier for the encrypted DB cluster snapshot to be copied.
- * This identifier must be in the ARN format for the source Amazon Web Services Region and is the same value as the SourceDBClusterSnapshotIdentifier
in the pre-signed URL.
SourceDBClusterSnapshotIdentifier
- The DB cluster snapshot
+ * identifier for the encrypted DB cluster snapshot to be copied. This identifier
+ * must be in the ARN format for the source Amazon Web Services Region and is the same value as
+ * the SourceDBClusterSnapshotIdentifier
in the presigned URL.
* To cancel the copy operation once it is in progress, delete the target DB cluster snapshot identified @@ -1182,9 +1149,9 @@ export class RDS extends RDSClient { /** *
Copies the specified DB snapshot. The source DB snapshot must be in the available
state.
You can copy a snapshot from one Amazon Web Services Region to another. In that case, the Amazon Web Services Region
- * where you call the CopyDBSnapshot
action is the destination Amazon Web Services Region for the
- * DB snapshot copy.
You can copy a snapshot from one Amazon Web Services Region to another. In that case, the
+ * Amazon Web Services Region where you call the CopyDBSnapshot
operation is the destination
+ * Amazon Web Services Region for the DB snapshot copy.
This command doesn't apply to RDS Custom.
*For more information about copying snapshots, see * Copying a DB Snapshot in the Amazon RDS User Guide.
@@ -1316,9 +1283,7 @@ export class RDS extends RDSClient { *Creates a new Amazon Aurora DB cluster or Multi-AZ DB cluster.
*You can use the ReplicationSourceIdentifier
parameter to create an Amazon
* Aurora DB cluster as a read replica of another DB cluster or Amazon RDS MySQL or
- * PostgreSQL DB instance. For cross-Region replication where the DB cluster identified by
- * ReplicationSourceIdentifier
is encrypted, also specify the
- * PreSignedUrl
parameter.
For more information on Amazon Aurora, see * * What is Amazon Aurora? in the Amazon Aurora User Guide.
@@ -1358,7 +1323,7 @@ export class RDS extends RDSClient { /** *Creates a new custom endpoint and associates it with an Amazon Aurora DB cluster.
*This action only applies to Aurora DB clusters.
+ *This action applies only to Aurora DB clusters.
*After you create a DB cluster parameter group, you should wait at least 5 minutes
- * before creating your first DB cluster
- * that uses that DB cluster parameter group as the default parameter
- * group. This allows Amazon RDS to fully complete the create action before the DB cluster parameter
- * group is used as the default for a new DB cluster. This is especially important for parameters
- * that are critical when creating the default database for a DB cluster, such as the character set
- * for the default database defined by the character_set_database
parameter. You can use the
- * Parameter Groups option of the Amazon RDS console or the
- * DescribeDBClusterParameters
action to verify
- * that your DB cluster parameter group has been created or modified.
character_set_database
parameter. You can use
+ * the Parameter Groups option of the Amazon RDS console or the
+ * DescribeDBClusterParameters
operation to verify that your DB
+ * cluster parameter group has been created or modified.
* For more information on Amazon Aurora, see
*
@@ -1490,6 +1455,14 @@ export class RDS extends RDSClient {
/**
* Creates a new DB instance. The new DB instance can be an RDS DB instance, or it can be a DB instance in an Aurora DB cluster.
+ * For an Aurora DB cluster, you can call this operation multiple times to add more than one DB instance
+ * to the cluster. For more information about creating an RDS DB instance, see
+ * Creating an Amazon RDS DB instance in the Amazon RDS User Guide. For more information about creating a DB instance in an Aurora DB cluster, see
+ *
+ * Creating an Amazon Aurora DB cluster in the Amazon Aurora User Guide.
Amazon Aurora doesn't support this action. Call the CreateDBInstance
- * action to create a DB instance for an Aurora DB cluster.
Amazon Aurora doesn't support this operation. Call the CreateDBInstance
+ * operation to create a DB instance for an Aurora DB cluster.
All read replica DB instances are created with backups disabled. All other DB * instance attributes (including DB security groups and DB parameter groups) are inherited * from the source DB instance, except as specified.
@@ -1781,7 +1754,7 @@ export class RDS extends RDSClient { } /** - *Creates an RDS event notification subscription. This action requires a topic Amazon + *
Creates an RDS event notification subscription. This operation requires a topic Amazon * Resource Name (ARN) created by either the RDS console, the SNS console, or the SNS API. * To obtain an ARN with SNS, you must create a topic in Amazon SNS and subscribe to the * topic. The ARN is displayed in the SNS console.
@@ -1845,7 +1818,7 @@ export class RDS extends RDSClient { * Or you can specify an existing Aurora cluster during the create operation, * and this cluster becomes the primary cluster of the global database. *This action only applies to Aurora DB clusters.
+ *This action applies only to Aurora DB clusters.
*Returns events related to DB instances, DB clusters, DB parameter groups, DB security groups, DB snapshots, DB cluster snapshots, and RDS Proxies for the past 14 days. * Events specific to a particular DB instance, DB cluster, DB parameter group, DB security group, DB snapshot, DB cluster snapshot group, or RDS Proxy can be * obtained by providing the name as a parameter.
+ *For more information on working with events, see Monitoring Amazon RDS events in the Amazon RDS User Guide and Monitoring Amazon Aurora + * events in the Amazon Aurora User Guide.
*By default, RDS returns events that were generated in the past hour.
*For a Multi-AZ DB cluster, failover for a DB cluster promotes one of the readable standby DB instances (read-only instances) * in the DB cluster to be the primary DB instance (the cluster writer).
*An Amazon Aurora DB cluster automatically fails over to an Aurora Replica, if one exists, - * when the primary DB instance fails. A Multi-AZ DB cluster automatically fails over to a readbable standby + * when the primary DB instance fails. A Multi-AZ DB cluster automatically fails over to a readable standby * DB instance when the primary DB instance fails.
*To simulate a failure of a primary instance for testing, you can force a failover.
* Because each instance in a DB cluster has its own endpoint address, make sure to clean up and re-establish any existing
@@ -4242,7 +4217,7 @@ export class RDS extends RDSClient {
* that are critical when creating the default database for a DB cluster, such as the character set
* for the default database defined by the character_set_database
parameter. You can use the
* Parameter Groups option of the Amazon RDS console or the
- * DescribeDBClusterParameters
action to verify
+ * DescribeDBClusterParameters
operation to verify
* that your DB cluster parameter group has been created or modified.
If the modified DB cluster parameter group is used by an Aurora Serverless v1 cluster, Aurora
* applies the update immediately. The cluster restart might interrupt your workload. In that case,
@@ -4304,7 +4279,7 @@ export class RDS extends RDSClient {
* parameter. You can't use all
as a value for that parameter in this
* case.
To view which Amazon Web Services accounts have access to copy or restore a manual DB cluster
- * snapshot, or whether a manual DB cluster snapshot is public or private, use the DescribeDBClusterSnapshotAttributes API action. The accounts are
+ * snapshot, or whether a manual DB cluster snapshot is public or private, use the DescribeDBClusterSnapshotAttributes API operation. The accounts are
* returned as values for the restore
attribute.
ValuesToAdd
parameter. You
* can't use all
as a value for that parameter in this case.
* To view which Amazon Web Services accounts have access to copy or restore a manual DB snapshot, or
- * whether a manual DB snapshot public or private, use the DescribeDBSnapshotAttributes API action. The accounts are returned as
+ * whether a manual DB snapshot public or private, use the DescribeDBSnapshotAttributes API operation. The accounts are returned as
* values for the restore
attribute.
For more information about rebooting, see Rebooting a DB Instance in the Amazon RDS User Guide. *
*This command doesn't apply to RDS Custom.
+ *If your DB instance is part of a Multi-AZ DB cluster, you can reboot the DB cluster with the RebootDBCluster
operation.
Stops automated backup replication for a DB instance.
- *This command doesn't apply to RDS Custom.
+ *This command doesn't apply to RDS Custom, Aurora MySQL, and Aurora PostgreSQL.
*For more information, see * Replicating Automated Backups to Another Amazon Web Services Region in the Amazon RDS User Guide. *
diff --git a/clients/client-rds/src/commands/BacktrackDBClusterCommand.ts b/clients/client-rds/src/commands/BacktrackDBClusterCommand.ts index 77d8876d1516b..e370d060d1985 100644 --- a/clients/client-rds/src/commands/BacktrackDBClusterCommand.ts +++ b/clients/client-rds/src/commands/BacktrackDBClusterCommand.ts @@ -29,7 +29,7 @@ export interface BacktrackDBClusterCommandOutput extends DBClusterBacktrack, __M * Backtracking an Aurora DB Cluster in the * Amazon Aurora User Guide. *This action only applies to Aurora MySQL DB clusters.
+ *This action applies only to Aurora MySQL DB clusters.
*Copies a snapshot of a DB cluster.
*To copy a DB cluster snapshot from a shared manual DB cluster snapshot, SourceDBClusterSnapshotIdentifier
* must be the Amazon Resource Name (ARN) of the shared DB cluster snapshot.
You can copy an encrypted DB cluster snapshot from another Amazon Web Services Region. In that case, the Amazon Web Services Region where you call the CopyDBClusterSnapshot
action
- * is the destination Amazon Web Services Region for the encrypted DB cluster snapshot to be copied to. To copy an encrypted DB cluster snapshot from another Amazon Web Services Region,
- * you must provide the following values:
You can copy an encrypted DB cluster snapshot from another Amazon Web Services Region. In that case,
+ * the Amazon Web Services Region where you call the CopyDBClusterSnapshot
operation is the
+ * destination Amazon Web Services Region for the encrypted DB cluster snapshot to be copied to. To copy
+ * an encrypted DB cluster snapshot from another Amazon Web Services Region, you must provide the
+ * following values:
@@ -38,49 +40,14 @@ export interface CopyDBClusterSnapshotCommandOutput extends CopyDBClusterSnapsho *
- * PreSignedUrl
- A URL that contains a Signature Version 4 signed request for the
- * CopyDBClusterSnapshot
action to be called in the source Amazon Web Services Region where the DB cluster snapshot is copied from.
- * The pre-signed URL must be a valid request for the CopyDBClusterSnapshot
API action that can be executed in the
- * source Amazon Web Services Region that contains the encrypted DB cluster snapshot to be copied.
The pre-signed URL request must contain the following parameter values:
- *
- * KmsKeyId
- The Amazon Web Services KMS key identifier for the KMS key to use to encrypt the copy of the DB
- * cluster snapshot in the destination Amazon Web Services Region. This is the same identifier for both the CopyDBClusterSnapshot
- * action that is called in the destination Amazon Web Services Region, and the action contained in the pre-signed URL.
- * DestinationRegion
- The name of the Amazon Web Services Region that the DB cluster snapshot is to be created in.
- * SourceDBClusterSnapshotIdentifier
- The DB cluster snapshot identifier for the encrypted DB cluster
- * snapshot to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source Amazon Web Services Region. For example,
- * if you are copying an encrypted DB cluster snapshot from the us-west-2 Amazon Web Services Region, then your SourceDBClusterSnapshotIdentifier
- * looks like the following example: arn:aws:rds:us-west-2:123456789012:cluster-snapshot:aurora-cluster1-snapshot-20161115
.
To learn how to generate a Signature Version 4 signed request, see - * - * Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and - * - * Signature Version 4 Signing Process.
- *If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion
(or --source-region
for the CLI)
- * instead of specifying PreSignedUrl
manually. Specifying SourceRegion
autogenerates a pre-signed URL that is a valid
- * request for the operation that can be executed in the source Amazon Web Services Region.
* TargetDBClusterSnapshotIdentifier
- The identifier for the new copy of the DB cluster snapshot in the destination Amazon Web Services Region.
- * SourceDBClusterSnapshotIdentifier
- The DB cluster snapshot identifier for the encrypted DB cluster snapshot to be copied.
- * This identifier must be in the ARN format for the source Amazon Web Services Region and is the same value as the SourceDBClusterSnapshotIdentifier
in the pre-signed URL.
SourceDBClusterSnapshotIdentifier
- The DB cluster snapshot
+ * identifier for the encrypted DB cluster snapshot to be copied. This identifier
+ * must be in the ARN format for the source Amazon Web Services Region and is the same value as
+ * the SourceDBClusterSnapshotIdentifier
in the presigned URL.
* To cancel the copy operation once it is in progress, delete the target DB cluster snapshot identified diff --git a/clients/client-rds/src/commands/CopyDBSnapshotCommand.ts b/clients/client-rds/src/commands/CopyDBSnapshotCommand.ts index 5b0b9989906ab..354da5bb7264a 100644 --- a/clients/client-rds/src/commands/CopyDBSnapshotCommand.ts +++ b/clients/client-rds/src/commands/CopyDBSnapshotCommand.ts @@ -25,9 +25,9 @@ export interface CopyDBSnapshotCommandOutput extends CopyDBSnapshotResult, __Met /** *
Copies the specified DB snapshot. The source DB snapshot must be in the available
state.
You can copy a snapshot from one Amazon Web Services Region to another. In that case, the Amazon Web Services Region
- * where you call the CopyDBSnapshot
action is the destination Amazon Web Services Region for the
- * DB snapshot copy.
You can copy a snapshot from one Amazon Web Services Region to another. In that case, the
+ * Amazon Web Services Region where you call the CopyDBSnapshot
operation is the destination
+ * Amazon Web Services Region for the DB snapshot copy.
This command doesn't apply to RDS Custom.
*For more information about copying snapshots, see * Copying a DB Snapshot in the Amazon RDS User Guide.
diff --git a/clients/client-rds/src/commands/CreateDBClusterCommand.ts b/clients/client-rds/src/commands/CreateDBClusterCommand.ts index a21220879534a..1e3004de5e061 100644 --- a/clients/client-rds/src/commands/CreateDBClusterCommand.ts +++ b/clients/client-rds/src/commands/CreateDBClusterCommand.ts @@ -27,9 +27,7 @@ export interface CreateDBClusterCommandOutput extends CreateDBClusterResult, __M *Creates a new Amazon Aurora DB cluster or Multi-AZ DB cluster.
*You can use the ReplicationSourceIdentifier
parameter to create an Amazon
* Aurora DB cluster as a read replica of another DB cluster or Amazon RDS MySQL or
- * PostgreSQL DB instance. For cross-Region replication where the DB cluster identified by
- * ReplicationSourceIdentifier
is encrypted, also specify the
- * PreSignedUrl
parameter.
For more information on Amazon Aurora, see * * What is Amazon Aurora? in the Amazon Aurora User Guide.
diff --git a/clients/client-rds/src/commands/CreateDBClusterEndpointCommand.ts b/clients/client-rds/src/commands/CreateDBClusterEndpointCommand.ts index 77e3008cb4de0..2404b4edf0f20 100644 --- a/clients/client-rds/src/commands/CreateDBClusterEndpointCommand.ts +++ b/clients/client-rds/src/commands/CreateDBClusterEndpointCommand.ts @@ -25,7 +25,7 @@ export interface CreateDBClusterEndpointCommandOutput extends DBClusterEndpoint, /** *Creates a new custom endpoint and associates it with an Amazon Aurora DB cluster.
*This action only applies to Aurora DB clusters.
+ *This action applies only to Aurora DB clusters.
*After you create a DB cluster parameter group, you should wait at least 5 minutes
- * before creating your first DB cluster
- * that uses that DB cluster parameter group as the default parameter
- * group. This allows Amazon RDS to fully complete the create action before the DB cluster parameter
- * group is used as the default for a new DB cluster. This is especially important for parameters
- * that are critical when creating the default database for a DB cluster, such as the character set
- * for the default database defined by the character_set_database
parameter. You can use the
- * Parameter Groups option of the Amazon RDS console or the
- * DescribeDBClusterParameters
action to verify
- * that your DB cluster parameter group has been created or modified.
character_set_database
parameter. You can use
+ * the Parameter Groups option of the Amazon RDS console or the
+ * DescribeDBClusterParameters
operation to verify that your DB
+ * cluster parameter group has been created or modified.
* For more information on Amazon Aurora, see
*
diff --git a/clients/client-rds/src/commands/CreateDBInstanceCommand.ts b/clients/client-rds/src/commands/CreateDBInstanceCommand.ts
index 447a1f1307277..2d232eec34b53 100644
--- a/clients/client-rds/src/commands/CreateDBInstanceCommand.ts
+++ b/clients/client-rds/src/commands/CreateDBInstanceCommand.ts
@@ -24,6 +24,14 @@ export interface CreateDBInstanceCommandOutput extends CreateDBInstanceResult, _
/**
* Creates a new DB instance. The new DB instance can be an RDS DB instance, or it can be a DB instance in an Aurora DB cluster.
+ * For an Aurora DB cluster, you can call this operation multiple times to add more than one DB instance
+ * to the cluster. For more information about creating an RDS DB instance, see
+ * Creating an Amazon RDS DB instance in the Amazon RDS User Guide. For more information about creating a DB instance in an Aurora DB cluster, see
+ *
+ * Creating an Amazon Aurora DB cluster in the Amazon Aurora User Guide.
Amazon Aurora doesn't support this action. Call the CreateDBInstance
- * action to create a DB instance for an Aurora DB cluster.
Amazon Aurora doesn't support this operation. Call the CreateDBInstance
+ * operation to create a DB instance for an Aurora DB cluster.
All read replica DB instances are created with backups disabled. All other DB * instance attributes (including DB security groups and DB parameter groups) are inherited * from the source DB instance, except as specified.
diff --git a/clients/client-rds/src/commands/CreateEventSubscriptionCommand.ts b/clients/client-rds/src/commands/CreateEventSubscriptionCommand.ts index 65e45dc947df6..c12ea4af2652a 100644 --- a/clients/client-rds/src/commands/CreateEventSubscriptionCommand.ts +++ b/clients/client-rds/src/commands/CreateEventSubscriptionCommand.ts @@ -23,7 +23,7 @@ export interface CreateEventSubscriptionCommandInput extends CreateEventSubscrip export interface CreateEventSubscriptionCommandOutput extends CreateEventSubscriptionResult, __MetadataBearer {} /** - *Creates an RDS event notification subscription. This action requires a topic Amazon + *
Creates an RDS event notification subscription. This operation requires a topic Amazon * Resource Name (ARN) created by either the RDS console, the SNS console, or the SNS API. * To obtain an ARN with SNS, you must create a topic in Amazon SNS and subscribe to the * topic. The ARN is displayed in the SNS console.
diff --git a/clients/client-rds/src/commands/CreateGlobalClusterCommand.ts b/clients/client-rds/src/commands/CreateGlobalClusterCommand.ts index d025ff7872542..a74eca4ec68f2 100644 --- a/clients/client-rds/src/commands/CreateGlobalClusterCommand.ts +++ b/clients/client-rds/src/commands/CreateGlobalClusterCommand.ts @@ -34,7 +34,7 @@ export interface CreateGlobalClusterCommandOutput extends CreateGlobalClusterRes * Or you can specify an existing Aurora cluster during the create operation, * and this cluster becomes the primary cluster of the global database. *This action only applies to Aurora DB clusters.
+ *This action applies only to Aurora DB clusters.
*Returns events related to DB instances, DB clusters, DB parameter groups, DB security groups, DB snapshots, DB cluster snapshots, and RDS Proxies for the past 14 days. * Events specific to a particular DB instance, DB cluster, DB parameter group, DB security group, DB snapshot, DB cluster snapshot group, or RDS Proxy can be * obtained by providing the name as a parameter.
+ *For more information on working with events, see Monitoring Amazon RDS events in the Amazon RDS User Guide and Monitoring Amazon Aurora + * events in the Amazon Aurora User Guide.
*By default, RDS returns events that were generated in the past hour.
*For a Multi-AZ DB cluster, failover for a DB cluster promotes one of the readable standby DB instances (read-only instances) * in the DB cluster to be the primary DB instance (the cluster writer).
*An Amazon Aurora DB cluster automatically fails over to an Aurora Replica, if one exists, - * when the primary DB instance fails. A Multi-AZ DB cluster automatically fails over to a readbable standby + * when the primary DB instance fails. A Multi-AZ DB cluster automatically fails over to a readable standby * DB instance when the primary DB instance fails.
*To simulate a failure of a primary instance for testing, you can force a failover.
* Because each instance in a DB cluster has its own endpoint address, make sure to clean up and re-establish any existing
diff --git a/clients/client-rds/src/commands/ModifyDBClusterParameterGroupCommand.ts b/clients/client-rds/src/commands/ModifyDBClusterParameterGroupCommand.ts
index 39aa859d1b31e..fd31289a20316 100644
--- a/clients/client-rds/src/commands/ModifyDBClusterParameterGroupCommand.ts
+++ b/clients/client-rds/src/commands/ModifyDBClusterParameterGroupCommand.ts
@@ -37,7 +37,7 @@ export interface ModifyDBClusterParameterGroupCommandOutput
* that are critical when creating the default database for a DB cluster, such as the character set
* for the default database defined by the character_set_database
parameter. You can use the
* Parameter Groups option of the Amazon RDS console or the
- * DescribeDBClusterParameters
action to verify
+ * DescribeDBClusterParameters
operation to verify
* that your DB cluster parameter group has been created or modified.
If the modified DB cluster parameter group is used by an Aurora Serverless v1 cluster, Aurora
* applies the update immediately. The cluster restart might interrupt your workload. In that case,
diff --git a/clients/client-rds/src/commands/ModifyDBClusterSnapshotAttributeCommand.ts b/clients/client-rds/src/commands/ModifyDBClusterSnapshotAttributeCommand.ts
index 6ad6af00a36f2..07291f8cf7ce0 100644
--- a/clients/client-rds/src/commands/ModifyDBClusterSnapshotAttributeCommand.ts
+++ b/clients/client-rds/src/commands/ModifyDBClusterSnapshotAttributeCommand.ts
@@ -42,7 +42,7 @@ export interface ModifyDBClusterSnapshotAttributeCommandOutput
* parameter. You can't use all
as a value for that parameter in this
* case.
To view which Amazon Web Services accounts have access to copy or restore a manual DB cluster
- * snapshot, or whether a manual DB cluster snapshot is public or private, use the DescribeDBClusterSnapshotAttributes API action. The accounts are
+ * snapshot, or whether a manual DB cluster snapshot is public or private, use the DescribeDBClusterSnapshotAttributes API operation. The accounts are
* returned as values for the restore
attribute.
ValuesToAdd
parameter. You
* can't use all
as a value for that parameter in this case.
* To view which Amazon Web Services accounts have access to copy or restore a manual DB snapshot, or
- * whether a manual DB snapshot public or private, use the DescribeDBSnapshotAttributes API action. The accounts are returned as
+ * whether a manual DB snapshot public or private, use the DescribeDBSnapshotAttributes API operation. The accounts are returned as
* values for the restore
attribute.
For more information about rebooting, see Rebooting a DB Instance in the Amazon RDS User Guide. *
*This command doesn't apply to RDS Custom.
+ *If your DB instance is part of a Multi-AZ DB cluster, you can reboot the DB cluster with the RebootDBCluster
operation.
Stops automated backup replication for a DB instance.
- *This command doesn't apply to RDS Custom.
+ *This command doesn't apply to RDS Custom, Aurora MySQL, and Aurora PostgreSQL.
*For more information, see * Replicating Automated Backups to Another Amazon Web Services Region in the Amazon RDS User Guide. *
diff --git a/clients/client-rds/src/models/models_0.ts b/clients/client-rds/src/models/models_0.ts index d5d05c96e71dc..8d40e4ce37a49 100644 --- a/clients/client-rds/src/models/models_0.ts +++ b/clients/client-rds/src/models/models_0.ts @@ -1632,22 +1632,33 @@ export interface CopyDBClusterSnapshotMessage { KmsKeyId?: string; /** - *The URL that contains a Signature Version 4 signed request for the CopyDBClusterSnapshot
API action in the Amazon Web Services Region that contains the
- * source DB cluster snapshot to copy. The PreSignedUrl
parameter must be used when copying an encrypted DB cluster snapshot from another Amazon Web Services Region.
- * Don't specify PreSignedUrl
when you are copying an encrypted DB cluster snapshot in the same Amazon Web Services Region.
The pre-signed URL must be a valid request for the CopyDBClusterSnapshot
API action that can be
- * executed in the source Amazon Web Services Region that contains the encrypted DB cluster snapshot to be copied.
- * The pre-signed URL request must contain the following parameter values:
When you are copying a DB cluster snapshot from one Amazon Web Services GovCloud (US) Region
+ * to another, the URL that contains a Signature Version 4 signed request for the
+ * CopyDBClusterSnapshot
API operation in the Amazon Web Services Region that contains
+ * the source DB cluster snapshot to copy. Use the PreSignedUrl
parameter when
+ * copying an encrypted DB cluster snapshot from another Amazon Web Services Region. Don't specify
+ * PreSignedUrl
when copying an encrypted DB cluster snapshot in the same
+ * Amazon Web Services Region.
This setting applies only to Amazon Web Services GovCloud (US) Regions. It's ignored in other + * Amazon Web Services Regions.
+ *The presigned URL must be a valid request for the
+ * CopyDBClusterSnapshot
API operation that can run in the source
+ * Amazon Web Services Region that contains the encrypted DB cluster snapshot to copy. The presigned URL request
+ * must contain the following parameter values:
- * KmsKeyId
- The Amazon Web Services KMS key identifier for the KMS key to use to encrypt the copy of the DB
- * cluster snapshot in the destination Amazon Web Services Region. This is the same identifier for both the CopyDBClusterSnapshot
- * action that is called in the destination Amazon Web Services Region, and the action contained in the pre-signed URL.
KmsKeyId
- The KMS key identifier for the KMS key
+ * to use to encrypt the copy of the DB cluster snapshot in the destination
+ * Amazon Web Services Region. This is the same identifier for both the
+ * CopyDBClusterSnapshot
operation that is called in the
+ * destination Amazon Web Services Region, and the operation contained in the presigned
+ * URL.
*
- * DestinationRegion
- The name of the Amazon Web Services Region that the DB cluster snapshot is to be created in.
DestinationRegion
- The name of the Amazon Web Services Region
+ * that the DB cluster snapshot is to be created in.
* @@ -1658,15 +1669,16 @@ export interface CopyDBClusterSnapshotMessage { *
To learn how to generate a Signature Version 4 signed request, see - * * * Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and * * Signature Version 4 Signing Process.
*If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion
(or --source-region
for the CLI)
- * instead of specifying PreSignedUrl
manually. Specifying SourceRegion
autogenerates a pre-signed URL that is a valid
- * request for the operation that can be executed in the source Amazon Web Services Region.
If you are using an Amazon Web Services SDK tool or the CLI, you can specify
+ * SourceRegion
(or --source-region
for the CLI)
+ * instead of specifying PreSignedUrl
manually. Specifying
+ * SourceRegion
autogenerates a presigned URL that is a valid request
+ * for the operation that can run in the source Amazon Web Services Region.
arn:aws:rds:us-west-2:123456789012:snapshot:mysql-instance1-snapshot-20130805
.
* If you are copying from a shared manual DB snapshot, * this parameter must be the Amazon Resource Name (ARN) of the shared DB snapshot.
- *If you are copying an encrypted snapshot
- * this parameter must be in the ARN format for the source Amazon Web Services Region,
- * and must match the SourceDBSnapshotIdentifier
in the PreSignedUrl
parameter.
If you are copying an encrypted snapshot this parameter must be in the ARN format for the source Amazon Web Services Region.
*Constraints:
*The URL that contains a Signature Version 4 signed request for the
- * CopyDBSnapshot
API action in the source Amazon Web Services Region that contains the
- * source DB snapshot to copy.
When you are copying a snapshot from one Amazon Web Services GovCloud (US) Region to another,
+ * the URL that contains a Signature Version 4 signed request for the CopyDBSnapshot
API
+ * operation in the source Amazon Web Services Region that contains the source DB snapshot to copy.
This setting applies only to Amazon Web Services GovCloud (US) Regions. It's ignored in other + * Amazon Web Services Regions.
*You must specify this parameter when you copy an encrypted DB snapshot from another
* Amazon Web Services Region by using the Amazon RDS API. Don't specify PreSignedUrl
when you are
* copying an encrypted DB snapshot in the same Amazon Web Services Region.
The presigned URL must be a valid request for the CopyDBSnapshot
API action
- * that can be executed in the source Amazon Web Services Region that contains the encrypted DB snapshot to be copied.
- * The presigned URL request must contain the following parameter values:
The presigned URL must be a valid request for the
+ * CopyDBClusterSnapshot
API operation that can run in the source
+ * Amazon Web Services Region that contains the encrypted DB snapshot to copy. The presigned URL request
+ * must contain the following parameter values:
- * DestinationRegion
- The Amazon Web Services Region that the encrypted DB snapshot is copied to.
- * This Amazon Web Services Region is the same one where the CopyDBSnapshot
action is called that contains this presigned URL.
For example, if you copy an encrypted DB snapshot from the us-west-2 Amazon Web Services Region
- * to the us-east-1 Amazon Web Services Region, then you call the CopyDBSnapshot
action in
- * the us-east-1 Amazon Web Services Region and provide a presigned URL that contains a call to the
- * CopyDBSnapshot
action in the us-west-2 Amazon Web Services Region. For this
- * example, the DestinationRegion
in the presigned URL must be set to
- * the us-east-1 Amazon Web Services Region.
DestinationRegion
- The Amazon Web Services Region that the encrypted DB
+ * snapshot is copied to. This Amazon Web Services Region is the same one where the
+ * CopyDBSnapshot
operation is called that contains this presigned
+ * URL.
+ * For example, if you copy an encrypted DB snapshot from the us-west-2
+ * Amazon Web Services Region to the us-east-1 Amazon Web Services Region, then you call the
+ * CopyDBSnapshot
operation in the us-east-1 Amazon Web Services Region and
+ * provide a presigned URL that contains a call to the CopyDBSnapshot
+ * operation in the us-west-2 Amazon Web Services Region. For this example, the
+ * DestinationRegion
in the presigned URL must be set to the
+ * us-east-1 Amazon Web Services Region.
- * KmsKeyId
- The Amazon Web Services KMS key identifier for the KMS key to use to encrypt the copy of the DB snapshot in the destination Amazon Web Services Region.
- * This is the same identifier for both the CopyDBSnapshot
action that is called in the destination Amazon Web Services Region,
- * and the action contained in the presigned URL.
KmsKeyId
- The KMS key identifier for the KMS key to use to
+ * encrypt the copy of the DB snapshot in the destination Amazon Web Services Region. This is the
+ * same identifier for both the CopyDBSnapshot
operation that is
+ * called in the destination Amazon Web Services Region, and the operation contained in the
+ * presigned URL.
* @@ -2193,9 +2211,11 @@ export interface CopyDBSnapshotMessage { * Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and * Signature Version 4 Signing Process.
*If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion
(or --source-region
for the CLI)
- * instead of specifying PreSignedUrl
manually. Specifying SourceRegion
autogenerates a pre-signed URL that is a valid
- * request for the operation that can be executed in the source Amazon Web Services Region.
If you are using an Amazon Web Services SDK tool or the CLI, you can specify
+ * SourceRegion
(or --source-region
for the CLI)
+ * instead of specifying PreSignedUrl
manually. Specifying
+ * SourceRegion
autogenerates a presigned URL that is a valid request
+ * for the operation that can run in the source Amazon Web Services Region.
The status of the VPC security group.
+ *The membership status of the VPC security group.
+ *Currently, the only valid status is active
.
A URL that contains a Signature Version 4 signed request for
- * the CreateDBCluster
action to be called in the source Amazon Web Services Region where the DB cluster is replicated from.
- * Specify PreSignedUrl
only when you are performing cross-Region replication from an encrypted DB cluster.
The pre-signed URL must be a valid request for the CreateDBCluster
API action
- * that can be executed in the source Amazon Web Services Region that contains the encrypted DB cluster to be copied.
The pre-signed URL request must contain the following parameter values:
+ *When you are replicating a DB cluster from one Amazon Web Services GovCloud (US) Region to another,
+ * an URL that contains a Signature Version 4 signed request for the
+ * CreateDBCluster
operation to be called in the source Amazon Web Services Region where
+ * the DB cluster is replicated from. Specify PreSignedUrl
only when you are
+ * performing cross-Region replication from an encrypted DB cluster.
The presigned URL must be a valid request for the CreateDBCluster
API
+ * operation that can run in the source Amazon Web Services Region that contains the encrypted DB
+ * cluster to copy.
The presigned URL request must contain the following parameter values:
*
- * KmsKeyId
- The Amazon Web Services KMS key identifier for the KMS key to use to encrypt the copy of
- * the DB cluster in the destination Amazon Web Services Region. This should refer to the same KMS key for both the CreateDBCluster
- * action that is called in the destination Amazon Web Services Region, and the action contained in the pre-signed URL.
KmsKeyId
- The KMS key identifier for the KMS key to use to
+ * encrypt the copy of the DB cluster in the destination Amazon Web Services Region. This should
+ * refer to the same KMS key for both the CreateDBCluster
operation
+ * that is called in the destination Amazon Web Services Region, and the operation contained in
+ * the presigned URL.
* @@ -3805,9 +3832,11 @@ export interface CreateDBClusterMessage { * * Signature Version 4 Signing Process.
*If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion
(or --source-region
for the CLI)
- * instead of specifying PreSignedUrl
manually. Specifying SourceRegion
autogenerates a pre-signed URL that is a valid
- * request for the operation that can be executed in the source Amazon Web Services Region.
If you are using an Amazon Web Services SDK tool or the CLI, you can specify
+ * SourceRegion
(or --source-region
for the CLI)
+ * instead of specifying PreSignedUrl
manually. Specifying
+ * SourceRegion
autogenerates a presigned URL that is a valid request
+ * for the operation that can run in the source Amazon Web Services Region.
Valid for: Aurora DB clusters only
*/ @@ -3819,7 +3848,7 @@ export interface CreateDBClusterMessage { * enabled. *For more information, see * - * IAM Database Authentication in the Amazon Aurora User Guide..
+ * IAM Database Authentication in the Amazon Aurora User Guide. *Valid for: Aurora DB clusters only
*/ EnableIAMDatabaseAuthentication?: boolean; @@ -3871,6 +3900,7 @@ export interface CreateDBClusterMessage { *The global
engine mode isn't required for Aurora MySQL version 1.22 and higher 1.x versions,
* and global
engine mode isn't required for any 2.x versions.
The multimaster
engine mode only applies for DB clusters created with Aurora MySQL version 5.6.10a.
The serverless
engine mode only applies for Aurora Serverless v1 DB clusters.
For Aurora PostgreSQL, the global
engine mode isn't required, and both the parallelquery
* and the multimaster
engine modes currently aren't supported.
Limitations and requirements apply to some DB engine modes. For more information, see the @@ -3878,26 +3908,31 @@ export interface CreateDBClusterMessage { *
- * - * Limitations of Aurora Serverless v1 + * Limitations of Aurora + * Serverless v1 + *
+ *- * - * Limitations of Parallel Query + * Limitations of Parallel Query *
*- * - * Limitations of Aurora Global Databases + * Limitations of + * Aurora Global Databases *
*- * - * Limitations of Multi-Master Clusters + * Limitations of + * Multi-Master Clusters *
*The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).
+ *The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:
+ *7
+ *+ * month * 31, where month is a number of months from 1-23
+ *731
+ *For example, the following values are valid:
+ *93 (3 months * 31)
+ *341 (11 months * 31)
+ *589 (19 months * 31)
+ *731
+ *If you specify a retention period such as 94, which isn't a valid value, RDS issues an error.
*Valid for: Multi-AZ DB clusters only
*/ PerformanceInsightsRetentionPeriod?: number; @@ -4332,8 +4395,7 @@ export namespace ClusterPendingModifiedValues { */ export interface ScalingConfigurationInfo { /** - *The maximum capacity for the Aurora DB cluster in serverless
DB engine
- * mode.
The minimum capacity for an Aurora DB cluster in serverless
DB engine mode.
The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).
+ *The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:
+ *7
+ *+ * month * 31, where month is a number of months from 1-23
+ *731
+ *For example, the following values are valid:
+ *93 (3 months * 31)
+ *341 (11 months * 31)
+ *589 (19 months * 31)
+ *731
+ *This setting is only for non-Aurora Multi-AZ DB clusters.
*/ PerformanceInsightsRetentionPeriod?: number; @@ -5819,11 +5908,13 @@ export interface CreateDBInstanceMessage { AllocatedStorage?: number; /** - *The compute and memory capacity of the DB instance, for example db.m4.large. + *
The compute and memory capacity of the DB instance, for example db.m5.large. * Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. - * For the full list of DB instance classes, - * and availability for your engine, see - * DB Instance Class in the Amazon RDS User Guide.
+ * For the full list of DB instance classes, and availability for your engine, see + * DB instance + * classes in the Amazon RDS User Guide or + * Aurora + * DB instance classes in the Amazon Aurora User Guide. */ DBInstanceClass: string | undefined; @@ -5983,7 +6074,8 @@ export interface CreateDBInstanceMessage { /** *A list of DB security groups to associate with this DB instance.
- *Default: The default DB security group for the database engine.
+ *This setting applies to the legacy EC2-Classic platform, which is no longer used to create
+ * new DB instances. Use the VpcSecurityGroupIds
setting instead.
Can't be set to 0 if the DB instance is a source to read replicas
*Can't be set to 0 or 35 for an RDS Custom for Oracle DB instance
+ *Can't be set to 0 for an RDS Custom for Oracle DB instance
*A value that indicates whether the DB instance is a Multi-AZ deployment. You can't set
* the AvailabilityZone
parameter if the DB instance is a Multi-AZ deployment.
This setting doesn't apply to RDS Custom.
+ *+ * Amazon Aurora + *
+ *Not applicable. DB instance Availability Zones (AZs) are managed by the DB cluster.
*/ MultiAZ?: boolean; /** *The version number of the database engine to use.
- *For a list of valid engine versions, use the DescribeDBEngineVersions
action.
For a list of valid engine versions, use the DescribeDBEngineVersions
+ * operation.
The following are the database engines and links to information about the major and minor versions that are available with * Amazon RDS. Not every database engine is available for every Amazon Web Services Region.
*@@ -6232,6 +6329,10 @@ export interface CreateDBInstanceMessage { *
Valid values: license-included
| bring-your-own-license
| general-public-license
*
This setting doesn't apply to RDS Custom.
+ *+ * Amazon Aurora + *
+ *Not applicable.
*/ LicenseModel?: string; @@ -6241,6 +6342,10 @@ export interface CreateDBInstanceMessage { *Constraints: For MariaDB, MySQL, Oracle, and PostgreSQL DB instances, must be a multiple between .5 and 50 * of the storage amount for the DB instance. For SQL Server DB instances, must be a multiple between 1 and 50 * of the storage amount for the DB instance.
+ *+ * Amazon Aurora + *
+ *Not applicable. Storage is managed by the DB cluster.
*/ Iops?: number; @@ -6250,6 +6355,10 @@ export interface CreateDBInstanceMessage { * from an option group. Also, that option group can't be removed from a DB instance after it is * associated with a DB instance. *This setting doesn't apply to RDS Custom.
+ *+ * Amazon Aurora + *
+ *Not applicable.
*/ OptionGroupName?: string; @@ -6321,12 +6430,20 @@ export interface CreateDBInstanceMessage { *Default: io1
if the Iops
parameter
* is specified, otherwise gp2
*
+ * Amazon Aurora + *
+ *Not applicable. Storage is managed by the DB cluster.
*/ StorageType?: string; /** *The ARN from the key store with which to associate the instance for TDE encryption.
*This setting doesn't apply to RDS Custom.
+ *+ * Amazon Aurora + *
+ *Not applicable.
*/ TdeCredentialArn?: string; @@ -6376,6 +6493,10 @@ export interface CreateDBInstanceMessage { *For more information, see * Kerberos Authentication in the Amazon RDS User Guide.
*This setting doesn't apply to RDS Custom.
+ *+ * Amazon Aurora + *
+ *Not applicable. The domain is managed by the DB cluster.
*/ Domain?: string; @@ -6413,6 +6534,10 @@ export interface CreateDBInstanceMessage { /** *Specify the name of the IAM role to be used when making API calls to the Directory Service.
*This setting doesn't apply to RDS Custom.
+ *+ * Amazon Aurora + *
+ *Not applicable. The domain is managed by the DB cluster.
*/ DomainIAMRoleName?: string; @@ -6437,11 +6562,14 @@ export interface CreateDBInstanceMessage { /** *A value that indicates whether to enable mapping of Amazon Web Services Identity and Access Management * (IAM) accounts to database accounts. By default, mapping isn't enabled.
- *This setting doesn't apply to RDS Custom or Amazon Aurora. In Aurora, mapping Amazon Web Services IAM accounts - * to database accounts is managed by the DB cluster.
*For more information, see - * - * IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.
+ * + * IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide. + *This setting doesn't apply to RDS Custom.
+ *+ * Amazon Aurora + *
+ *Not applicable. Mapping Amazon Web Services IAM accounts to database accounts is managed by the DB cluster.
*/ EnableIAMDatabaseAuthentication?: boolean; @@ -6463,7 +6591,35 @@ export interface CreateDBInstanceMessage { PerformanceInsightsKMSKeyId?: string; /** - *The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).
+ *The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:
+ *7
+ *+ * month * 31, where month is a number of months from 1-23
+ *731
+ *For example, the following values are valid:
+ *93 (3 months * 31)
+ *341 (11 months * 31)
+ *589 (19 months * 31)
+ *731
+ *If you specify a retention period such as 94, which isn't a valid value, RDS issues an error.
*This setting doesn't apply to RDS Custom.
*/ PerformanceInsightsRetentionPeriod?: number; @@ -6508,6 +6664,10 @@ export interface CreateDBInstanceMessage { /** *The number of CPU cores and the number of threads per core for the DB instance class of the DB instance.
*This setting doesn't apply to RDS Custom.
+ *+ * Amazon Aurora + *
+ *Not applicable.
*/ ProcessorFeatures?: ProcessorFeature[]; @@ -6533,6 +6693,10 @@ export interface CreateDBInstanceMessage { * Managing capacity automatically with Amazon RDS storage autoscaling * in the Amazon RDS User Guide. *This setting doesn't apply to RDS Custom.
+ *+ * Amazon Aurora + *
+ *Not applicable. Storage is managed by the DB cluster.
*/ MaxAllocatedStorage?: number; @@ -7482,7 +7646,34 @@ export interface DBInstance { PerformanceInsightsKMSKeyId?: string; /** - *The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).
+ *The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:
+ *7
+ *+ * month * 31, where month is a number of months from 1-23
+ *731
+ *For example, the following values are valid:
+ *93 (3 months * 31)
+ *341 (11 months * 31)
+ *589 (19 months * 31)
+ *731
+ *DBParameterGroup
of source DB instance for a same Region read
* replica, or the default DBParameterGroup
for the specified DB engine for a
* cross-Region read replica.
- * Specifying a parameter group for this operation is only supported for Oracle DB instances. It - * isn't supported for RDS Custom.
+ *Specifying a parameter group for this operation is only supported for MySQL and Oracle DB instances. + * It isn't supported for RDS Custom.
*Constraints:
*The URL that contains a Signature Version 4 signed request for the CreateDBInstanceReadReplica
API action
- * in the source Amazon Web Services Region that contains the source DB instance.
When you are creating a read replica from one Amazon Web Services GovCloud (US) Region to another or
+ * from one China Amazon Web Services Region to another, the URL that contains a Signature Version 4
+ * signed request for the CreateDBInstanceReadReplica
API operation in the
+ * source Amazon Web Services Region that contains the source DB instance.
This setting applies only to Amazon Web Services GovCloud (US) Regions and + * China Amazon Web Services Regions. It's ignored in other Amazon Web Services Regions.
*You must specify this parameter when you create an encrypted read replica from
* another Amazon Web Services Region by using the Amazon RDS API. Don't specify
* PreSignedUrl
when you are creating an encrypted read replica in the
* same Amazon Web Services Region.
The presigned URL must be a valid request for the CreateDBInstanceReadReplica
API action
- * that can be executed in the source Amazon Web Services Region that contains the encrypted source DB instance.
- * The presigned URL request must contain the following parameter values:
The presigned URL must be a valid request for the
+ * CreateDBInstanceReadReplica
API operation that can run in the
+ * source Amazon Web Services Region that contains the encrypted source DB instance. The presigned URL
+ * request must contain the following parameter values:
* DestinationRegion
- The Amazon Web Services Region that the encrypted read
* replica is created in. This Amazon Web Services Region is the same one where the
- * CreateDBInstanceReadReplica
action is called that contains this presigned URL.
For example, if you create an encrypted DB instance in the us-west-1 Amazon Web Services Region,
- * from a source DB instance in the us-east-2 Amazon Web Services Region,
- * then you call the CreateDBInstanceReadReplica
action in
- * the us-east-1 Amazon Web Services Region and provide a presigned URL that contains a call to the
- * CreateDBInstanceReadReplica
action in the us-west-2 Amazon Web Services Region. For this
- * example, the DestinationRegion
in the presigned URL must be set to
- * the us-east-1 Amazon Web Services Region.
CreateDBInstanceReadReplica
operation is called that contains
+ * this presigned URL.
+ * For example, if you create an encrypted DB instance in the us-west-1
+ * Amazon Web Services Region, from a source DB instance in the us-east-2 Amazon Web Services Region, then you
+ * call the CreateDBInstanceReadReplica
operation in the us-east-1
+ * Amazon Web Services Region and provide a presigned URL that contains a call to the
+ * CreateDBInstanceReadReplica
operation in the us-west-2
+ * Amazon Web Services Region. For this example, the DestinationRegion
in the
+ * presigned URL must be set to the us-east-1 Amazon Web Services Region.
- * KmsKeyId
- The Amazon Web Services KMS key identifier for the key to use to
+ * KmsKeyId
- The KMS key identifier for the key to use to
* encrypt the read replica in the destination Amazon Web Services Region. This is the same
- * identifier for both the CreateDBInstanceReadReplica
action that is
- * called in the destination Amazon Web Services Region, and the action contained in the presigned
- * URL.
CreateDBInstanceReadReplica
operation that
+ * is called in the destination Amazon Web Services Region, and the operation contained in the
+ * presigned URL.
*
@@ -8095,9 +8292,9 @@ export interface CreateDBInstanceReadReplicaMessage {
* SourceRegion
(or --source-region
for the CLI)
* instead of specifying PreSignedUrl
manually. Specifying
* SourceRegion
autogenerates a presigned URL that is a valid request
- * for the operation that can be executed in the source Amazon Web Services Region.
- * SourceRegion
isn't supported for SQL Server, because SQL Server on Amazon RDS
+ * SourceRegion
isn't supported for SQL Server, because Amazon RDS for SQL Server
* doesn't support cross-Region read replicas.
This setting doesn't apply to RDS Custom.
@@ -8133,7 +8330,36 @@ export interface CreateDBInstanceReadReplicaMessage { PerformanceInsightsKMSKeyId?: string; /** - *The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).
+ *The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:
+ *7
+ *          <p><i>month</i> * 31, where <i>month</i> is a number of months from 1-23</p>
+ *731
+ *For example, the following values are valid:
+ *93 (3 months * 31)
+ *341 (11 months * 31)
+ *589 (19 months * 31)
+ *731
+ *If you specify a retention period such as 94, which isn't a valid value, RDS issues an error.
+ *
 *          <p>This setting doesn't apply to RDS Custom.</p>
*/ PerformanceInsightsRetentionPeriod?: number; @@ -8538,8 +8764,8 @@ export interface CreateDBProxyRequest { /** *The kinds of databases that the proxy can connect to. * This value determines which database network protocol the proxy recognizes when it interprets - * network traffic to and from the database. - * The engine family applies to MySQL and PostgreSQL for both RDS and Aurora.
+ * network traffic to and from the database. For Aurora MySQL, RDS for MariaDB, and RDS for MySQL databases, specifyMYSQL
.
+ * For Aurora PostgreSQL and RDS for PostgreSQL databases, specify POSTGRESQL
.
*/
EngineFamily: EngineFamily | string | undefined;
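A sketch of creating a proxy with the engine family chosen per the rule above (MYSQL for Aurora MySQL, RDS for MariaDB, and RDS for MySQL; POSTGRESQL for Aurora PostgreSQL and RDS for PostgreSQL). The proxy name, secret and role ARNs, and subnet IDs are placeholders.

```ts
import { RDSClient, CreateDBProxyCommand } from "@aws-sdk/client-rds";

const client = new RDSClient({ region: "us-east-1" });

await client.send(
  new CreateDBProxyCommand({
    DBProxyName: "example-proxy",
    EngineFamily: "POSTGRESQL", // proxy in front of an RDS for PostgreSQL or Aurora PostgreSQL database
    Auth: [
      {
        AuthScheme: "SECRETS",
        SecretArn: "arn:aws:secretsmanager:us-east-1:123456789012:secret:example", // placeholder
        IAMAuth: "DISABLED",
      },
    ],
    RoleArn: "arn:aws:iam::123456789012:role/example-proxy-role", // placeholder
    VpcSubnetIds: ["subnet-0123456789abcdef0", "subnet-0fedcba9876543210"], // placeholders
  })
);
```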
@@ -8675,7 +8901,9 @@ export interface DBProxy {
Status?: DBProxyStatus | string;
/**
- * The engine family applies to MySQL and PostgreSQL for both RDS and Aurora.
+ *The kinds of databases that the proxy can connect to. This value determines which database network protocol
+ * the proxy recognizes when it interprets network traffic to and from the database. MYSQL
supports Aurora MySQL,
+ * RDS for MariaDB, and RDS for MySQL databases. POSTGRESQL
supports Aurora PostgreSQL and RDS for PostgreSQL databases.
The name for your database of up to 64 alpha-numeric characters. If you do not provide a name, Amazon - * Aurora will not create a database in the global database cluster you are creating.
+ *The name for your database of up to 64 alphanumeric characters. If you do not provide + * a name, Amazon Aurora will not create a database in the global database cluster you are + * creating.
*/ DatabaseName?: string; diff --git a/clients/client-rds/src/models/models_1.ts b/clients/client-rds/src/models/models_1.ts index 3738acca2f311..02e3a869c0c33 100644 --- a/clients/client-rds/src/models/models_1.ts +++ b/clients/client-rds/src/models/models_1.ts @@ -117,7 +117,6 @@ export namespace DBSnapshotMessage { export interface DescribeDBSnapshotsMessage { /** *The ID of the DB instance to retrieve the list of DB snapshots for.
- * This parameter can't be used in conjunction with DBSnapshotIdentifier
.
* This parameter isn't case-sensitive.
Constraints:
*A specific DB snapshot identifier to describe. This parameter can't be used in conjunction with DBInstanceIdentifier
.
+ *
A specific DB snapshot identifier to describe. This value is stored as a lowercase string.</p>
*Constraints:
*Contains the results of a successful invocation of the DescribeEventCategories
operation.
Contains the results of a successful invocation of the DescribeEventCategories + * operation.
*/ export interface EventCategoriesMap { /** @@ -870,7 +870,7 @@ export namespace DescribeEventsMessage { } /** - *This data type is used as a response element in the DescribeEvents
action.
This data type is used as a response element in the DescribeEvents action.
*/ export interface Event { /** @@ -1789,7 +1789,7 @@ export interface DescribeOrderableDBInstanceOptionsMessage { * a pagination token called a marker is included in the response so that * you can retrieve the remaining results. *Default: 100
- *Constraints: Minimum 20, maximum 100.
+ *Constraints: Minimum 20, maximum 10000.
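Because MaxRecords only caps a single page (now 20-10000 here), callers still follow the returned marker to collect everything. A small sketch, with the engine name assumed for illustration:

```ts
import {
  RDSClient,
  DescribeOrderableDBInstanceOptionsCommand,
  OrderableDBInstanceOption,
} from "@aws-sdk/client-rds";

const client = new RDSClient({ region: "us-east-1" });

const options: OrderableDBInstanceOption[] = [];
let marker: string | undefined;

do {
  const page = await client.send(
    new DescribeOrderableDBInstanceOptionsCommand({
      Engine: "mysql", // example engine
      MaxRecords: 100, // any value within the documented 20-10000 range
      Marker: marker,
    })
  );
  options.push(...(page.OrderableDBInstanceOptions ?? []));
  marker = page.Marker;
} while (marker);

console.log(`found ${options.length} orderable options`);
```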
*/ MaxRecords?: number; @@ -3766,7 +3766,35 @@ export interface ModifyDBClusterMessage { PerformanceInsightsKMSKeyId?: string; /** - *The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).
+ *The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:
+ *7
+ *+ * month * 31, where month is a number of months from 1-23
+ *          <p><i>month</i> * 31, where <i>month</i> is a number of months from 1-23</p>
+ *For example, the following values are valid:
+ *93 (3 months * 31)
+ *341 (11 months * 31)
+ *589 (19 months * 31)
+ *731
+ *If you specify a retention period such as 94, which isn't a valid value, RDS issues an error.
*Valid for: Multi-AZ DB clusters only
*/ PerformanceInsightsRetentionPeriod?: number; @@ -3937,7 +3965,7 @@ export interface ModifyDBClusterSnapshotAttributeMessage { * set this value torestore
.
* To view the list of attributes available to modify, use the - * DescribeDBClusterSnapshotAttributes API action.
+ * DescribeDBClusterSnapshotAttributes API operation. *The new compute and memory capacity of the DB instance, for example db.m4.large. + *
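Concretely, sharing a manual cluster snapshot with another account means adding that account ID to the restore attribute. A short sketch; the snapshot name and account ID are placeholders.

```ts
import { RDSClient, ModifyDBClusterSnapshotAttributeCommand } from "@aws-sdk/client-rds";

const client = new RDSClient({ region: "us-east-1" });

await client.send(
  new ModifyDBClusterSnapshotAttributeCommand({
    DBClusterSnapshotIdentifier: "example-cluster-snapshot", // placeholder
    AttributeName: "restore",
    ValuesToAdd: ["123456789012"], // account allowed to copy or restore the snapshot
  })
);
```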
The new compute and memory capacity of the DB instance, for example db.m5.large. * Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. - * For the full list of DB instance classes, - * and availability for your engine, see - * DB Instance Class in the Amazon RDS User Guide.
+ * For the full list of DB instance classes, and availability for your engine, see + * DB instance + * classes in the Amazon RDS User Guide or + * Aurora + * DB instance classes in the Amazon Aurora User Guide. *If you modify the DB instance class, an outage occurs during the change.
* The change is applied during the next maintenance window,
* unless ApplyImmediately
is enabled for this request.
Constraints: Must contain from 8 to 128 characters.
*Amazon RDS API actions never return the password, + *
Amazon RDS API operations never return the password, * so this action provides a way to regain access to a primary instance user if the password is lost. * This includes restoring privileges that might have been accidentally revoked.
*It must be a value from 0 to 35. It can't be set to 0 if the DB instance is a source to - * read replicas. It can't be set to 0 or 35 for an RDS Custom for Oracle DB instance.
+ * read replicas. It can't be set to 0 for an RDS Custom for Oracle DB instance. *It can be specified for a MySQL read replica only if the source is running MySQL 5.6 or @@ -4585,7 +4615,7 @@ export interface ModifyDBInstanceMessage { /** *
A value that indicates whether to enable Performance Insights for the DB instance.
*For more information, see - * Using Amazon Performance Insights in the Amazon RDS User Guide..
+ * Using Amazon Performance Insights in the Amazon RDS User Guide. *This setting doesn't apply to RDS Custom.
*/ EnablePerformanceInsights?: boolean; @@ -4601,7 +4631,35 @@ export interface ModifyDBInstanceMessage { PerformanceInsightsKMSKeyId?: string; /** - *The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).
+ *The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:
+ *7
+ *+ * month * 31, where month is a number of months from 1-23
+ *          <p><i>month</i> * 31, where <i>month</i> is a number of months from 1-23</p>
+ *For example, the following values are valid:
+ *93 (3 months * 31)
+ *341 (11 months * 31)
+ *589 (19 months * 31)
+ *731
+ *If you specify a retention period such as 94, which isn't a valid value, RDS issues an error.
*This setting doesn't apply to RDS Custom.
*/ PerformanceInsightsRetentionPeriod?: number; @@ -5023,12 +5081,12 @@ export namespace ConnectionPoolConfiguration { export interface ModifyDBProxyTargetGroupRequest { /** - *The name of the new target group to assign to the proxy.
+ *The name of the target group to modify.
*/ TargetGroupName: string | undefined; /** - *The name of the new proxy to which to assign the target group.
+ *The name of the proxy.
*/ DBProxyName: string | undefined; @@ -5165,7 +5223,7 @@ export interface ModifyDBSnapshotAttributeMessage { * set this value torestore
.
* To view the list of attributes available to modify, use the - * DescribeDBSnapshotAttributes API action.
+ * DescribeDBSnapshotAttributes API operation. *The name of the database engine to be used for this DB cluster.
- *Valid Values: aurora
(for MySQL 5.6-compatible Aurora), aurora-mysql
(for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora), and aurora-postgresql
- *
Valid Values: aurora
(for MySQL 5.6-compatible Aurora) and aurora-mysql
+ * (for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora)
* aws rds describe-db-engine-versions --engine aurora-mysql --query "DBEngineVersions[].EngineVersion"
*
To list all of the available engine versions for aurora-postgresql
, use the following command:
- * aws rds describe-db-engine-versions --engine aurora-postgresql --query "DBEngineVersions[].EngineVersion"
- *
* Aurora MySQL *
- *Example: 5.6.10a
, 5.6.mysql_aurora.1.19.2
, 5.7.12
, 5.7.mysql_aurora.2.04.5
, 8.0.mysql_aurora.3.01.0
- *
- * Aurora PostgreSQL - *
- *Example: 9.6.3
, 10.7
+ *
Example: 5.6.10a
, 5.6.mysql_aurora.1.19.2
, 5.7.mysql_aurora.2.07.1
,
+ * 8.0.mysql_aurora.3.02.0
*
Possible value is postgresql
.
For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide..
+ *For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.
*For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.
*Valid for: Aurora DB clusters and Multi-AZ DB clusters
*/ @@ -7283,7 +7333,7 @@ export interface RestoreDBClusterToPointInTimeMessage { * Aurora PostgreSQL * *Possible value is postgresql
.
For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide..
+ *For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.
*For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.
*Valid for: Aurora DB clusters and Multi-AZ DB clusters
*/ @@ -8249,7 +8299,7 @@ export interface RestoreDBInstanceFromS3Message { /** *A value that indicates whether to enable Performance Insights for the DB instance.
*For more information, see - * Using Amazon Performance Insights in the Amazon RDS User Guide..
+ * Using Amazon Performance Insights in the Amazon RDS User Guide. */ EnablePerformanceInsights?: boolean; @@ -8263,7 +8313,35 @@ export interface RestoreDBInstanceFromS3Message { PerformanceInsightsKMSKeyId?: string; /** - *The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).
+ *The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:
+ *7
+ *+ * month * 31, where month is a number of months from 1-23
+ *          <p><i>month</i> * 31, where <i>month</i> is a number of months from 1-23</p>
+ *For example, the following values are valid:
+ *93 (3 months * 31)
+ *341 (11 months * 31)
+ *589 (19 months * 31)
+ *731
+ *If you specify a retention period such as 94, which isn't a valid value, RDS issues an error.
*/ PerformanceInsightsRetentionPeriod?: number; @@ -9104,10 +9182,25 @@ export interface StartDBInstanceAutomatedBackupsReplicationMessage { KmsKeyId?: string; /** - *A URL that contains a Signature Version 4 signed request for the StartDBInstanceAutomatedBackupsReplication action to be - * called in the Amazon Web Services Region of the source DB instance. The presigned URL must be a valid request for the - * StartDBInstanceAutomatedBackupsReplication API action that can be executed in the Amazon Web Services Region that contains - * the source DB instance.
+ *In an Amazon Web Services GovCloud (US) Region, an URL that contains a Signature Version 4 signed request
+ * for the StartDBInstanceAutomatedBackupsReplication
operation to call
+ * in the Amazon Web Services Region of the source DB instance. The presigned URL must be a valid request for the
+ * StartDBInstanceAutomatedBackupsReplication
API operation that can run in
+ * the Amazon Web Services Region that contains the source DB instance.
This setting applies only to Amazon Web Services GovCloud (US) Regions. It's ignored in other + * Amazon Web Services Regions.
+ *To learn how to generate a Signature Version 4 signed request, see + * + * Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and + * + * Signature Version 4 Signing Process.
+ *If you are using an Amazon Web Services SDK tool or the CLI, you can specify
+ * SourceRegion
(or --source-region
for the CLI)
+ * instead of specifying PreSignedUrl
manually. Specifying
+ * SourceRegion
autogenerates a presigned URL that is a valid request
+ * for the operation that can run in the source Amazon Web Services Region.
Backtracks a DB cluster to a specific time, without creating a new DB cluster.
\nFor more information on backtracking, see \n \n Backtracking an Aurora DB Cluster in the \n Amazon Aurora User Guide.
\nThis action only applies to Aurora MySQL DB clusters.
\nBacktracks a DB cluster to a specific time, without creating a new DB cluster.
\nFor more information on backtracking, see \n \n Backtracking an Aurora DB Cluster in the \n Amazon Aurora User Guide.
\nThis action applies only to Aurora MySQL DB clusters.
\nCopies a snapshot of a DB cluster.
\nTo copy a DB cluster snapshot from a shared manual DB cluster snapshot, SourceDBClusterSnapshotIdentifier
\n must be the Amazon Resource Name (ARN) of the shared DB cluster snapshot.
You can copy an encrypted DB cluster snapshot from another Amazon Web Services Region. In that case, the Amazon Web Services Region where you call the CopyDBClusterSnapshot
action \n is the destination Amazon Web Services Region for the encrypted DB cluster snapshot to be copied to. To copy an encrypted DB cluster snapshot from another Amazon Web Services Region, \n you must provide the following values:
\n KmsKeyId
- The Amazon Web Services Key Management System (Amazon Web Services KMS) key identifier for the key to use to \n encrypt the copy of the DB cluster snapshot in the destination Amazon Web Services Region.
\n PreSignedUrl
- A URL that contains a Signature Version 4 signed request for the \n CopyDBClusterSnapshot
action to be called in the source Amazon Web Services Region where the DB cluster snapshot is copied from. \n The pre-signed URL must be a valid request for the CopyDBClusterSnapshot
API action that can be executed in the \n source Amazon Web Services Region that contains the encrypted DB cluster snapshot to be copied.
The pre-signed URL request must contain the following parameter values:
\n\n KmsKeyId
- The Amazon Web Services KMS key identifier for the KMS key to use to encrypt the copy of the DB \n cluster snapshot in the destination Amazon Web Services Region. This is the same identifier for both the CopyDBClusterSnapshot
\n action that is called in the destination Amazon Web Services Region, and the action contained in the pre-signed URL.
\n DestinationRegion
- The name of the Amazon Web Services Region that the DB cluster snapshot is to be created in.
\n SourceDBClusterSnapshotIdentifier
- The DB cluster snapshot identifier for the encrypted DB cluster \n snapshot to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source Amazon Web Services Region. For example, \n if you are copying an encrypted DB cluster snapshot from the us-west-2 Amazon Web Services Region, then your SourceDBClusterSnapshotIdentifier
\n looks like the following example: arn:aws:rds:us-west-2:123456789012:cluster-snapshot:aurora-cluster1-snapshot-20161115
.
To learn how to generate a Signature Version 4 signed request, see \n \n Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and\n \n Signature Version 4 Signing Process.
\nIf you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion
(or --source-region
for the CLI) \n instead of specifying PreSignedUrl
manually. Specifying SourceRegion
autogenerates a pre-signed URL that is a valid \n request for the operation that can be executed in the source Amazon Web Services Region.
\n TargetDBClusterSnapshotIdentifier
- The identifier for the new copy of the DB cluster snapshot in the destination Amazon Web Services Region.
\n SourceDBClusterSnapshotIdentifier
- The DB cluster snapshot identifier for the encrypted DB cluster snapshot to be copied. \n This identifier must be in the ARN format for the source Amazon Web Services Region and is the same value as the SourceDBClusterSnapshotIdentifier
in the pre-signed URL.
To cancel the copy operation once it is in progress, delete the target DB cluster snapshot identified\n by TargetDBClusterSnapshotIdentifier
while that DB cluster snapshot is in \"copying\" status.
For more information on copying encrypted Amazon Aurora DB cluster snapshots from one Amazon Web Services Region to another, see \n \n Copying a Snapshot in the Amazon Aurora User Guide.
\nFor more information on Amazon Aurora DB clusters, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.
\nFor more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.
" + "smithy.api#documentation": "Copies a snapshot of a DB cluster.
\nTo copy a DB cluster snapshot from a shared manual DB cluster snapshot, SourceDBClusterSnapshotIdentifier
\n must be the Amazon Resource Name (ARN) of the shared DB cluster snapshot.
You can copy an encrypted DB cluster snapshot from another Amazon Web Services Region. In that case,\n the Amazon Web Services Region where you call the CopyDBClusterSnapshot
operation is the\n destination Amazon Web Services Region for the encrypted DB cluster snapshot to be copied to. To copy\n an encrypted DB cluster snapshot from another Amazon Web Services Region, you must provide the\n following values:
\n KmsKeyId
- The Amazon Web Services Key Management System (Amazon Web Services KMS) key identifier for the key to use to \n encrypt the copy of the DB cluster snapshot in the destination Amazon Web Services Region.
\n TargetDBClusterSnapshotIdentifier
- The identifier for the new copy of the DB cluster snapshot in the destination Amazon Web Services Region.
\n SourceDBClusterSnapshotIdentifier
- The DB cluster snapshot\n identifier for the encrypted DB cluster snapshot to be copied. This identifier\n must be in the ARN format for the source Amazon Web Services Region and is the same value as\n the SourceDBClusterSnapshotIdentifier
in the presigned URL.
To cancel the copy operation once it is in progress, delete the target DB cluster snapshot identified\n by TargetDBClusterSnapshotIdentifier
while that DB cluster snapshot is in \"copying\" status.
For more information on copying encrypted Amazon Aurora DB cluster snapshots from one Amazon Web Services Region to another, see \n \n Copying a Snapshot in the Amazon Aurora User Guide.
\nFor more information on Amazon Aurora DB clusters, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.
\nFor more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.
" } }, "com.amazonaws.rds#CopyDBClusterSnapshotMessage": { @@ -1561,7 +1561,7 @@ "PreSignedUrl": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "The URL that contains a Signature Version 4 signed request for the CopyDBClusterSnapshot
API action in the Amazon Web Services Region that contains the \n source DB cluster snapshot to copy. The PreSignedUrl
parameter must be used when copying an encrypted DB cluster snapshot from another Amazon Web Services Region. \n Don't specify PreSignedUrl
when you are copying an encrypted DB cluster snapshot in the same Amazon Web Services Region.
The pre-signed URL must be a valid request for the CopyDBClusterSnapshot
API action that can be\n executed in the source Amazon Web Services Region that contains the encrypted DB cluster snapshot to be copied. \n The pre-signed URL request must contain the following parameter values:
\n KmsKeyId
- The Amazon Web Services KMS key identifier for the KMS key to use to encrypt the copy of the DB \n cluster snapshot in the destination Amazon Web Services Region. This is the same identifier for both the CopyDBClusterSnapshot
\n action that is called in the destination Amazon Web Services Region, and the action contained in the pre-signed URL.
\n DestinationRegion
- The name of the Amazon Web Services Region that the DB cluster snapshot is to be created in.
\n SourceDBClusterSnapshotIdentifier
- The DB cluster snapshot identifier for the encrypted DB cluster \n snapshot to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source Amazon Web Services Region. For example, \n if you are copying an encrypted DB cluster snapshot from the us-west-2 Amazon Web Services Region, then your SourceDBClusterSnapshotIdentifier
\n looks like the following example: arn:aws:rds:us-west-2:123456789012:cluster-snapshot:aurora-cluster1-snapshot-20161115
.
To learn how to generate a Signature Version 4 signed request, see \n\n \n Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and\n \n Signature Version 4 Signing Process.
\nIf you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion
(or --source-region
for the CLI) \n instead of specifying PreSignedUrl
manually. Specifying SourceRegion
autogenerates a pre-signed URL that is a valid \n request for the operation that can be executed in the source Amazon Web Services Region.
When you are copying a DB cluster snapshot from one Amazon Web Services GovCloud (US) Region\n to another, the URL that contains a Signature Version 4 signed request for the\n CopyDBClusterSnapshot
API operation in the Amazon Web Services Region that contains\n the source DB cluster snapshot to copy. Use the PreSignedUrl
parameter when\n copying an encrypted DB cluster snapshot from another Amazon Web Services Region. Don't specify\n PreSignedUrl
when copying an encrypted DB cluster snapshot in the same\n Amazon Web Services Region.
This setting applies only to Amazon Web Services GovCloud (US) Regions. It's ignored in other\n Amazon Web Services Regions.
\nThe presigned URL must be a valid request for the\n CopyDBClusterSnapshot
API operation that can run in the source\n Amazon Web Services Region that contains the encrypted DB cluster snapshot to copy. The presigned URL request\n must contain the following parameter values:
\n KmsKeyId
- The KMS key identifier for the KMS key\n to use to encrypt the copy of the DB cluster snapshot in the destination\n Amazon Web Services Region. This is the same identifier for both the\n CopyDBClusterSnapshot
operation that is called in the\n destination Amazon Web Services Region, and the operation contained in the presigned\n URL.
\n DestinationRegion
- The name of the Amazon Web Services Region \n that the DB cluster snapshot is to be created in.
\n SourceDBClusterSnapshotIdentifier
- The DB cluster snapshot identifier for the encrypted DB cluster \n snapshot to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source Amazon Web Services Region. For example, \n if you are copying an encrypted DB cluster snapshot from the us-west-2 Amazon Web Services Region, then your SourceDBClusterSnapshotIdentifier
\n looks like the following example: arn:aws:rds:us-west-2:123456789012:cluster-snapshot:aurora-cluster1-snapshot-20161115
.
To learn how to generate a Signature Version 4 signed request, see \n \n Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and\n \n Signature Version 4 Signing Process.
\nIf you are using an Amazon Web Services SDK tool or the CLI, you can specify\n SourceRegion
(or --source-region
for the CLI)\n instead of specifying PreSignedUrl
manually. Specifying\n SourceRegion
autogenerates a presigned URL that is a valid request\n for the operation that can run in the source Amazon Web Services Region.
Copies the specified DB snapshot. The source DB snapshot must be in the available
state.
You can copy a snapshot from one Amazon Web Services Region to another. In that case, the Amazon Web Services Region\n where you call the CopyDBSnapshot
action is the destination Amazon Web Services Region for the\n DB snapshot copy.
This command doesn't apply to RDS Custom.
\nFor more information about copying snapshots, see \n Copying a DB Snapshot in the Amazon RDS User Guide.
" + "smithy.api#documentation": "Copies the specified DB snapshot. The source DB snapshot must be in the available
state.
You can copy a snapshot from one Amazon Web Services Region to another. In that case, the\n Amazon Web Services Region where you call the CopyDBSnapshot
operation is the destination\n Amazon Web Services Region for the DB snapshot copy.
This command doesn't apply to RDS Custom.
\nFor more information about copying snapshots, see \n Copying a DB Snapshot in the Amazon RDS User Guide.
" } }, "com.amazonaws.rds#CopyDBSnapshotMessage": { @@ -1687,7 +1687,7 @@ "SourceDBSnapshotIdentifier": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "The identifier for the source DB snapshot.
\nIf the source snapshot is in the same Amazon Web Services Region as the copy, specify a valid DB\n snapshot identifier. For example, you might specify\n rds:mysql-instance1-snapshot-20130805
.
If the source snapshot is in a different Amazon Web Services Region than the copy, specify a valid DB\n snapshot ARN. For example, you might specify\n arn:aws:rds:us-west-2:123456789012:snapshot:mysql-instance1-snapshot-20130805
.
If you are copying from a shared manual DB snapshot, \n this parameter must be the Amazon Resource Name (ARN) of the shared DB snapshot.
\nIf you are copying an encrypted snapshot\n this parameter must be in the ARN format for the source Amazon Web Services Region, \n and must match the SourceDBSnapshotIdentifier
in the PreSignedUrl
parameter.
Constraints:
\nMust specify a valid system snapshot in the \"available\" state.
\nExample: rds:mydb-2012-04-02-00-01
\n
Example: arn:aws:rds:us-west-2:123456789012:snapshot:mysql-instance1-snapshot-20130805
\n
The identifier for the source DB snapshot.
\nIf the source snapshot is in the same Amazon Web Services Region as the copy, specify a valid DB\n snapshot identifier. For example, you might specify\n rds:mysql-instance1-snapshot-20130805
.
If the source snapshot is in a different Amazon Web Services Region than the copy, specify a valid DB\n snapshot ARN. For example, you might specify\n arn:aws:rds:us-west-2:123456789012:snapshot:mysql-instance1-snapshot-20130805
.
If you are copying from a shared manual DB snapshot, \n this parameter must be the Amazon Resource Name (ARN) of the shared DB snapshot.
\nIf you are copying an encrypted snapshot this parameter must be in the ARN format for the source Amazon Web Services Region.
\nConstraints:
\nMust specify a valid system snapshot in the \"available\" state.
\nExample: rds:mydb-2012-04-02-00-01
\n
Example: arn:aws:rds:us-west-2:123456789012:snapshot:mysql-instance1-snapshot-20130805
\n
The URL that contains a Signature Version 4 signed request for the\n CopyDBSnapshot
API action in the source Amazon Web Services Region that contains the\n source DB snapshot to copy.
You must specify this parameter when you copy an encrypted DB snapshot from another\n Amazon Web Services Region by using the Amazon RDS API. Don't specify PreSignedUrl
when you are \n copying an encrypted DB snapshot in the same Amazon Web Services Region.
The presigned URL must be a valid request for the CopyDBSnapshot
API action \n that can be executed in the source Amazon Web Services Region that contains the encrypted DB snapshot to be copied. \n The presigned URL request must contain the following parameter values:
\n DestinationRegion
- The Amazon Web Services Region that the encrypted DB snapshot is copied to. \n This Amazon Web Services Region is the same one where the CopyDBSnapshot
action is called that contains this presigned URL.
For example, if you copy an encrypted DB snapshot from the us-west-2 Amazon Web Services Region\n to the us-east-1 Amazon Web Services Region, then you call the CopyDBSnapshot
action in\n the us-east-1 Amazon Web Services Region and provide a presigned URL that contains a call to the\n CopyDBSnapshot
action in the us-west-2 Amazon Web Services Region. For this\n example, the DestinationRegion
in the presigned URL must be set to\n the us-east-1 Amazon Web Services Region.
\n KmsKeyId
- The Amazon Web Services KMS key identifier for the KMS key to use to encrypt the copy of the DB snapshot in the destination Amazon Web Services Region. \n This is the same identifier for both the CopyDBSnapshot
action that is called in the destination Amazon Web Services Region, \n and the action contained in the presigned URL.
\n SourceDBSnapshotIdentifier
- The DB snapshot identifier for the encrypted snapshot to be copied. \n This identifier must be in the Amazon Resource Name (ARN) format for the source Amazon Web Services Region. \n For example, if you are copying an encrypted DB snapshot from the us-west-2 Amazon Web Services Region, then your SourceDBSnapshotIdentifier
looks like\n the following example: arn:aws:rds:us-west-2:123456789012:snapshot:mysql-instance1-snapshot-20161115
.
To learn how to generate a Signature Version 4 signed request, see \n Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and\n Signature Version 4 Signing Process.
\nIf you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion
(or --source-region
for the CLI) \n instead of specifying PreSignedUrl
manually. Specifying SourceRegion
autogenerates a pre-signed URL that is a valid \n request for the operation that can be executed in the source Amazon Web Services Region.
When you are copying a snapshot from one Amazon Web Services GovCloud (US) Region to another, \n the URL that contains a Signature Version 4 signed request for the CopyDBSnapshot
API \n operation in the source Amazon Web Services Region that contains the source DB snapshot to copy.
This setting applies only to Amazon Web Services GovCloud (US) Regions. It's ignored in other\n Amazon Web Services Regions.
\nYou must specify this parameter when you copy an encrypted DB snapshot from another\n Amazon Web Services Region by using the Amazon RDS API. Don't specify PreSignedUrl
when you are \n copying an encrypted DB snapshot in the same Amazon Web Services Region.
The presigned URL must be a valid request for the\n CopyDBClusterSnapshot
API operation that can run in the source\n Amazon Web Services Region that contains the encrypted DB cluster snapshot to copy. The presigned URL request\n must contain the following parameter values:
\n DestinationRegion
- The Amazon Web Services Region that the encrypted DB\n snapshot is copied to. This Amazon Web Services Region is the same one where the\n CopyDBSnapshot
operation is called that contains this presigned\n URL.
For example, if you copy an encrypted DB snapshot from the us-west-2\n Amazon Web Services Region to the us-east-1 Amazon Web Services Region, then you call the\n CopyDBSnapshot
operation in the us-east-1 Amazon Web Services Region and\n provide a presigned URL that contains a call to the CopyDBSnapshot
\n operation in the us-west-2 Amazon Web Services Region. For this example, the\n DestinationRegion
in the presigned URL must be set to the\n us-east-1 Amazon Web Services Region.
\n KmsKeyId
- The KMS key identifier for the KMS key to use to\n encrypt the copy of the DB snapshot in the destination Amazon Web Services Region. This is the\n same identifier for both the CopyDBSnapshot
operation that is\n called in the destination Amazon Web Services Region, and the operation contained in the\n presigned URL.
\n SourceDBSnapshotIdentifier
- The DB snapshot identifier for the encrypted snapshot to be copied. \n This identifier must be in the Amazon Resource Name (ARN) format for the source Amazon Web Services Region. \n For example, if you are copying an encrypted DB snapshot from the us-west-2 Amazon Web Services Region, then your SourceDBSnapshotIdentifier
looks like\n the following example: arn:aws:rds:us-west-2:123456789012:snapshot:mysql-instance1-snapshot-20161115
.
To learn how to generate a Signature Version 4 signed request, see \n Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and\n Signature Version 4 Signing Process.
\nIf you are using an Amazon Web Services SDK tool or the CLI, you can specify\n SourceRegion
(or --source-region
for the CLI)\n instead of specifying PreSignedUrl
manually. Specifying\n SourceRegion
autogenerates a presigned URL that is a valid request\n for the operation that can run in the source Amazon Web Services Region.
Creates a new Amazon Aurora DB cluster or Multi-AZ DB cluster.
\nYou can use the ReplicationSourceIdentifier
parameter to create an Amazon\n Aurora DB cluster as a read replica of another DB cluster or Amazon RDS MySQL or\n PostgreSQL DB instance. For cross-Region replication where the DB cluster identified by\n ReplicationSourceIdentifier
is encrypted, also specify the\n PreSignedUrl
parameter.
For more information on Amazon Aurora, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.
\nFor more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.
" + "smithy.api#documentation": "Creates a new Amazon Aurora DB cluster or Multi-AZ DB cluster.
\nYou can use the ReplicationSourceIdentifier
parameter to create an Amazon\n Aurora DB cluster as a read replica of another DB cluster or Amazon RDS MySQL or\n PostgreSQL DB instance.
For more information on Amazon Aurora, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.
\nFor more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.
" } }, "com.amazonaws.rds#CreateDBClusterEndpoint": { @@ -1982,7 +1982,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates a new custom endpoint and associates it with an Amazon Aurora DB cluster.
\nThis action only applies to Aurora DB clusters.
\nCreates a new custom endpoint and associates it with an Amazon Aurora DB cluster.
\nThis action applies only to Aurora DB clusters.
\nA URL that contains a Signature Version 4 signed request for \n the CreateDBCluster
action to be called in the source Amazon Web Services Region where the DB cluster is replicated from. \n Specify PreSignedUrl
only when you are performing cross-Region replication from an encrypted DB cluster.
The pre-signed URL must be a valid request for the CreateDBCluster
API action \n that can be executed in the source Amazon Web Services Region that contains the encrypted DB cluster to be copied.
The pre-signed URL request must contain the following parameter values:
\n\n KmsKeyId
- The Amazon Web Services KMS key identifier for the KMS key to use to encrypt the copy of \n the DB cluster in the destination Amazon Web Services Region. This should refer to the same KMS key for both the CreateDBCluster
\n action that is called in the destination Amazon Web Services Region, and the action contained in the pre-signed URL.
\n DestinationRegion
- The name of the Amazon Web Services Region that Aurora read replica will\n be created in.
\n ReplicationSourceIdentifier
- The DB cluster identifier for the encrypted DB cluster to be copied. \n This identifier must be in the Amazon Resource Name (ARN) format for the source Amazon Web Services Region. For example, if you are copying an \n encrypted DB cluster from the us-west-2 Amazon Web Services Region, then your ReplicationSourceIdentifier
would look like\n Example: arn:aws:rds:us-west-2:123456789012:cluster:aurora-cluster1
.
To learn how to generate a Signature Version 4 signed request, see \n \n Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and\n \n Signature Version 4 Signing Process.
\nIf you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion
(or --source-region
for the CLI) \n instead of specifying PreSignedUrl
manually. Specifying SourceRegion
autogenerates a pre-signed URL that is a valid \n request for the operation that can be executed in the source Amazon Web Services Region.
Valid for: Aurora DB clusters only
" + "smithy.api#documentation": "When you are replicating a DB cluster from one Amazon Web Services GovCloud (US) Region to another,\n an URL that contains a Signature Version 4 signed request for the\n CreateDBCluster
operation to be called in the source Amazon Web Services Region where\n the DB cluster is replicated from. Specify PreSignedUrl
only when you are\n performing cross-Region replication from an encrypted DB cluster.
The presigned URL must be a valid request for the CreateDBCluster
API\n operation that can run in the source Amazon Web Services Region that contains the encrypted DB\n cluster to copy.
The presigned URL request must contain the following parameter values:
\n\n KmsKeyId
- The KMS key identifier for the KMS key to use to\n encrypt the copy of the DB cluster in the destination Amazon Web Services Region. This should\n refer to the same KMS key for both the CreateDBCluster
operation\n that is called in the destination Amazon Web Services Region, and the operation contained in\n the presigned URL.
\n DestinationRegion
- The name of the Amazon Web Services Region that Aurora read replica will\n be created in.
\n ReplicationSourceIdentifier
- The DB cluster identifier for the encrypted DB cluster to be copied. \n This identifier must be in the Amazon Resource Name (ARN) format for the source Amazon Web Services Region. For example, if you are copying an \n encrypted DB cluster from the us-west-2 Amazon Web Services Region, then your ReplicationSourceIdentifier
would look like\n Example: arn:aws:rds:us-west-2:123456789012:cluster:aurora-cluster1
.
To learn how to generate a Signature Version 4 signed request, see \n \n Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and\n \n Signature Version 4 Signing Process.
\nIf you are using an Amazon Web Services SDK tool or the CLI, you can specify\n SourceRegion
(or --source-region
for the CLI)\n instead of specifying PreSignedUrl
manually. Specifying\n SourceRegion
autogenerates a presigned URL that is a valid request\n for the operation that can run in the source Amazon Web Services Region.
Valid for: Aurora DB clusters only
" } }, "EnableIAMDatabaseAuthentication": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "A value that indicates whether to enable mapping of Amazon Web Services Identity and Access\n Management (IAM) accounts to database accounts. By default, mapping isn't\n enabled.
\nFor more information, see \n \n IAM Database Authentication in the Amazon Aurora User Guide..
\nValid for: Aurora DB clusters only
" + "smithy.api#documentation": "A value that indicates whether to enable mapping of Amazon Web Services Identity and Access\n Management (IAM) accounts to database accounts. By default, mapping isn't\n enabled.
\nFor more information, see \n \n IAM Database Authentication in the Amazon Aurora User Guide.
\nValid for: Aurora DB clusters only
" } }, "BacktrackWindow": { @@ -2181,7 +2181,7 @@ "EngineMode": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "The DB engine mode of the DB cluster, either provisioned
, serverless
, \n parallelquery
, global
, or multimaster
.
The parallelquery
engine mode isn't required for Aurora MySQL version 1.23 and higher 1.x versions, \n and version 2.09 and higher 2.x versions.
The global
engine mode isn't required for Aurora MySQL version 1.22 and higher 1.x versions, \n and global
engine mode isn't required for any 2.x versions.
The multimaster
engine mode only applies for DB clusters created with Aurora MySQL version 5.6.10a.
For Aurora PostgreSQL, the global
engine mode isn't required, and both the parallelquery
\n and the multimaster
engine modes currently aren't supported.
Limitations and requirements apply to some DB engine modes. For more information, see the \n following sections in the Amazon Aurora User Guide:
\nValid for: Aurora DB clusters only
" + "smithy.api#documentation": "The DB engine mode of the DB cluster, either provisioned
, serverless
, \n parallelquery
, global
, or multimaster
.
The parallelquery
engine mode isn't required for Aurora MySQL version 1.23 and higher 1.x versions, \n and version 2.09 and higher 2.x versions.
The global
engine mode isn't required for Aurora MySQL version 1.22 and higher 1.x versions, \n and global
engine mode isn't required for any 2.x versions.
The multimaster
engine mode only applies for DB clusters created with Aurora MySQL version 5.6.10a.
The serverless
engine mode only applies for Aurora Serverless v1 DB clusters.
For Aurora PostgreSQL, the global
engine mode isn't required, and both the parallelquery
\n and the multimaster
engine modes currently aren't supported.
Limitations and requirements apply to some DB engine modes. For more information, see the \n following sections in the Amazon Aurora User Guide:
\nValid for: Aurora DB clusters only
" } }, "ScalingConfiguration": { @@ -2295,7 +2295,7 @@ "PerformanceInsightsRetentionPeriod": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).
\nValid for: Multi-AZ DB clusters only
" + "smithy.api#documentation": "The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:
\n7
\n\n month * 31, where month is a number of months from 1-23
\n731
\nFor example, the following values are valid:
\n93 (3 months * 31)
\n341 (11 months * 31)
\n589 (19 months * 31)
\n731
\nIf you specify a retention period such as 94, which isn't a valid value, RDS issues an error.
\nValid for: Multi-AZ DB clusters only
" } }, "ServerlessV2ScalingConfiguration": { @@ -2323,7 +2323,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates a new DB cluster parameter group.
\nParameters in a DB cluster parameter group apply to all of the instances in a DB cluster.
\nA DB cluster parameter group is initially created with the default parameters for the\n database engine used by instances in the DB cluster. To provide custom values for any of the\n parameters, you must modify the group after creating it using\n ModifyDBClusterParameterGroup
. Once you've created a DB cluster parameter group, you need to\n associate it with your DB cluster using ModifyDBCluster
.
When you associate a new DB cluster parameter group with a running Aurora DB cluster, reboot the DB\n instances in the DB cluster without failover for the new DB cluster parameter group and \n associated settings to take effect.
\nWhen you associate a new DB cluster parameter group with a running Multi-AZ DB cluster, reboot the DB\n cluster without failover for the new DB cluster parameter group and associated settings to take effect.
\nAfter you create a DB cluster parameter group, you should wait at least 5 minutes\n before creating your first DB cluster\n that uses that DB cluster parameter group as the default parameter \n group. This allows Amazon RDS to fully complete the create action before the DB cluster parameter \n group is used as the default for a new DB cluster. This is especially important for parameters \n that are critical when creating the default database for a DB cluster, such as the character set \n for the default database defined by the character_set_database
parameter. You can use the \n Parameter Groups option of the Amazon RDS console or the \n DescribeDBClusterParameters
action to verify \n that your DB cluster parameter group has been created or modified.
For more information on Amazon Aurora, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.
\nFor more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.
" + "smithy.api#documentation": "Creates a new DB cluster parameter group.
\nParameters in a DB cluster parameter group apply to all of the instances in a DB cluster.
\nA DB cluster parameter group is initially created with the default parameters for the\n database engine used by instances in the DB cluster. To provide custom values for any of the\n parameters, you must modify the group after creating it using\n ModifyDBClusterParameterGroup
. Once you've created a DB cluster parameter group, you need to\n associate it with your DB cluster using ModifyDBCluster
.
When you associate a new DB cluster parameter group with a running Aurora DB cluster, reboot the DB\n instances in the DB cluster without failover for the new DB cluster parameter group and \n associated settings to take effect.
\nWhen you associate a new DB cluster parameter group with a running Multi-AZ DB cluster, reboot the DB\n cluster without failover for the new DB cluster parameter group and associated settings to take effect.
\nAfter you create a DB cluster parameter group, you should wait at least 5 minutes\n before creating your first DB cluster that uses that DB cluster parameter group as\n the default parameter group. This allows Amazon RDS to fully complete the create\n action before the DB cluster parameter group is used as the default for a new DB\n cluster. This is especially important for parameters that are critical when creating\n the default database for a DB cluster, such as the character set for the default\n database defined by the character_set_database
parameter. You can use\n the Parameter Groups option of the Amazon RDS console or the\n DescribeDBClusterParameters
operation to verify that your DB\n cluster parameter group has been created or modified.
For more information on Amazon Aurora, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.
\nFor more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.
" } }, "com.amazonaws.rds#CreateDBClusterParameterGroupMessage": { @@ -2513,7 +2513,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates a new DB instance.
" + "smithy.api#documentation": "Creates a new DB instance.
\nThe new DB instance can be an RDS DB instance, or it can be a DB instance in an Aurora DB cluster. \n For an Aurora DB cluster, you can call this operation multiple times to add more than one DB instance \n to the cluster.
\nFor more information about creating an RDS DB instance, see \n Creating an Amazon RDS DB instance in the Amazon RDS User Guide.
\nFor more information about creating a DB instance in an Aurora DB cluster, see \n \n Creating an Amazon Aurora DB cluster in the Amazon Aurora User Guide.
" } }, "com.amazonaws.rds#CreateDBInstanceMessage": { @@ -2541,7 +2541,7 @@ "DBInstanceClass": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "The compute and memory capacity of the DB instance, for example db.m4.large.\n Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines.\n For the full list of DB instance classes,\n and availability for your engine, see\n DB Instance Class in the Amazon RDS User Guide.
", + "smithy.api#documentation": "The compute and memory capacity of the DB instance, for example db.m5.large.\n Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines.\n For the full list of DB instance classes, and availability for your engine, see\n DB instance \n classes in the Amazon RDS User Guide or \n Aurora \n DB instance classes in the Amazon Aurora User Guide.
", "smithy.api#required": {} } }, @@ -2567,7 +2567,7 @@ "DBSecurityGroups": { "target": "com.amazonaws.rds#DBSecurityGroupNameList", "traits": { - "smithy.api#documentation": "A list of DB security groups to associate with this DB instance.
\nDefault: The default DB security group for the database engine.
" + "smithy.api#documentation": "A list of DB security groups to associate with this DB instance.
\nThis setting applies to the legacy EC2-Classic platform, which is no longer used to create \n new DB instances. Use the VpcSecurityGroupIds
setting instead.
The number of days for which automated backups are retained. Setting this parameter to a positive number enables \n backups. Setting this parameter to 0 disables automated backups.
\n\n Amazon Aurora\n
\nNot applicable. The retention period for automated backups is managed by the DB cluster.
\nDefault: 1
\nConstraints:
\nMust be a value from 0 to 35
\nCan't be set to 0 if the DB instance is a source to read replicas
\nCan't be set to 0 or 35 for an RDS Custom for Oracle DB instance
\nThe number of days for which automated backups are retained. Setting this parameter to a positive number enables \n backups. Setting this parameter to 0 disables automated backups.
\n\n Amazon Aurora\n
\nNot applicable. The retention period for automated backups is managed by the DB cluster.
\nDefault: 1
\nConstraints:
\nMust be a value from 0 to 35
\nCan't be set to 0 if the DB instance is a source to read replicas
\nCan't be set to 0 for an RDS Custom for Oracle DB instance
\nA value that indicates whether the DB instance is a Multi-AZ deployment. You can't set \n the AvailabilityZone
parameter if the DB instance is a Multi-AZ deployment.
This setting doesn't apply to RDS Custom.
" + "smithy.api#documentation": "A value that indicates whether the DB instance is a Multi-AZ deployment. You can't set \n the AvailabilityZone
parameter if the DB instance is a Multi-AZ deployment.
This setting doesn't apply to RDS Custom.
\n\n Amazon Aurora\n
\nNot applicable. DB instance Availability Zones (AZs) are managed by the DB cluster.
" } }, "EngineVersion": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "The version number of the database engine to use.
\nFor a list of valid engine versions, use the DescribeDBEngineVersions
action.
The following are the database engines and links to information about the major and minor versions that are available with \n Amazon RDS. Not every database engine is available for every Amazon Web Services Region.
\n\n Amazon Aurora\n
\nNot applicable. The version number of the database engine to be used by the DB\n instance is managed by the DB cluster.
\n\n Amazon RDS Custom for Oracle\n
\nA custom engine version (CEV) that you have previously created. This setting is required for RDS Custom for Oracle. The CEV \n name has the following format: 19.customized_string\n
. An example identifier is \n 19.my_cev1
. For more information, see \n Creating an RDS Custom for Oracle DB instance in the Amazon RDS User Guide.
\n Amazon RDS Custom for SQL Server\n
\nSee RDS Custom for SQL Server general requirements \n in the Amazon RDS User Guide.
\n\n MariaDB\n
\nFor information, see MariaDB on Amazon RDS Versions in the \n Amazon RDS User Guide.
\n\n Microsoft SQL Server\n
\nFor information, see Microsoft SQL Server Versions on Amazon RDS in the \n Amazon RDS User Guide.
\n\n MySQL\n
\nFor information, see MySQL on Amazon RDS Versions in the \n Amazon RDS User Guide.
\n\n Oracle\n
\nFor information, see Oracle Database Engine Release Notes in the \n Amazon RDS User Guide.
\n\n PostgreSQL\n
\nFor information, see Amazon RDS for PostgreSQL versions and extensions in the \n Amazon RDS User Guide.
" + "smithy.api#documentation": "The version number of the database engine to use.
\nFor a list of valid engine versions, use the DescribeDBEngineVersions
\n operation.
The following are the database engines and links to information about the major and minor versions that are available with \n Amazon RDS. Not every database engine is available for every Amazon Web Services Region.
\n\n Amazon Aurora\n
\nNot applicable. The version number of the database engine to be used by the DB\n instance is managed by the DB cluster.
\n\n Amazon RDS Custom for Oracle\n
\nA custom engine version (CEV) that you have previously created. This setting is required for RDS Custom for Oracle. The CEV \n name has the following format: 19.customized_string\n
. An example identifier is \n 19.my_cev1
. For more information, see \n Creating an RDS Custom for Oracle DB instance in the Amazon RDS User Guide.
\n Amazon RDS Custom for SQL Server\n
\nSee RDS Custom for SQL Server general requirements \n in the Amazon RDS User Guide.
\n\n MariaDB\n
\nFor information, see MariaDB on Amazon RDS Versions in the \n Amazon RDS User Guide.
\n\n Microsoft SQL Server\n
\nFor information, see Microsoft SQL Server Versions on Amazon RDS in the \n Amazon RDS User Guide.
\n\n MySQL\n
\nFor information, see MySQL on Amazon RDS Versions in the \n Amazon RDS User Guide.
\n\n Oracle\n
\nFor information, see Oracle Database Engine Release Notes in the \n Amazon RDS User Guide.
\n\n PostgreSQL\n
\nFor information, see Amazon RDS for PostgreSQL versions and extensions in the \n Amazon RDS User Guide.
" } }, "AutoMinorVersionUpgrade": { @@ -2639,19 +2639,19 @@ "LicenseModel": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "License model information for this DB instance.
\nValid values: license-included
| bring-your-own-license
| general-public-license
\n
This setting doesn't apply to RDS Custom.
" + "smithy.api#documentation": "License model information for this DB instance.
\nValid values: license-included
| bring-your-own-license
| general-public-license
\n
This setting doesn't apply to RDS Custom.
\n\n Amazon Aurora\n
\nNot applicable.
" } }, "Iops": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance.\n For information about valid Iops
values, see Amazon RDS Provisioned IOPS storage to improve performance in the Amazon RDS User Guide.
Constraints: For MariaDB, MySQL, Oracle, and PostgreSQL DB instances, must be a multiple between .5 and 50 \n of the storage amount for the DB instance. For SQL Server DB instances, must be a multiple between 1 and 50 \n of the storage amount for the DB instance.
" + "smithy.api#documentation": "The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance.\n For information about valid Iops
values, see Amazon RDS Provisioned IOPS storage to improve performance in the Amazon RDS User Guide.
Constraints: For MariaDB, MySQL, Oracle, and PostgreSQL DB instances, must be a multiple between .5 and 50 \n of the storage amount for the DB instance. For SQL Server DB instances, must be a multiple between 1 and 50 \n of the storage amount for the DB instance.
\n\n Amazon Aurora\n
\nNot applicable. Storage is managed by the DB cluster.
" } }, "OptionGroupName": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "A value that indicates that the DB instance should be associated with the specified option group.
\nPermanent options, such as the TDE option for Oracle Advanced Security TDE, can't be removed \n from an option group. Also, that option group can't be removed from a DB instance after it is \n associated with a DB instance.
\nThis setting doesn't apply to RDS Custom.
" + "smithy.api#documentation": "A value that indicates that the DB instance should be associated with the specified option group.
\nPermanent options, such as the TDE option for Oracle Advanced Security TDE, can't be removed \n from an option group. Also, that option group can't be removed from a DB instance after it is \n associated with a DB instance.
\nThis setting doesn't apply to RDS Custom.
\n\n Amazon Aurora\n
\nNot applicable.
" } }, "CharacterSetName": { @@ -2687,13 +2687,13 @@ "StorageType": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "Specifies the storage type to be associated with the DB instance.
\nValid values: standard | gp2 | io1
\n
If you specify io1
, you must also include a value for the\n Iops
parameter.
Default: io1
if the Iops
parameter\n is specified, otherwise gp2
\n
Specifies the storage type to be associated with the DB instance.
\nValid values: standard | gp2 | io1
\n
If you specify io1
, you must also include a value for the\n Iops
parameter.
Default: io1
if the Iops
parameter\n is specified, otherwise gp2
\n
\n Amazon Aurora\n
\nNot applicable. Storage is managed by the DB cluster.
" } }, "TdeCredentialArn": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "The ARN from the key store with which to associate the instance for TDE encryption.
\nThis setting doesn't apply to RDS Custom.
" + "smithy.api#documentation": "The ARN from the key store with which to associate the instance for TDE encryption.
\nThis setting doesn't apply to RDS Custom.
\n\n Amazon Aurora\n
\nNot applicable.
" } }, "TdeCredentialPassword": { @@ -2717,7 +2717,7 @@ "Domain": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "The Active Directory directory ID to create the DB instance in. Currently, only MySQL, Microsoft SQL \n Server, Oracle, and PostgreSQL DB instances can be created in an Active Directory Domain.
\nFor more information, see \n Kerberos Authentication in the Amazon RDS User Guide.
\nThis setting doesn't apply to RDS Custom.
" + "smithy.api#documentation": "The Active Directory directory ID to create the DB instance in. Currently, only MySQL, Microsoft SQL \n Server, Oracle, and PostgreSQL DB instances can be created in an Active Directory Domain.
\nFor more information, see \n Kerberos Authentication in the Amazon RDS User Guide.
\nThis setting doesn't apply to RDS Custom.
\n\n Amazon Aurora\n
\nNot applicable. The domain is managed by the DB cluster.
" } }, "CopyTagsToSnapshot": { @@ -2741,7 +2741,7 @@ "DomainIAMRoleName": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "Specify the name of the IAM role to be used when making API calls to the Directory Service.
\nThis setting doesn't apply to RDS Custom.
" + "smithy.api#documentation": "Specify the name of the IAM role to be used when making API calls to the Directory Service.
\nThis setting doesn't apply to RDS Custom.
\n\n Amazon Aurora\n
\nNot applicable. The domain is managed by the DB cluster.
" } }, "PromotionTier": { @@ -2759,7 +2759,7 @@ "EnableIAMDatabaseAuthentication": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "A value that indicates whether to enable mapping of Amazon Web Services Identity and Access Management\n (IAM) accounts to database accounts. By default, mapping isn't enabled.
\nThis setting doesn't apply to RDS Custom or Amazon Aurora. In Aurora, mapping Amazon Web Services IAM accounts \n to database accounts is managed by the DB cluster.
\nFor more information, see \n \n IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.
" + "smithy.api#documentation": "A value that indicates whether to enable mapping of Amazon Web Services Identity and Access Management\n (IAM) accounts to database accounts. By default, mapping isn't enabled.
\nFor more information, see \n \n IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.
\nThis setting doesn't apply to RDS Custom.
\n\n Amazon Aurora\n
\nNot applicable. Mapping Amazon Web Services IAM accounts to database accounts is managed by the DB cluster.
" } }, "EnablePerformanceInsights": { @@ -2777,7 +2777,7 @@ "PerformanceInsightsRetentionPeriod": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).
\nThis setting doesn't apply to RDS Custom.
" + "smithy.api#documentation": "The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:
\n7
\n\n month * 31, where month is a number of months from 1-23
\n731
\nFor example, the following values are valid:
\n93 (3 months * 31)
\n341 (11 months * 31)
\n589 (19 months * 31)
\n731
\nIf you specify a retention period such as 94, which isn't a valid value, RDS issues an error.
\nThis setting doesn't apply to RDS Custom.
" } }, "EnableCloudwatchLogsExports": { @@ -2789,7 +2789,7 @@ "ProcessorFeatures": { "target": "com.amazonaws.rds#ProcessorFeatureList", "traits": { - "smithy.api#documentation": "The number of CPU cores and the number of threads per core for the DB instance class of the DB instance.
\nThis setting doesn't apply to RDS Custom.
" + "smithy.api#documentation": "The number of CPU cores and the number of threads per core for the DB instance class of the DB instance.
\nThis setting doesn't apply to RDS Custom.
\n\n Amazon Aurora\n
\nNot applicable.
" } }, "DeletionProtection": { @@ -2801,7 +2801,7 @@ "MaxAllocatedStorage": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "The upper limit in gibibytes (GiB) to which Amazon RDS can automatically scale the storage of the DB instance.
\nFor more information about this setting, including limitations that apply to it, see \n \n Managing capacity automatically with Amazon RDS storage autoscaling \n in the Amazon RDS User Guide.
\nThis setting doesn't apply to RDS Custom.
" + "smithy.api#documentation": "The upper limit in gibibytes (GiB) to which Amazon RDS can automatically scale the storage of the DB instance.
\nFor more information about this setting, including limitations that apply to it, see \n \n Managing capacity automatically with Amazon RDS storage autoscaling \n in the Amazon RDS User Guide.
\nThis setting doesn't apply to RDS Custom.
\n\n Amazon Aurora\n
\nNot applicable. Storage is managed by the DB cluster.
" } }, "EnableCustomerOwnedIp": { @@ -2904,7 +2904,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates a new DB instance that acts as a read replica for an existing source DB\n instance. You can create a read replica for a DB instance running MySQL, MariaDB,\n Oracle, PostgreSQL, or SQL Server. For more information, see Working with Read\n Replicas in the Amazon RDS User Guide.
\nAmazon Aurora doesn't support this action. Call the CreateDBInstance
\n action to create a DB instance for an Aurora DB cluster.
All read replica DB instances are created with backups disabled. All other DB\n instance attributes (including DB security groups and DB parameter groups) are inherited\n from the source DB instance, except as specified.
\nYour source DB instance must have backup retention enabled.
\nCreates a new DB instance that acts as a read replica for an existing source DB\n instance. You can create a read replica for a DB instance running MySQL, MariaDB,\n Oracle, PostgreSQL, or SQL Server. For more information, see Working with Read\n Replicas in the Amazon RDS User Guide.
\nAmazon Aurora doesn't support this operation. Call the CreateDBInstance
\n operation to create a DB instance for an Aurora DB cluster.
All read replica DB instances are created with backups disabled. All other DB\n instance attributes (including DB security groups and DB parameter groups) are inherited\n from the source DB instance, except as specified.
\nYour source DB instance must have backup retention enabled.
\nThe name of the DB parameter group to associate with this DB instance.
\nIf you do not specify a value for DBParameterGroupName
, then Amazon RDS\n uses the DBParameterGroup
of source DB instance for a same Region read\n replica, or the default DBParameterGroup
for the specified DB engine for a\n cross-Region read replica.
Specifying a parameter group for this operation is only supported for Oracle DB instances. It \n isn't supported for RDS Custom.
\nConstraints:
\nMust be 1 to 255 letters, numbers, or hyphens.
\nFirst character must be a letter
\nCan't end with a hyphen or contain two consecutive hyphens
\nThe name of the DB parameter group to associate with this DB instance.
\nIf you do not specify a value for DBParameterGroupName
, then Amazon RDS\n uses the DBParameterGroup
of source DB instance for a same Region read\n replica, or the default DBParameterGroup
for the specified DB engine for a\n cross-Region read replica.
Specifying a parameter group for this operation is only supported for MySQL and Oracle DB instances. \n It isn't supported for RDS Custom.
\nConstraints:
\nMust be 1 to 255 letters, numbers, or hyphens.
\nFirst character must be a letter
\nCan't end with a hyphen or contain two consecutive hyphens
\nThe URL that contains a Signature Version 4 signed request for the CreateDBInstanceReadReplica
API action \n in the source Amazon Web Services Region that contains the source DB instance.
You must specify this parameter when you create an encrypted read replica from\n another Amazon Web Services Region by using the Amazon RDS API. Don't specify\n PreSignedUrl
when you are creating an encrypted read replica in the\n same Amazon Web Services Region.
The presigned URL must be a valid request for the CreateDBInstanceReadReplica
API action \n that can be executed in the source Amazon Web Services Region that contains the encrypted source DB instance. \n The presigned URL request must contain the following parameter values:
\n DestinationRegion
- The Amazon Web Services Region that the encrypted read\n replica is created in. This Amazon Web Services Region is the same one where the\n CreateDBInstanceReadReplica
action is called that contains this presigned URL.
For example, if you create an encrypted DB instance in the us-west-1 Amazon Web Services Region,\n from a source DB instance in the us-east-2 Amazon Web Services Region, \n then you call the CreateDBInstanceReadReplica
action in\n the us-east-1 Amazon Web Services Region and provide a presigned URL that contains a call to the\n CreateDBInstanceReadReplica
action in the us-west-2 Amazon Web Services Region. For this\n example, the DestinationRegion
in the presigned URL must be set to\n the us-east-1 Amazon Web Services Region.
\n KmsKeyId
- The Amazon Web Services KMS key identifier for the key to use to\n encrypt the read replica in the destination Amazon Web Services Region. This is the same\n identifier for both the CreateDBInstanceReadReplica
action that is\n called in the destination Amazon Web Services Region, and the action contained in the presigned\n URL.
\n SourceDBInstanceIdentifier
- The DB instance identifier for\n the encrypted DB instance to be replicated. This identifier must be in the\n Amazon Resource Name (ARN) format for the source Amazon Web Services Region. For example, if you\n are creating an encrypted read replica from a DB instance in the us-west-2 Amazon Web Services\n Region, then your SourceDBInstanceIdentifier
looks like the\n following example:\n arn:aws:rds:us-west-2:123456789012:instance:mysql-instance1-20161115
.
To learn how to generate a Signature Version 4 signed request, see \n Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and\n Signature Version 4 Signing Process.
\nIf you are using an Amazon Web Services SDK tool or the CLI, you can specify\n SourceRegion
(or --source-region
for the CLI)\n instead of specifying PreSignedUrl
manually. Specifying\n SourceRegion
autogenerates a presigned URL that is a valid request\n for the operation that can be executed in the source Amazon Web Services Region.
\n SourceRegion
isn't supported for SQL Server, because SQL Server on Amazon RDS\n doesn't support cross-Region read replicas.
This setting doesn't apply to RDS Custom.
" + "smithy.api#documentation": "When you are creating a read replica from one Amazon Web Services GovCloud (US) Region to another or\n from one China Amazon Web Services Region to another, the URL that contains a Signature Version 4\n signed request for the CreateDBInstanceReadReplica
API operation in the\n source Amazon Web Services Region that contains the source DB instance.
This setting applies only to Amazon Web Services GovCloud (US) Regions and \n China Amazon Web Services Regions. It's ignored in other Amazon Web Services Regions.
\nYou must specify this parameter when you create an encrypted read replica from\n another Amazon Web Services Region by using the Amazon RDS API. Don't specify\n PreSignedUrl
when you are creating an encrypted read replica in the\n same Amazon Web Services Region.
The presigned URL must be a valid request for the\n CreateDBInstanceReadReplica
API operation that can run in the\n source Amazon Web Services Region that contains the encrypted source DB instance. The presigned URL\n request must contain the following parameter values:
\n DestinationRegion
- The Amazon Web Services Region that the encrypted read\n replica is created in. This Amazon Web Services Region is the same one where the\n CreateDBInstanceReadReplica
operation is called that contains\n this presigned URL.
For example, if you create an encrypted DB instance in the us-west-1\n Amazon Web Services Region, from a source DB instance in the us-east-2 Amazon Web Services Region, then you\n call the CreateDBInstanceReadReplica
    operation in the us-west-1\n            Amazon Web Services Region and provide a presigned URL that contains a call to the\n            CreateDBInstanceReadReplica
    operation in the us-east-2\n            Amazon Web Services Region. For this example, the DestinationRegion
    in the\n            presigned URL must be set to the us-west-1 Amazon Web Services Region.
    
\n KmsKeyId
- The KMS key identifier for the key to use to\n encrypt the read replica in the destination Amazon Web Services Region. This is the same\n identifier for both the CreateDBInstanceReadReplica
operation that\n is called in the destination Amazon Web Services Region, and the operation contained in the\n presigned URL.
\n SourceDBInstanceIdentifier
- The DB instance identifier for\n the encrypted DB instance to be replicated. This identifier must be in the\n Amazon Resource Name (ARN) format for the source Amazon Web Services Region. For example, if you\n are creating an encrypted read replica from a DB instance in the us-west-2 Amazon Web Services\n Region, then your SourceDBInstanceIdentifier
looks like the\n following example:\n arn:aws:rds:us-west-2:123456789012:instance:mysql-instance1-20161115
.
To learn how to generate a Signature Version 4 signed request, see \n Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and\n Signature Version 4 Signing Process.
\nIf you are using an Amazon Web Services SDK tool or the CLI, you can specify\n SourceRegion
(or --source-region
for the CLI)\n instead of specifying PreSignedUrl
manually. Specifying\n SourceRegion
autogenerates a presigned URL that is a valid request\n for the operation that can run in the source Amazon Web Services Region.
\n SourceRegion
isn't supported for SQL Server, because Amazon RDS for SQL Server \n doesn't support cross-Region read replicas.
This setting doesn't apply to RDS Custom.
" } }, "EnableIAMDatabaseAuthentication": { @@ -3050,7 +3050,7 @@ "PerformanceInsightsRetentionPeriod": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).
\nThis setting doesn't apply to RDS Custom.
" + "smithy.api#documentation": "The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:
\n7
\n\n month * 31, where month is a number of months from 1-23
\n731
\nFor example, the following values are valid:
\n93 (3 months * 31)
\n341 (11 months * 31)
\n589 (19 months * 31)
\n731
\nIf you specify a retention period such as 94, which isn't a valid value, RDS issues an error.
\n \nThis setting doesn't apply to RDS Custom.
" } }, "EnableCloudwatchLogsExports": { @@ -3311,7 +3311,7 @@ "EngineFamily": { "target": "com.amazonaws.rds#EngineFamily", "traits": { - "smithy.api#documentation": "The kinds of databases that the proxy can connect to. \n This value determines which database network protocol the proxy recognizes when it interprets\n network traffic to and from the database. \n The engine family applies to MySQL and PostgreSQL for both RDS and Aurora.
", + "smithy.api#documentation": "The kinds of databases that the proxy can connect to. \n This value determines which database network protocol the proxy recognizes when it interprets\n network traffic to and from the database. For Aurora MySQL, RDS for MariaDB, and RDS for MySQL databases, specify MYSQL
. \n For Aurora PostgreSQL and RDS for PostgreSQL databases, specify POSTGRESQL
.
Creates an RDS event notification subscription. This action requires a topic Amazon\n Resource Name (ARN) created by either the RDS console, the SNS console, or the SNS API.\n To obtain an ARN with SNS, you must create a topic in Amazon SNS and subscribe to the\n topic. The ARN is displayed in the SNS console.
\nYou can specify the type of source (SourceType
) that you want to be\n notified of and provide a list of RDS sources (SourceIds
) that triggers the\n events. You can also provide a list of event categories (EventCategories
)\n for events that you want to be notified of. For example, you can specify\n SourceType
= db-instance
, SourceIds
=\n mydbinstance1
, mydbinstance2
and\n EventCategories
= Availability
,\n Backup
.
If you specify both the SourceType
and SourceIds
, such as SourceType
= db-instance
\n and SourceIds
= myDBInstance1
, you are notified of all the db-instance
events for\n the specified source. If you specify a SourceType
but do not specify SourceIds
,\n you receive notice of the events for that source type for all your RDS sources. If you\n don't specify either the SourceType or the SourceIds
, you are notified of events\n generated from all RDS sources belonging to your customer account.
RDS event notification is only available for unencrypted SNS topics. If you specify an \n encrypted SNS topic, event notifications aren't sent for the topic.
\nCreates an RDS event notification subscription. This operation requires a topic Amazon\n Resource Name (ARN) created by either the RDS console, the SNS console, or the SNS API.\n To obtain an ARN with SNS, you must create a topic in Amazon SNS and subscribe to the\n topic. The ARN is displayed in the SNS console.
\nYou can specify the type of source (SourceType
) that you want to be\n notified of and provide a list of RDS sources (SourceIds
) that triggers the\n events. You can also provide a list of event categories (EventCategories
)\n for events that you want to be notified of. For example, you can specify\n SourceType
= db-instance
, SourceIds
=\n mydbinstance1
, mydbinstance2
and\n EventCategories
= Availability
,\n Backup
.
If you specify both the SourceType
and SourceIds
, such as SourceType
= db-instance
\n and SourceIds
= myDBInstance1
, you are notified of all the db-instance
events for\n the specified source. If you specify a SourceType
but do not specify SourceIds
,\n you receive notice of the events for that source type for all your RDS sources. If you\n don't specify either the SourceType or the SourceIds
, you are notified of events\n generated from all RDS sources belonging to your customer account.
RDS event notification is only available for unencrypted SNS topics. If you specify an \n encrypted SNS topic, event notifications aren't sent for the topic.
\nCreates an Aurora global database\n spread across multiple Amazon Web Services Regions. The global database\n contains a single primary cluster with read-write capability,\n and a read-only secondary cluster that receives\n data from the primary cluster through high-speed replication\n performed by the Aurora storage subsystem.
\nYou can create a global database that is initially empty, and then\n add a primary cluster and a secondary cluster to it.\n Or you can specify an existing Aurora cluster during the create operation,\n and this cluster becomes the primary cluster of the global database.
\nThis action only applies to Aurora DB clusters.
\nCreates an Aurora global database\n spread across multiple Amazon Web Services Regions. The global database\n contains a single primary cluster with read-write capability,\n and a read-only secondary cluster that receives\n data from the primary cluster through high-speed replication\n performed by the Aurora storage subsystem.
\nYou can create a global database that is initially empty, and then\n add a primary cluster and a secondary cluster to it.\n Or you can specify an existing Aurora cluster during the create operation,\n and this cluster becomes the primary cluster of the global database.
\nThis action applies only to Aurora DB clusters.
\nThe name for your database of up to 64 alpha-numeric characters. If you do not provide a name, Amazon\n Aurora will not create a database in the global database cluster you are creating.
" + "smithy.api#documentation": "The name for your database of up to 64 alphanumeric characters. If you do not provide\n a name, Amazon Aurora will not create a database in the global database cluster you are\n creating.
" } }, "StorageEncrypted": { @@ -4325,7 +4325,7 @@ "PerformanceInsightsRetentionPeriod": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).
\nThis setting is only for non-Aurora Multi-AZ DB clusters.
" + "smithy.api#documentation": "The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:
\n7
\n\n month * 31, where month is a number of months from 1-23
\n731
\nFor example, the following values are valid:
\n93 (3 months * 31)
\n341 (11 months * 31)
\n589 (19 months * 31)
\n731
\nThis setting is only for non-Aurora Multi-AZ DB clusters.
" } }, "ServerlessV2ScalingConfiguration": { @@ -5733,7 +5733,7 @@ "PerformanceInsightsRetentionPeriod": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).
" + "smithy.api#documentation": "The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:
\n7
\n\n month * 31, where month is a number of months from 1-23
\n731
\nFor example, the following values are valid:
\n93 (3 months * 31)
\n341 (11 months * 31)
\n589 (19 months * 31)
\n731
\nThe engine family applies to MySQL and PostgreSQL for both RDS and Aurora.
" + "smithy.api#documentation": "The kinds of databases that the proxy can connect to. This value determines which database network protocol \n the proxy recognizes when it interprets network traffic to and from the database. MYSQL
supports Aurora MySQL, \n RDS for MariaDB, and RDS for MySQL databases. POSTGRESQL
supports Aurora PostgreSQL and RDS for PostgreSQL databases.
The ID of the DB instance to retrieve the list of DB snapshots for. \n This parameter can't be used in conjunction with DBSnapshotIdentifier
.\n This parameter isn't case-sensitive.
Constraints:
\nIf supplied, must match the identifier of an existing DBInstance.
\nThe ID of the DB instance to retrieve the list of DB snapshots for. \n This parameter isn't case-sensitive.
\nConstraints:
\nIf supplied, must match the identifier of an existing DBInstance.
\nA specific DB snapshot identifier to describe. This parameter can't be used in conjunction with DBInstanceIdentifier
. \n This value is stored as a lowercase string.
Constraints:
\nIf supplied, must match the identifier of an existing DBSnapshot.
\nIf this identifier is for an automated snapshot, the SnapshotType
parameter must also be specified.
A specific DB snapshot identifier to describe.\n This value is stored as a lowercase string.
\nConstraints:
\nIf supplied, must match the identifier of an existing DBSnapshot.
\nIf this identifier is for an automated snapshot, the SnapshotType
parameter must also be specified.
Returns events related to DB instances, DB clusters, DB parameter groups, DB security groups, DB snapshots, DB cluster snapshots, and RDS Proxies for the past 14 days. \n Events specific to a particular DB instance, DB cluster, DB parameter group, DB security group, DB snapshot, DB cluster snapshot group, or RDS Proxy can be \n obtained by providing the name as a parameter.
\nBy default, RDS returns events that were generated in the past hour.
\nReturns events related to DB instances, DB clusters, DB parameter groups, DB security groups, DB snapshots, DB cluster snapshots, and RDS Proxies for the past 14 days. \n Events specific to a particular DB instance, DB cluster, DB parameter group, DB security group, DB snapshot, DB cluster snapshot group, or RDS Proxy can be \n obtained by providing the name as a parameter.
\nFor more information on working with events, see Monitoring Amazon RDS events in the Amazon RDS User Guide and Monitoring Amazon Aurora\n events in the Amazon Aurora User Guide.
\nBy default, RDS returns events that were generated in the past hour.
\nThe maximum number of records to include in the response.\n If more records exist than the specified MaxRecords
value,\n a pagination token called a marker is included in the response so that\n you can retrieve the remaining results.
Default: 100
\nConstraints: Minimum 20, maximum 100.
" + "smithy.api#documentation": "The maximum number of records to include in the response.\n If more records exist than the specified MaxRecords
value,\n a pagination token called a marker is included in the response so that\n you can retrieve the remaining results.
Default: 100
\nConstraints: Minimum 20, maximum 10000.
" } }, "Marker": { @@ -11767,7 +11767,7 @@ } }, "traits": { - "smithy.api#documentation": "This data type is used as a response element in the DescribeEvents
action.
This data type is used as a response element in the DescribeEvents action.
" } }, "com.amazonaws.rds#EventCategoriesList": { @@ -11796,7 +11796,7 @@ } }, "traits": { - "smithy.api#documentation": "Contains the results of a successful invocation of the DescribeEventCategories
operation.
Contains the results of a successful invocation of the DescribeEventCategories\n operation.
" } }, "com.amazonaws.rds#EventCategoriesMapList": { @@ -12146,7 +12146,7 @@ } ], "traits": { - "smithy.api#documentation": "Forces a failover for a DB cluster.
\nFor an Aurora DB cluster, failover for a DB cluster promotes one of the Aurora Replicas (read-only instances)\n in the DB cluster to be the primary DB instance (the cluster writer).
\nFor a Multi-AZ DB cluster, failover for a DB cluster promotes one of the readable standby DB instances (read-only instances)\n in the DB cluster to be the primary DB instance (the cluster writer).
\nAn Amazon Aurora DB cluster automatically fails over to an Aurora Replica, if one exists,\n when the primary DB instance fails. A Multi-AZ DB cluster automatically fails over to a readbable standby \n DB instance when the primary DB instance fails.
\nTo simulate a failure of a primary instance for testing, you can force a failover. \n Because each instance in a DB cluster has its own endpoint address, make sure to clean up and re-establish any existing \n connections that use those endpoint addresses when the failover is complete.
\nFor more information on Amazon Aurora DB clusters, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.
\nFor more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.
" + "smithy.api#documentation": "Forces a failover for a DB cluster.
\nFor an Aurora DB cluster, failover for a DB cluster promotes one of the Aurora Replicas (read-only instances)\n in the DB cluster to be the primary DB instance (the cluster writer).
\nFor a Multi-AZ DB cluster, failover for a DB cluster promotes one of the readable standby DB instances (read-only instances)\n in the DB cluster to be the primary DB instance (the cluster writer).
\nAn Amazon Aurora DB cluster automatically fails over to an Aurora Replica, if one exists,\n when the primary DB instance fails. A Multi-AZ DB cluster automatically fails over to a readable standby \n DB instance when the primary DB instance fails.
\nTo simulate a failure of a primary instance for testing, you can force a failover. \n Because each instance in a DB cluster has its own endpoint address, make sure to clean up and re-establish any existing \n connections that use those endpoint addresses when the failover is complete.
\nFor more information on Amazon Aurora DB clusters, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.
\nFor more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.
" } }, "com.amazonaws.rds#FailoverDBClusterMessage": { @@ -13718,7 +13718,7 @@ "PerformanceInsightsRetentionPeriod": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).
\nValid for: Multi-AZ DB clusters only
" + "smithy.api#documentation": "The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:
\n7
\n\n month * 31, where month is a number of months from 1-23
\n731
\nFor example, the following values are valid:
\n93 (3 months * 31)
\n341 (11 months * 31)
\n589 (19 months * 31)
\n731
\nIf you specify a retention period such as 94, which isn't a valid value, RDS issues an error.
\nValid for: Multi-AZ DB clusters only
" } }, "ServerlessV2ScalingConfiguration": { @@ -13746,7 +13746,7 @@ } ], "traits": { - "smithy.api#documentation": "Modifies the parameters of a DB cluster parameter group. To modify more than one parameter,\n submit a list of the following: ParameterName
, ParameterValue
, \n and ApplyMethod
. A maximum of 20\n parameters can be modified in a single request.
After you create a DB cluster parameter group, you should wait at least 5 minutes\n before creating your first DB cluster that uses that DB cluster parameter group as the default parameter \n group. This allows Amazon RDS to fully complete the create action before the parameter \n group is used as the default for a new DB cluster. This is especially important for parameters \n that are critical when creating the default database for a DB cluster, such as the character set \n for the default database defined by the character_set_database
parameter. You can use the \n Parameter Groups option of the Amazon RDS console or the \n DescribeDBClusterParameters
action to verify \n that your DB cluster parameter group has been created or modified.
If the modified DB cluster parameter group is used by an Aurora Serverless v1 cluster, Aurora\n applies the update immediately. The cluster restart might interrupt your workload. In that case,\n your application must reopen any connections and retry any transactions that were active\n when the parameter changes took effect.
\nFor more information on Amazon Aurora DB clusters, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.
\nFor more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.\n
" + "smithy.api#documentation": "Modifies the parameters of a DB cluster parameter group. To modify more than one parameter,\n submit a list of the following: ParameterName
, ParameterValue
, \n and ApplyMethod
. A maximum of 20\n parameters can be modified in a single request.
After you create a DB cluster parameter group, you should wait at least 5 minutes\n before creating your first DB cluster that uses that DB cluster parameter group as the default parameter \n group. This allows Amazon RDS to fully complete the create action before the parameter \n group is used as the default for a new DB cluster. This is especially important for parameters \n that are critical when creating the default database for a DB cluster, such as the character set \n for the default database defined by the character_set_database
parameter. You can use the \n Parameter Groups option of the Amazon RDS console or the \n DescribeDBClusterParameters
operation to verify \n that your DB cluster parameter group has been created or modified.
If the modified DB cluster parameter group is used by an Aurora Serverless v1 cluster, Aurora\n applies the update immediately. The cluster restart might interrupt your workload. In that case,\n your application must reopen any connections and retry any transactions that were active\n when the parameter changes took effect.
\nFor more information on Amazon Aurora DB clusters, see \n \n What is Amazon Aurora? in the Amazon Aurora User Guide.
\nFor more information on Multi-AZ DB clusters, see \n \n Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.\n
" } }, "com.amazonaws.rds#ModifyDBClusterParameterGroupMessage": { @@ -13799,7 +13799,7 @@ } ], "traits": { - "smithy.api#documentation": "Adds an attribute and values to, or removes an attribute and values from, a manual DB cluster snapshot.
\nTo share a manual DB cluster snapshot with other Amazon Web Services accounts, specify\n restore
as the AttributeName
and use the\n ValuesToAdd
parameter to add a list of IDs of the Amazon Web Services accounts that are\n authorized to restore the manual DB cluster snapshot. Use the value all
to\n make the manual DB cluster snapshot public, which means that it can be copied or\n restored by all Amazon Web Services accounts.
Don't add the all
value for any manual DB cluster snapshots\n that contain private information that you don't want available to all Amazon Web Services\n accounts.
If a manual DB cluster snapshot is encrypted, it can be shared, but only by\n specifying a list of authorized Amazon Web Services account IDs for the ValuesToAdd
\n parameter. You can't use all
as a value for that parameter in this\n case.
To view which Amazon Web Services accounts have access to copy or restore a manual DB cluster\n snapshot, or whether a manual DB cluster snapshot is public or private, use the DescribeDBClusterSnapshotAttributes API action. The accounts are\n returned as values for the restore
attribute.
Adds an attribute and values to, or removes an attribute and values from, a manual DB cluster snapshot.
\nTo share a manual DB cluster snapshot with other Amazon Web Services accounts, specify\n restore
as the AttributeName
and use the\n ValuesToAdd
parameter to add a list of IDs of the Amazon Web Services accounts that are\n authorized to restore the manual DB cluster snapshot. Use the value all
to\n make the manual DB cluster snapshot public, which means that it can be copied or\n restored by all Amazon Web Services accounts.
Don't add the all
value for any manual DB cluster snapshots\n that contain private information that you don't want available to all Amazon Web Services\n accounts.
If a manual DB cluster snapshot is encrypted, it can be shared, but only by\n specifying a list of authorized Amazon Web Services account IDs for the ValuesToAdd
\n parameter. You can't use all
as a value for that parameter in this\n case.
To view which Amazon Web Services accounts have access to copy or restore a manual DB cluster\n snapshot, or whether a manual DB cluster snapshot is public or private, use the DescribeDBClusterSnapshotAttributes API operation. The accounts are\n returned as values for the restore
attribute.
The name of the DB cluster snapshot attribute to modify.
\nTo manage authorization for other Amazon Web Services accounts to copy or restore a manual DB cluster snapshot, \n set this value to restore
.
To view the list of attributes available to modify, use the\n DescribeDBClusterSnapshotAttributes API action.
\nThe name of the DB cluster snapshot attribute to modify.
\nTo manage authorization for other Amazon Web Services accounts to copy or restore a manual DB cluster snapshot, \n set this value to restore
.
To view the list of attributes available to modify, use the\n DescribeDBClusterSnapshotAttributes API operation.
\nThe new compute and memory capacity of the DB instance, for example db.m4.large.\n Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines.\n For the full list of DB instance classes,\n and availability for your engine, see\n DB Instance Class in the Amazon RDS User Guide.
\nIf you modify the DB instance class, an outage occurs during the change.\n The change is applied during the next maintenance window,\n unless ApplyImmediately
is enabled for this request.
This setting doesn't apply to RDS Custom for Oracle.
\nDefault: Uses existing setting
" + "smithy.api#documentation": "The new compute and memory capacity of the DB instance, for example db.m5.large.\n Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines.\n For the full list of DB instance classes, and availability for your engine, see\n DB instance \n classes in the Amazon RDS User Guide or \n Aurora \n DB instance classes in the Amazon Aurora User Guide.
\nIf you modify the DB instance class, an outage occurs during the change.\n The change is applied during the next maintenance window,\n unless ApplyImmediately
is enabled for this request.
This setting doesn't apply to RDS Custom for Oracle.
\nDefault: Uses existing setting
" } }, "DBSubnetGroupName": { @@ -13967,7 +13967,7 @@ "MasterUserPassword": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "The new password for the master user. The password can include any printable ASCII \n character except \"/\", \"\"\", or \"@\".
\nChanging this parameter doesn't result in an outage and the change is asynchronously applied as soon as possible. \n Between the time of the request and the completion of the request,\n the MasterUserPassword
element exists in the\n PendingModifiedValues
element of the operation response.
This setting doesn't apply to RDS Custom.
\n\n Amazon Aurora\n
\nNot applicable. The password for the master user is managed by the DB cluster. For\n more information, see ModifyDBCluster
.
Default: Uses existing setting
\n\n MariaDB\n
\nConstraints: Must contain from 8 to 41 characters.
\n\n Microsoft SQL Server\n
\nConstraints: Must contain from 8 to 128 characters.
\n\n MySQL\n
\nConstraints: Must contain from 8 to 41 characters.
\n\n Oracle\n
\nConstraints: Must contain from 8 to 30 characters.
\n\n PostgreSQL\n
\nConstraints: Must contain from 8 to 128 characters.
\nAmazon RDS API actions never return the password, \n so this action provides a way to regain access to a primary instance user if the password is lost. \n This includes restoring privileges that might have been accidentally revoked.
\nThe new password for the master user. The password can include any printable ASCII \n character except \"/\", \"\"\", or \"@\".
\nChanging this parameter doesn't result in an outage and the change is asynchronously applied as soon as possible. \n Between the time of the request and the completion of the request,\n the MasterUserPassword
element exists in the\n PendingModifiedValues
element of the operation response.
This setting doesn't apply to RDS Custom.
\n\n Amazon Aurora\n
\nNot applicable. The password for the master user is managed by the DB cluster. For\n more information, see ModifyDBCluster
.
Default: Uses existing setting
\n\n MariaDB\n
\nConstraints: Must contain from 8 to 41 characters.
\n\n Microsoft SQL Server\n
\nConstraints: Must contain from 8 to 128 characters.
\n\n MySQL\n
\nConstraints: Must contain from 8 to 41 characters.
\n\n Oracle\n
\nConstraints: Must contain from 8 to 30 characters.
\n\n PostgreSQL\n
\nConstraints: Must contain from 8 to 128 characters.
\nAmazon RDS API operations never return the password, \n so this action provides a way to regain access to a primary instance user if the password is lost. \n This includes restoring privileges that might have been accidentally revoked.
\nThe number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.
\nEnabling and disabling backups can result in a brief I/O suspension that lasts from a few seconds to a few minutes, depending on the size and class of your DB instance.
\nThese changes are applied during the next maintenance window unless the ApplyImmediately
parameter is enabled\n for this request. If you change the parameter from one non-zero value to another non-zero value, the change is asynchronously\n applied as soon as possible.
\n Amazon Aurora\n
\nNot applicable. The retention period for automated backups is managed by the DB\n cluster. For more information, see ModifyDBCluster
.
Default: Uses existing setting
\nConstraints:
\nIt must be a value from 0 to 35. It can't be set to 0 if the DB instance is a source to \n read replicas. It can't be set to 0 or 35 for an RDS Custom for Oracle DB instance.
\nIt can be specified for a MySQL read replica only if the source is running MySQL 5.6 or\n later.
\nIt can be specified for a PostgreSQL read replica only if the source is running PostgreSQL\n 9.3.5.
\nThe number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.
\nEnabling and disabling backups can result in a brief I/O suspension that lasts from a few seconds to a few minutes, depending on the size and class of your DB instance.
\nThese changes are applied during the next maintenance window unless the ApplyImmediately
parameter is enabled\n for this request. If you change the parameter from one non-zero value to another non-zero value, the change is asynchronously\n applied as soon as possible.
\n Amazon Aurora\n
\nNot applicable. The retention period for automated backups is managed by the DB\n cluster. For more information, see ModifyDBCluster
.
Default: Uses existing setting
\nConstraints:
\nIt must be a value from 0 to 35. It can't be set to 0 if the DB instance is a source to \n read replicas. It can't be set to 0 for an RDS Custom for Oracle DB instance.
\nIt can be specified for a MySQL read replica only if the source is running MySQL 5.6 or\n later.
\nIt can be specified for a PostgreSQL read replica only if the source is running PostgreSQL\n 9.3.5.
\nA value that indicates whether to enable Performance Insights for the DB instance.
\nFor more information, see \n Using Amazon Performance Insights in the Amazon RDS User Guide..
\nThis setting doesn't apply to RDS Custom.
" + "smithy.api#documentation": "A value that indicates whether to enable Performance Insights for the DB instance.
\nFor more information, see \n Using Amazon Performance Insights in the Amazon RDS User Guide.
\nThis setting doesn't apply to RDS Custom.
" } }, "PerformanceInsightsKMSKeyId": { @@ -14135,7 +14135,7 @@ "PerformanceInsightsRetentionPeriod": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).
\nThis setting doesn't apply to RDS Custom.
" + "smithy.api#documentation": "The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:
\n7
\n\n month * 31, where month is a number of months from 1-23
\n731
\nFor example, the following values are valid:
\n93 (3 months * 31)
\n341 (11 months * 31)
\n589 (19 months * 31)
\n731
\nIf you specify a retention period such as 94, which isn't a valid value, RDS issues an error.
\nThis setting doesn't apply to RDS Custom.
" } }, "CloudwatchLogsExportConfiguration": { @@ -14443,14 +14443,14 @@ "TargetGroupName": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "The name of the new target group to assign to the proxy.
", + "smithy.api#documentation": "The name of the target group to modify.
", "smithy.api#required": {} } }, "DBProxyName": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "The name of the new proxy to which to assign the target group.
", + "smithy.api#documentation": "The name of the proxy.
", "smithy.api#required": {} } }, @@ -14516,7 +14516,7 @@ } ], "traits": { - "smithy.api#documentation": "Adds an attribute and values to, or removes an attribute and values from, a manual DB snapshot.
\nTo share a manual DB snapshot with other Amazon Web Services accounts, specify restore
\n as the AttributeName
and use the ValuesToAdd
parameter to add\n a list of IDs of the Amazon Web Services accounts that are authorized to restore the manual DB snapshot.\n Uses the value all
to make the manual DB snapshot public, which means it\n can be copied or restored by all Amazon Web Services accounts.
Don't add the all
value for any manual DB snapshots that\n contain private information that you don't want available to all Amazon Web Services\n accounts.
If the manual DB snapshot is encrypted, it can be shared, but only by specifying a\n list of authorized Amazon Web Services account IDs for the ValuesToAdd
parameter. You\n can't use all
as a value for that parameter in this case.
To view which Amazon Web Services accounts have access to copy or restore a manual DB snapshot, or\n whether a manual DB snapshot public or private, use the DescribeDBSnapshotAttributes API action. The accounts are returned as\n values for the restore
attribute.
Adds an attribute and values to, or removes an attribute and values from, a manual DB snapshot.
\nTo share a manual DB snapshot with other Amazon Web Services accounts, specify restore
\n as the AttributeName
and use the ValuesToAdd
parameter to add\n a list of IDs of the Amazon Web Services accounts that are authorized to restore the manual DB snapshot.\n Uses the value all
to make the manual DB snapshot public, which means it\n can be copied or restored by all Amazon Web Services accounts.
Don't add the all
value for any manual DB snapshots that\n contain private information that you don't want available to all Amazon Web Services\n accounts.
If the manual DB snapshot is encrypted, it can be shared, but only by specifying a\n list of authorized Amazon Web Services account IDs for the ValuesToAdd
parameter. You\n can't use all
as a value for that parameter in this case.
    To view which Amazon Web Services accounts have access to copy or restore a manual DB snapshot, or\n        whether a manual DB snapshot is public or private, use the DescribeDBSnapshotAttributes API operation. The accounts are returned as\n        values for the restore
    
attribute.
The name of the DB snapshot attribute to modify.
\nTo manage authorization for other Amazon Web Services accounts to copy or restore a manual DB snapshot, \n set this value to restore
.
To view the list of attributes available to modify, use the\n DescribeDBSnapshotAttributes API action.
\nThe name of the DB snapshot attribute to modify.
\nTo manage authorization for other Amazon Web Services accounts to copy or restore a manual DB snapshot, \n set this value to restore
.
To view the list of attributes available to modify, use the\n DescribeDBSnapshotAttributes API operation.
\nYou might need to reboot your DB instance, usually for maintenance reasons. \n For example, if you make certain modifications, \n or if you change the DB parameter group associated with the DB instance, \n you must reboot the instance for the changes to take effect.
\nRebooting a DB instance restarts the database engine service. \n Rebooting a DB instance results in a momentary outage, during which the DB instance status is set to rebooting.
\nFor more information about rebooting, see Rebooting a DB Instance in the Amazon RDS User Guide.\n
\nThis command doesn't apply to RDS Custom.
" + "smithy.api#documentation": "You might need to reboot your DB instance, usually for maintenance reasons. \n For example, if you make certain modifications, \n or if you change the DB parameter group associated with the DB instance, \n you must reboot the instance for the changes to take effect.
\nRebooting a DB instance restarts the database engine service. \n Rebooting a DB instance results in a momentary outage, during which the DB instance status is set to rebooting.
\nFor more information about rebooting, see Rebooting a DB Instance in the Amazon RDS User Guide.\n
\nThis command doesn't apply to RDS Custom.
\nIf your DB instance is part of a Multi-AZ DB cluster, you can reboot the DB cluster with the RebootDBCluster
operation.
The name of the database engine to be used for this DB cluster.
\nValid Values: aurora
(for MySQL 5.6-compatible Aurora), aurora-mysql
(for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora), and aurora-postgresql
\n
The name of the database engine to be used for this DB cluster.
\nValid Values: aurora
(for MySQL 5.6-compatible Aurora) and aurora-mysql
\n (for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora)
The version number of the database engine to use.
\nTo list all of the available engine versions for aurora
(for MySQL 5.6-compatible Aurora), use the following command:
\n aws rds describe-db-engine-versions --engine aurora --query \"DBEngineVersions[].EngineVersion\"
\n
To list all of the available engine versions for aurora-mysql
(for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora), use the following command:
\n aws rds describe-db-engine-versions --engine aurora-mysql --query \"DBEngineVersions[].EngineVersion\"
\n
To list all of the available engine versions for aurora-postgresql
, use the following command:
\n aws rds describe-db-engine-versions --engine aurora-postgresql --query \"DBEngineVersions[].EngineVersion\"
\n
\n Aurora MySQL\n
\nExample: 5.6.10a
, 5.6.mysql_aurora.1.19.2
, 5.7.12
, 5.7.mysql_aurora.2.04.5
, 8.0.mysql_aurora.3.01.0
\n
\n Aurora PostgreSQL\n
\nExample: 9.6.3
, 10.7
\n
The version number of the database engine to use.
\nTo list all of the available engine versions for aurora
(for MySQL 5.6-compatible Aurora), use the following command:
\n aws rds describe-db-engine-versions --engine aurora --query \"DBEngineVersions[].EngineVersion\"
\n
To list all of the available engine versions for aurora-mysql
(for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora), use the following command:
\n aws rds describe-db-engine-versions --engine aurora-mysql --query \"DBEngineVersions[].EngineVersion\"
\n
\n Aurora MySQL\n
\nExample: 5.6.10a
, 5.6.mysql_aurora.1.19.2
, 5.7.mysql_aurora.2.07.1
,\n 8.0.mysql_aurora.3.02.0
\n
The list of logs that the restored DB cluster is to export to Amazon CloudWatch Logs.\n The values in the list depend on the DB engine being used.
\n\n RDS for MySQL\n
\nPossible values are error
, general
, and slowquery
.
\n RDS for PostgreSQL\n
\nPossible values are postgresql
and upgrade
.
\n Aurora MySQL\n
\nPossible values are audit
, error
, general
, and slowquery
.
\n Aurora PostgreSQL\n
\nPossible value is postgresql
.
For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide..
\nFor more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.
\nValid for: Aurora DB clusters and Multi-AZ DB clusters
" + "smithy.api#documentation": "The list of logs that the restored DB cluster is to export to Amazon CloudWatch Logs.\n The values in the list depend on the DB engine being used.
\n\n RDS for MySQL\n
\nPossible values are error
, general
, and slowquery
.
\n RDS for PostgreSQL\n
\nPossible values are postgresql
and upgrade
.
\n Aurora MySQL\n
\nPossible values are audit
, error
, general
, and slowquery
.
\n Aurora PostgreSQL\n
\nPossible value is postgresql
.
For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.
\nFor more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.
\nValid for: Aurora DB clusters and Multi-AZ DB clusters
" } }, "EngineMode": { @@ -17874,7 +17874,7 @@ "EnableCloudwatchLogsExports": { "target": "com.amazonaws.rds#LogTypeList", "traits": { - "smithy.api#documentation": "The list of logs that the restored DB cluster is to export to CloudWatch Logs. The values\n in the list depend on the DB engine being used.
\n\n RDS for MySQL\n
\nPossible values are error
, general
, and slowquery
.
\n RDS for PostgreSQL\n
\nPossible values are postgresql
and upgrade
.
\n Aurora MySQL\n
\nPossible values are audit
, error
, general
, and slowquery
.
\n Aurora PostgreSQL\n
\nPossible value is postgresql
.
For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide..
\nFor more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.
\nValid for: Aurora DB clusters and Multi-AZ DB clusters
" + "smithy.api#documentation": "The list of logs that the restored DB cluster is to export to CloudWatch Logs. The values\n in the list depend on the DB engine being used.
\n\n RDS for MySQL\n
\nPossible values are error
, general
, and slowquery
.
\n RDS for PostgreSQL\n
\nPossible values are postgresql
and upgrade
.
\n Aurora MySQL\n
\nPossible values are audit
, error
, general
, and slowquery
.
\n Aurora PostgreSQL\n
\nPossible value is postgresql
.
For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.
\nFor more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.
\nValid for: Aurora DB clusters and Multi-AZ DB clusters
" } }, "DBClusterParameterGroupName": { @@ -18540,7 +18540,7 @@ "EnablePerformanceInsights": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "A value that indicates whether to enable Performance Insights for the DB instance.
\nFor more information, see \n Using Amazon Performance Insights in the Amazon RDS User Guide..
" + "smithy.api#documentation": "A value that indicates whether to enable Performance Insights for the DB instance.
\nFor more information, see \n Using Amazon Performance Insights in the Amazon RDS User Guide.
" } }, "PerformanceInsightsKMSKeyId": { @@ -18552,7 +18552,7 @@ "PerformanceInsightsRetentionPeriod": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).
" + "smithy.api#documentation": "The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:
\n7
\n\n month * 31, where month is a number of months from 1-23
\n731
\nFor example, the following values are valid:
\n93 (3 months * 31)
\n341 (11 months * 31)
\n589 (19 months * 31)
\n731
\nIf you specify a retention period such as 94, which isn't a valid value, RDS issues an error.
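A minimal TypeScript sketch of the validity rule described above (not part of this diff; the helper name is made up): the only accepted retention periods are 7, a whole number of months times 31 (1-23 months), or 731.

function isValidPerformanceInsightsRetention(days: number): boolean {
  // 7 days and 731 days (2 years) are always valid.
  if (days === 7 || days === 731) {
    return true;
  }
  // Otherwise the value must be month * 31 for a month count from 1 to 23.
  const months = days / 31;
  return Number.isInteger(months) && months >= 1 && months <= 23;
}

// isValidPerformanceInsightsRetention(93) -> true  (3 months * 31)
// isValidPerformanceInsightsRetention(94) -> false (RDS rejects it with an error)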
" } }, "EnableCloudwatchLogsExports": { @@ -19111,7 +19111,7 @@ "MinCapacity": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "The maximum capacity for the Aurora DB cluster in serverless
DB engine\n mode.
The minimum capacity for an Aurora DB cluster in serverless
DB engine mode.
A URL that contains a Signature Version 4 signed request for the StartDBInstanceAutomatedBackupsReplication action to be \n called in the Amazon Web Services Region of the source DB instance. The presigned URL must be a valid request for the\n StartDBInstanceAutomatedBackupsReplication API action that can be executed in the Amazon Web Services Region that contains\n the source DB instance.
" + "smithy.api#documentation": "In an Amazon Web Services GovCloud (US) Region, an URL that contains a Signature Version 4 signed request \n for the StartDBInstanceAutomatedBackupsReplication
operation to call \n in the Amazon Web Services Region of the source DB instance. The presigned URL must be a valid request for the\n StartDBInstanceAutomatedBackupsReplication
API operation that can run in \n the Amazon Web Services Region that contains the source DB instance.
This setting applies only to Amazon Web Services GovCloud (US) Regions. It's ignored in other\n Amazon Web Services Regions.
\nTo learn how to generate a Signature Version 4 signed request, see \n \n Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and\n \n Signature Version 4 Signing Process.
\nIf you are using an Amazon Web Services SDK tool or the CLI, you can specify\n SourceRegion
(or --source-region
for the CLI)\n instead of specifying PreSignedUrl
manually. Specifying\n SourceRegion
autogenerates a presigned URL that is a valid request\n for the operation that can run in the source Amazon Web Services Region.
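A sketch of the SDK path described above, using @aws-sdk/client-rds from this repository (not part of this diff). The ARN and Region names are placeholders, and whether SourceRegion is accepted for this particular operation depends on the SDK's presigned-URL middleware, so treat that parameter as an assumption.

import { RDSClient, StartDBInstanceAutomatedBackupsReplicationCommand } from "@aws-sdk/client-rds";

// Client configured for the destination Region that will hold the replicated backups.
const rds = new RDSClient({ region: "us-west-2" });

await rds.send(
  new StartDBInstanceAutomatedBackupsReplicationCommand({
    SourceDBInstanceArn: "arn:aws:rds:us-east-1:123456789012:db:example-instance",
    BackupRetentionPeriod: 7,
    // Per the documentation above, SDK callers can supply SourceRegion instead of
    // hand-building a Signature Version 4 PreSignedUrl (the URL itself is only
    // required in Amazon Web Services GovCloud (US) Regions).
    SourceRegion: "us-east-1",
  })
);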
Stops automated backup replication for a DB instance.
\nThis command doesn't apply to RDS Custom.
\nFor more information, see \n Replicating Automated Backups to Another Amazon Web Services Region in the Amazon RDS User Guide.\n
" + "smithy.api#documentation": "Stops automated backup replication for a DB instance.
\nThis command doesn't apply to RDS Custom, Aurora MySQL, and Aurora PostgreSQL.
\nFor more information, see \n Replicating Automated Backups to Another Amazon Web Services Region in the Amazon RDS User Guide.\n
" } }, "com.amazonaws.rds#StopDBInstanceAutomatedBackupsReplicationMessage": { @@ -20556,7 +20556,7 @@ "Status": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "The status of the VPC security group.
" + "smithy.api#documentation": "The membership status of the VPC security group.
\nCurrently, the only valid status is active
.
General information about the IP set.
+ */ +export interface IPSetMetadata { + /** + *Describes the total number of CIDR blocks currently in use by the IP set references in a firewall. To determine how many CIDR blocks are available for you to use in a firewall, you can call AvailableCIDRCount
.
Summarizes the CIDR blocks used by the IP set references in a firewall. Network Firewall calculates the number of CIDRs by taking an aggregated count of all CIDRs used by the IP sets you are referencing.
+ */ +export interface CIDRSummary { + /** + *The number of CIDR blocks available for use by the IP set references in a firewall.
+ */ + AvailableCIDRCount?: number; + + /** + *The number of CIDR blocks used by the IP set references in a firewall.
+ */ + UtilizedCIDRCount?: number; + + /** + *The list of the IP set references used by a firewall.
+ */ + IPSetReferences?: RecordThe capacity usage summary of the resources used by the ReferenceSets in a firewall.
+ */ +export interface CapacityUsageSummary { + /** + *Describes the capacity usage of the CIDR blocks used by the IP set references in a firewall.
+ */ + CIDRs?: CIDRSummary; +} + +export namespace CapacityUsageSummary { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CapacityUsageSummary): any => ({ + ...obj, + }); +} + export enum ConfigurationSyncState { + CAPACITY_CONSTRAINED = "CAPACITY_CONSTRAINED", IN_SYNC = "IN_SYNC", PENDING = "PENDING", } @@ -699,6 +767,7 @@ export enum FirewallStatusValue { } export enum PerObjectSyncStatus { + CAPACITY_CONSTRAINED = "CAPACITY_CONSTRAINED", IN_SYNC = "IN_SYNC", PENDING = "PENDING", } @@ -810,6 +879,11 @@ export interface FirewallStatus { * and configuration object. */ SyncStates?: RecordDescribes the capacity usage of the resources contained in a firewall's reference sets. Network Firewall calclulates the capacity usage by taking an aggregated count of all of the resources used by all of the reference sets in a firewall.
+ */ + CapacityUsageSummary?: CapacityUsageSummary; } export namespace FirewallStatus { @@ -1268,6 +1342,47 @@ export namespace CreateFirewallPolicyResponse { }); } +/** + *Configures one or more IP set references for a Suricata-compatible rule group. This is used in CreateRuleGroup or UpdateRuleGroup. An IP set reference is a rule variable that references a resource that you create and manage in another Amazon Web Services service, such as an Amazon VPC prefix list. Network Firewall IP set references enable you to dynamically update the contents of your rules. When you create, update, or delete the IP set you are referencing in your rule, Network Firewall automatically updates the rule's content with the changes. For more information about IP set references in Network Firewall, see Using IP set references in the Network Firewall Developer Guide.
+ *+ * Network Firewall currently supports only Amazon VPC prefix lists as IP set references. + *
+ */ +export interface IPSetReference { + /** + *The Amazon Resource Name (ARN) of the resource that you are referencing in your rule group.
+ */ + ReferenceArn?: string; +} + +export namespace IPSetReference { + /** + * @internal + */ + export const filterSensitiveLog = (obj: IPSetReference): any => ({ + ...obj, + }); +} + +/** + *Contains a set of IP set references.
+ */ +export interface ReferenceSets { + /** + *The list of IP set references.
+ */ + IPSetReferences?: RecordThe list of a rule group's reference sets.
+ */ + ReferenceSets?: ReferenceSets; + /** *The stateful rules or stateless rules for the rule group.
*/ diff --git a/clients/client-network-firewall/src/protocols/Aws_json1_0.ts b/clients/client-network-firewall/src/protocols/Aws_json1_0.ts index f3bb6ff4df143..60c83e340a4d1 100644 --- a/clients/client-network-firewall/src/protocols/Aws_json1_0.ts +++ b/clients/client-network-firewall/src/protocols/Aws_json1_0.ts @@ -109,6 +109,8 @@ import { AssociateSubnetsRequest, AssociateSubnetsResponse, Attachment, + CapacityUsageSummary, + CIDRSummary, CreateFirewallPolicyRequest, CreateFirewallPolicyResponse, CreateFirewallRequest, @@ -154,6 +156,8 @@ import { InvalidResourcePolicyException, InvalidTokenException, IPSet, + IPSetMetadata, + IPSetReference, LimitExceededException, ListFirewallPoliciesRequest, ListFirewallPoliciesResponse, @@ -173,6 +177,7 @@ import { PublishMetricAction, PutResourcePolicyRequest, PutResourcePolicyResponse, + ReferenceSets, ResourceNotFoundException, ResourceOwnerCheckException, RuleDefinition, @@ -2842,6 +2847,24 @@ const serializeAws_json1_0IPSet = (input: IPSet, context: __SerdeContext): any = }; }; +const serializeAws_json1_0IPSetReference = (input: IPSetReference, context: __SerdeContext): any => { + return { + ...(input.ReferenceArn != null && { ReferenceArn: input.ReferenceArn }), + }; +}; + +const serializeAws_json1_0IPSetReferenceMap = (input: RecordThe number of CIDR blocks available for use by the IP set references in a firewall.
" + } + }, + "UtilizedCIDRCount": { + "target": "com.amazonaws.networkfirewall#CIDRCount", + "traits": { + "smithy.api#documentation": "The number of CIDR blocks used by the IP set references in a firewall.
" + } + }, + "IPSetReferences": { + "target": "com.amazonaws.networkfirewall#IPSetMetadataMap", + "traits": { + "smithy.api#documentation": "The list of the IP set references used by a firewall.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Summarizes the CIDR blocks used by the IP set references in a firewall. Network Firewall calculates the number of CIDRs by taking an aggregated count of all CIDRs used by the IP sets you are referencing.
" + } + }, + "com.amazonaws.networkfirewall#CapacityUsageSummary": { + "type": "structure", + "members": { + "CIDRs": { + "target": "com.amazonaws.networkfirewall#CIDRSummary", + "traits": { + "smithy.api#documentation": "Describes the capacity usage of the CIDR blocks used by the IP set references in a firewall.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The capacity usage summary of the resources used by the ReferenceSets in a firewall.
" + } + }, "com.amazonaws.networkfirewall#CollectionMember_String": { "type": "string" }, @@ -354,6 +404,10 @@ { "value": "IN_SYNC", "name": "IN_SYNC" + }, + { + "value": "CAPACITY_CONSTRAINED", + "name": "CAPACITY_CONSTRAINED" } ] } @@ -1824,6 +1878,12 @@ "traits": { "smithy.api#documentation": "The subnets that you've configured for use by the Network Firewall firewall. This contains\n one array element per Availability Zone where you've configured a subnet. These objects\n provide details of the information that is summarized in the\n ConfigurationSyncStateSummary
and Status
, broken down by zone\n and configuration object.
Describes the capacity usage of the resources contained in a firewall's reference sets. Network Firewall calclulates the capacity usage by taking an aggregated count of all of the resources used by all of the reference sets in a firewall.
" + } } }, "traits": { @@ -1961,6 +2021,65 @@ "smithy.api#documentation": "A list of IP addresses and address ranges, in CIDR notation. This is part of a RuleVariables.
" } }, + "com.amazonaws.networkfirewall#IPSetArn": { + "type": "string" + }, + "com.amazonaws.networkfirewall#IPSetMetadata": { + "type": "structure", + "members": { + "ResolvedCIDRCount": { + "target": "com.amazonaws.networkfirewall#CIDRCount", + "traits": { + "smithy.api#documentation": "Describes the total number of CIDR blocks currently in use by the IP set references in a firewall. To determine how many CIDR blocks are available for you to use in a firewall, you can call AvailableCIDRCount
.
General information about the IP set.
" + } + }, + "com.amazonaws.networkfirewall#IPSetMetadataMap": { + "type": "map", + "key": { + "target": "com.amazonaws.networkfirewall#IPSetArn" + }, + "value": { + "target": "com.amazonaws.networkfirewall#IPSetMetadata" + } + }, + "com.amazonaws.networkfirewall#IPSetReference": { + "type": "structure", + "members": { + "ReferenceArn": { + "target": "com.amazonaws.networkfirewall#ResourceArn", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the resource that you are referencing in your rule group.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Configures one or more IP set references for a Suricata-compatible rule group. This is used in CreateRuleGroup or UpdateRuleGroup. An IP set reference is a rule variable that references a resource that you create and manage in another Amazon Web Services service, such as an Amazon VPC prefix list. Network Firewall IP set references enable you to dynamically update the contents of your rules. When you create, update, or delete the IP set you are referencing in your rule, Network Firewall automatically updates the rule's content with the changes. For more information about IP set references in Network Firewall, see Using IP set references in the Network Firewall Developer Guide.
\n\n Network Firewall currently supports only Amazon VPC prefix lists as IP set references.\n
" + } + }, + "com.amazonaws.networkfirewall#IPSetReferenceMap": { + "type": "map", + "key": { + "target": "com.amazonaws.networkfirewall#IPSetReferenceName" + }, + "value": { + "target": "com.amazonaws.networkfirewall#IPSetReference" + } + }, + "com.amazonaws.networkfirewall#IPSetReferenceName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 32 + }, + "smithy.api#pattern": "^[A-Za-z][A-Za-z0-9_]*$" + } + }, "com.amazonaws.networkfirewall#IPSets": { "type": "map", "key": { @@ -2699,6 +2818,10 @@ { "value": "IN_SYNC", "name": "IN_SYNC" + }, + { + "value": "CAPACITY_CONSTRAINED", + "name": "CAPACITY_CONSTRAINED" } ] } @@ -2874,6 +2997,20 @@ "type": "structure", "members": {} }, + "com.amazonaws.networkfirewall#ReferenceSets": { + "type": "structure", + "members": { + "IPSetReferences": { + "target": "com.amazonaws.networkfirewall#IPSetReferenceMap", + "traits": { + "smithy.api#documentation": "The list of IP set references.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Contains a set of IP set references.
" + } + }, "com.amazonaws.networkfirewall#ResourceArn": { "type": "string", "traits": { @@ -3010,6 +3147,12 @@ "smithy.api#documentation": "Settings that are available for use in the rules in the rule group. You can only use\n these for stateful rule groups.
" } }, + "ReferenceSets": { + "target": "com.amazonaws.networkfirewall#ReferenceSets", + "traits": { + "smithy.api#documentation": "The list of a rule group's reference sets.
" + } + }, "RulesSource": { "target": "com.amazonaws.networkfirewall#RulesSource", "traits": { From b8837a8fe3ee514eed76a87d54e9165433ea0215 Mon Sep 17 00:00:00 2001 From: awstoolsThe log odds metric details.
+ * + *Account Takeover Insights (ATI) model uses event variables from the login data you
+ * provide to continuously calculate a set of variables (aggregated variables) based on historical events. For example, your ATI model might calculate the number of times an user has logged in using the same IP address.
+ * In this case, event variables used to derive the aggregated variables are IP address
and user
.
+ * The names of all the variables. + *
+ */ + variableNames: string[] | undefined; + + /** + *+ * The relative importance of the variables in the list to the other event variable. + *
+ */ + aggregatedVariablesImportance: number | undefined; +} + +export namespace AggregatedLogOddsMetric { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AggregatedLogOddsMetric): any => ({ + ...obj, + }); +} + +/** + *+ * The details of the impact of aggregated variables on the prediction score.
+ * + *Account Takeover Insights (ATI) model uses the login data you
+ * provide to continuously calculate a set of variables (aggregated variables) based on historical events. For example, the model might calculate the number of times an user has logged in using the same IP address.
+ * In this case, event variables used to derive the aggregated variables are IP address
and user
.
+ * The names of all the event variables that were used to derive the aggregated variables. + *
+ */ + eventVariableNames?: string[]; + + /** + *+ * The relative impact of the aggregated variables in terms of magnitude on the prediction scores. + *
+ */ + relativeImpact?: string; + + /** + *+ * The raw, uninterpreted value represented as log-odds of the fraud. These values are usually between -10 to +10, but range from -infinity to +infinity.
+ *A positive value indicates that the variables drove the risk score up.
+ *A negative value indicates that the variables drove the risk score down.
+ *The details of the relative importance of the aggregated variables.
+ * + *Account Takeover Insights (ATI) model uses event variables from the login data you
+ * provide to continuously calculate a set of variables (aggregated variables) based on historical events. For example, your ATI model might calculate the number of times an user has logged in using the same IP address.
+ * In this case, event variables used to derive the aggregated variables are IP address
and user
.
+ * List of variables' metrics. + *
+ */ + logOddsMetrics?: AggregatedLogOddsMetric[]; +} + +export namespace AggregatedVariablesImportanceMetrics { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AggregatedVariablesImportanceMetrics): any => ({ + ...obj, + }); +} + export enum AsyncJobStatus { CANCELED = "CANCELED", CANCEL_IN_PROGRESS = "CANCEL_IN_PROGRESS", @@ -31,6 +135,110 @@ export enum AsyncJobStatus { IN_PROGRESS_INITIALIZING = "IN_PROGRESS_INITIALIZING", } +/** + *+ * The Account Takeover Insights (ATI) model performance metrics data points. + *
+ */ +export interface ATIMetricDataPoint { + /** + *+ * The challenge rate. This indicates the percentage of login events that the model recommends to challenge such as + * one-time password, multi-factor authentication, and investigations. + *
+ */ + cr?: number; + + /** + *+ * The anomaly discovery rate. This metric quantifies the percentage of anomalies that can be detected by the model at the selected score threshold. + * A lower score threshold increases the percentage of anomalies captured by the model, but would also require challenging a larger percentage of + * login events, leading to a higher customer friction. + *
+ */ + adr?: number; + + /** + *+ * The model's threshold that specifies an acceptable fraud capture rate. For example, a threshold of 500 means any model score 500 or above is + * labeled as fraud. + *
+ */ + threshold?: number; + + /** + *+ * The account takeover discovery rate. This metric quantifies the percentage of account compromise events that can be detected by the model at the selected score threshold. + * This metric is only available if 50 or more entities with at-least one labeled account takeover event is present in the ingested dataset. + *
+ */ + atodr?: number; +} + +export namespace ATIMetricDataPoint { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ATIMetricDataPoint): any => ({ + ...obj, + }); +} + +/** + *+ * The Account Takeover Insights (ATI) model performance score. + *
+ */ +export interface ATIModelPerformance { + /** + *+ * The anomaly separation index (ASI) score. This metric summarizes the overall ability of the model to separate anomalous activities from the normal behavior. Depending on the business, a + * large fraction of these anomalous activities can be malicious and correspond to the account takeover attacks. A model with no separability power will have the lowest possible + * ASI score of 0.5, whereas the a model with a high separability power will have the highest possible ASI score of 1.0 + *
+ */ + asi?: number; +} + +export namespace ATIModelPerformance { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ATIModelPerformance): any => ({ + ...obj, + }); +} + +/** + *+ * The Account Takeover Insights (ATI) model training metric details. + *
+ */ +export interface ATITrainingMetricsValue { + /** + *+ * The model's performance metrics data points. + *
+ */ + metricDataPoints?: ATIMetricDataPoint[]; + + /** + *+ * The model's overall performance scores. + *
+ */ + modelPerformance?: ATIModelPerformance; +} + +export namespace ATITrainingMetricsValue { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ATITrainingMetricsValue): any => ({ + ...obj, + }); +} + /** *A key and value pair.
*/ @@ -554,6 +762,7 @@ export namespace CreateBatchPredictionJobResult { } export enum ModelTypeEnum { + ACCOUNT_TAKEOVER_INSIGHTS = "ACCOUNT_TAKEOVER_INSIGHTS", ONLINE_FRAUD_INSIGHTS = "ONLINE_FRAUD_INSIGHTS", TRANSACTION_FRAUD_INSIGHTS = "TRANSACTION_FRAUD_INSIGHTS", } @@ -836,7 +1045,7 @@ export interface LabelSchema { *The label mapper maps the Amazon Fraud Detector supported model classification labels (FRAUD
, LEGIT
) to the appropriate event type labels. For example, if "FRAUD
" and "LEGIT
" are Amazon Fraud Detector supported labels, this mapper could be: {"FRAUD" => ["0"]
, "LEGIT" => ["1"]}
or {"FRAUD" => ["false"]
, "LEGIT" => ["true"]}
or {"FRAUD" => ["fraud", "abuse"]
, "LEGIT" => ["legit", "safe"]}
. The value part of the mapper is a list, because you may have multiple label variants from your event type for a single Amazon Fraud Detector label.
*
The action to take for unlabeled events.
@@ -865,7 +1074,7 @@ export interface TrainingDataSchema { /** *The label schema.
*/ - labelSchema: LabelSchema | undefined; + labelSchema?: LabelSchema; } export namespace TrainingDataSchema { @@ -1746,11 +1955,11 @@ export namespace FileValidationMessage { } /** - *The model training validation messages.
+ *The model training data validation metrics.
*/ export interface DataValidationMetrics { /** - *The file-specific model training validation messages.
+ *The file-specific model training data validation messages.
*/ fileLevelMessages?: FileValidationMessage[]; @@ -1904,6 +2113,281 @@ export namespace TrainingResult { }); } +/** + *+ * The Online Fraud Insights (OFI) model performance metrics data points. + *
+ */ +export interface OFIMetricDataPoint { + /** + *+ * The false positive rate. This is the percentage of total legitimate events that are incorrectly predicted as fraud. + *
+ */ + fpr?: number; + + /** + *+ * The percentage of fraud events correctly predicted as fraudulent as compared to all events predicted as fraudulent. + *
+ */ + precision?: number; + + /** + *+ * The true positive rate. This is the percentage of total fraud the model detects. Also known as capture rate. + *
+ */ + tpr?: number; + + /** + *+ * The model threshold that specifies an acceptable fraud capture rate. For example, a threshold of 500 means any model score 500 or above is labeled as fraud. + *
+ */ + threshold?: number; +} + +export namespace OFIMetricDataPoint { + /** + * @internal + */ + export const filterSensitiveLog = (obj: OFIMetricDataPoint): any => ({ + ...obj, + }); +} + +/** + *+ * The Online Fraud Insights (OFI) model performance score. + *
+ */ +export interface OFIModelPerformance { + /** + *+ * The area under the curve (auc). This summarizes the total positive rate (tpr) and false positive rate (FPR) across all possible model score thresholds. + *
+ */ + auc?: number; +} + +export namespace OFIModelPerformance { + /** + * @internal + */ + export const filterSensitiveLog = (obj: OFIModelPerformance): any => ({ + ...obj, + }); +} + +/** + *+ * The Online Fraud Insights (OFI) model training metric details. + *
+ */ +export interface OFITrainingMetricsValue { + /** + *+ * The model's performance metrics data points. + *
+ */ + metricDataPoints?: OFIMetricDataPoint[]; + + /** + *+ * The model's overall performance score. + *
+ */ + modelPerformance?: OFIModelPerformance; +} + +export namespace OFITrainingMetricsValue { + /** + * @internal + */ + export const filterSensitiveLog = (obj: OFITrainingMetricsValue): any => ({ + ...obj, + }); +} + +/** + *+ * The performance metrics data points for Transaction Fraud Insights (TFI) model. + *
+ */ +export interface TFIMetricDataPoint { + /** + *+ * The false positive rate. This is the percentage of total legitimate events that are incorrectly predicted as fraud. + *
+ */ + fpr?: number; + + /** + *+ * The percentage of fraud events correctly predicted as fraudulent as compared to all events predicted as fraudulent. + *
+ */ + precision?: number; + + /** + *+ * The true positive rate. This is the percentage of total fraud the model detects. Also known as capture rate. + *
+ */ + tpr?: number; + + /** + *+ * The model threshold that specifies an acceptable fraud capture rate. For example, a threshold of 500 means any + * model score 500 or above is labeled as fraud. + *
+ */ + threshold?: number; +} + +export namespace TFIMetricDataPoint { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TFIMetricDataPoint): any => ({ + ...obj, + }); +} + +/** + *+ * The Transaction Fraud Insights (TFI) model performance score. + *
+ */ +export interface TFIModelPerformance { + /** + *+ * The area under the curve (auc). This summarizes the total positive rate (tpr) and false positive rate (FPR) across all possible model score thresholds. + *
+ */ + auc?: number; +} + +export namespace TFIModelPerformance { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TFIModelPerformance): any => ({ + ...obj, + }); +} + +/** + *+ * The Transaction Fraud Insights (TFI) model training metric details. + *
+ */ +export interface TFITrainingMetricsValue { + /** + *+ * The model's performance metrics data points. + *
+ */ + metricDataPoints?: TFIMetricDataPoint[]; + + /** + *+ * The model performance score. + *
+ */ + modelPerformance?: TFIModelPerformance; +} + +export namespace TFITrainingMetricsValue { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TFITrainingMetricsValue): any => ({ + ...obj, + }); +} + +/** + *+ * The training metrics details. + *
+ */ +export interface TrainingMetricsV2 { + /** + *+ * The Online Fraud Insights (OFI) model training metric details. + *
+ */ + ofi?: OFITrainingMetricsValue; + + /** + *+ * The Transaction Fraud Insights (TFI) model training metric details. + *
+ */ + tfi?: TFITrainingMetricsValue; + + /** + *+ * The Account Takeover Insights (ATI) model training metric details. + *
+ */ + ati?: ATITrainingMetricsValue; +} + +export namespace TrainingMetricsV2 { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TrainingMetricsV2): any => ({ + ...obj, + }); +} + +/** + *+ * The training result details. + *
+ */ +export interface TrainingResultV2 { + /** + *The model training data validation metrics.
+ */ + dataValidationMetrics?: DataValidationMetrics; + + /** + *+ * The training metric details. + *
+ */ + trainingMetricsV2?: TrainingMetricsV2; + + /** + *The variable importance metrics details.
+ */ + variableImportanceMetrics?: VariableImportanceMetrics; + + /** + *+ * The variable importance metrics of the aggregated variables. + *
+ *Account Takeover Insights (ATI) model uses event variables from the login data you
+ * provide to continuously calculate a set of variables (aggregated variables) based on historical events. For example, your ATI model might calculate the number of times an user has logged in using the same IP address.
+ * In this case, event variables used to derive the aggregated variables are IP address
and user
.
The details of the model version.
*/ @@ -1967,6 +2451,13 @@ export interface ModelVersionDetail { *The model version ARN.
*/ arn?: string; + + /** + *+ * The training result details. The details include the relative importance of the variables. + *
+ */ + trainingResultV2?: TrainingResultV2; } export namespace ModelVersionDetail { @@ -3071,6 +3562,17 @@ export interface PredictionExplanations { * */ variableImpactExplanations?: VariableImpactExplanation[]; + + /** + *+ * The details of the aggregated variables impact on the prediction score. + *
+ * + *Account Takeover Insights (ATI) model uses event variables from the login data you
+ * provide to continuously calculate a set of variables (aggregated variables) based on historical events. For example, your ATI model might calculate the number of times an user has logged in using the same IP address.
+ * In this case, event variables used to derive the aggregated variables are IP address
and user
.
\n The challenge rate. This indicates the percentage of login events that the model recommends to challenge such as \n one-time password, multi-factor authentication, and investigations.\n
" + } + }, + "adr": { + "target": "com.amazonaws.frauddetector#float", + "traits": { + "smithy.api#documentation": "\n The anomaly discovery rate. This metric quantifies the percentage of anomalies that can be detected by the model at the selected score threshold. \n A lower score threshold increases the percentage of anomalies captured by the model, but would also require challenging a larger percentage of \n login events, leading to a higher customer friction.\n
" + } + }, + "threshold": { + "target": "com.amazonaws.frauddetector#float", + "traits": { + "smithy.api#documentation": "\n The model's threshold that specifies an acceptable fraud capture rate. For example, a threshold of 500 means any model score 500 or above is \n labeled as fraud.\n
" + } + }, + "atodr": { + "target": "com.amazonaws.frauddetector#float", + "traits": { + "smithy.api#documentation": "\n The account takeover discovery rate. This metric quantifies the percentage of account compromise events that can be detected by the model at the selected score threshold.\n This metric is only available if 50 or more entities with at-least one labeled account takeover event is present in the ingested dataset.\n
" + } + } + }, + "traits": { + "smithy.api#documentation": "\n The Account Takeover Insights (ATI) model performance metrics data points.\n
" + } + }, + "com.amazonaws.frauddetector#ATIMetricDataPointsList": { + "type": "list", + "member": { + "target": "com.amazonaws.frauddetector#ATIMetricDataPoint" + } + }, + "com.amazonaws.frauddetector#ATIModelPerformance": { + "type": "structure", + "members": { + "asi": { + "target": "com.amazonaws.frauddetector#float", + "traits": { + "smithy.api#documentation": "\n The anomaly separation index (ASI) score. This metric summarizes the overall ability of the model to separate anomalous activities from the normal behavior. Depending on the business, a \n large fraction of these anomalous activities can be malicious and correspond to the account takeover attacks. A model with no separability power will have the lowest possible \n ASI score of 0.5, whereas the a model with a high separability power will have the highest possible ASI score of 1.0\n
" + } + } + }, + "traits": { + "smithy.api#documentation": "\n The Account Takeover Insights (ATI) model performance score.\n
" + } + }, + "com.amazonaws.frauddetector#ATITrainingMetricsValue": { + "type": "structure", + "members": { + "metricDataPoints": { + "target": "com.amazonaws.frauddetector#ATIMetricDataPointsList", + "traits": { + "smithy.api#documentation": "\n The model's performance metrics data points.\n
" + } + }, + "modelPerformance": { + "target": "com.amazonaws.frauddetector#ATIModelPerformance", + "traits": { + "smithy.api#documentation": "\n The model's overall performance scores.\n
" + } + } + }, + "traits": { + "smithy.api#documentation": "\n The Account Takeover Insights (ATI) model training metric details. \n
" + } + }, "com.amazonaws.frauddetector#AWSHawksNestServiceFacade": { "type": "service", "traits": { @@ -273,6 +345,68 @@ "smithy.api#httpError": 403 } }, + "com.amazonaws.frauddetector#AggregatedLogOddsMetric": { + "type": "structure", + "members": { + "variableNames": { + "target": "com.amazonaws.frauddetector#ListOfStrings", + "traits": { + "smithy.api#documentation": "\n The names of all the variables. \n
", + "smithy.api#required": {} + } + }, + "aggregatedVariablesImportance": { + "target": "com.amazonaws.frauddetector#float", + "traits": { + "smithy.api#documentation": "\n The relative importance of the variables in the list to the other event variable. \n
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "The log odds metric details.
\n \nAccount Takeover Insights (ATI) model uses event variables from the login data you \n provide to continuously calculate a set of variables (aggregated variables) based on historical events. For example, your ATI model might calculate the number of times an user has logged in using the same IP address. \n In this case, event variables used to derive the aggregated variables are IP address
and user
.
\n The names of all the event variables that were used to derive the aggregated variables. \n
" + } + }, + "relativeImpact": { + "target": "com.amazonaws.frauddetector#string", + "traits": { + "smithy.api#documentation": "\n The relative impact of the aggregated variables in terms of magnitude on the prediction scores. \n
" + } + }, + "logOddsImpact": { + "target": "com.amazonaws.frauddetector#float", + "traits": { + "smithy.api#documentation": "\n The raw, uninterpreted value represented as log-odds of the fraud. These values are usually between -10 to +10, but range from -infinity to +infinity.
\nA positive value indicates that the variables drove the risk score up.
\nA negative value indicates that the variables drove the risk score down.
\n\n The details of the impact of aggregated variables on the prediction score.
\n \nAccount Takeover Insights (ATI) model uses the login data you \n provide to continuously calculate a set of variables (aggregated variables) based on historical events. For example, the model might calculate the number of times an user has logged in using the same IP address. \n In this case, event variables used to derive the aggregated variables are IP address
and user
.
\n List of variables' metrics.\n
" + } + } + }, + "traits": { + "smithy.api#documentation": "The details of the relative importance of the aggregated variables.
\n \nAccount Takeover Insights (ATI) model uses event variables from the login data you \n provide to continuously calculate a set of variables (aggregated variables) based on historical events. For example, your ATI model might calculate the number of times an user has logged in using the same IP address. \n In this case, event variables used to derive the aggregated variables are IP address
and user
.
The file-specific model training validation messages.
" + "smithy.api#documentation": "The file-specific model training data validation messages.
" } }, "fieldLevelMessages": { @@ -1473,7 +1607,7 @@ } }, "traits": { - "smithy.api#documentation": "The model training validation messages.
" + "smithy.api#documentation": "The model training data validation metrics.
" } }, "com.amazonaws.frauddetector#DeleteAuditHistory": { @@ -4834,8 +4968,7 @@ "labelMapper": { "target": "com.amazonaws.frauddetector#labelMapper", "traits": { - "smithy.api#documentation": "The label mapper maps the Amazon Fraud Detector supported model classification labels (FRAUD
, LEGIT
) to the appropriate event type labels. For example, if \"FRAUD
\" and \"LEGIT
\" are Amazon Fraud Detector supported labels, this mapper could be: {\"FRAUD\" => [\"0\"]
, \"LEGIT\" => [\"1\"]}
or {\"FRAUD\" => [\"false\"]
, \"LEGIT\" => [\"true\"]}
or {\"FRAUD\" => [\"fraud\", \"abuse\"]
, \"LEGIT\" => [\"legit\", \"safe\"]}
. The value part of the mapper is a list, because you may have multiple label variants from your event type for a single Amazon Fraud Detector label.\n
The label mapper maps the Amazon Fraud Detector supported model classification labels (FRAUD
, LEGIT
) to the appropriate event type labels. For example, if \"FRAUD
\" and \"LEGIT
\" are Amazon Fraud Detector supported labels, this mapper could be: {\"FRAUD\" => [\"0\"]
, \"LEGIT\" => [\"1\"]}
or {\"FRAUD\" => [\"false\"]
, \"LEGIT\" => [\"true\"]}
or {\"FRAUD\" => [\"fraud\", \"abuse\"]
, \"LEGIT\" => [\"legit\", \"safe\"]}
. The value part of the mapper is a list, because you may have multiple label variants from your event type for a single Amazon Fraud Detector label.\n
The model version ARN.
" } + }, + "trainingResultV2": { + "target": "com.amazonaws.frauddetector#TrainingResultV2", + "traits": { + "smithy.api#documentation": "\n The training result details. The details include the relative importance of the variables.\n
" + } } }, "traits": { @@ -5591,6 +5746,78 @@ } } }, + "com.amazonaws.frauddetector#OFIMetricDataPoint": { + "type": "structure", + "members": { + "fpr": { + "target": "com.amazonaws.frauddetector#float", + "traits": { + "smithy.api#documentation": "\n The false positive rate. This is the percentage of total legitimate events that are incorrectly predicted as fraud.\n
" + } + }, + "precision": { + "target": "com.amazonaws.frauddetector#float", + "traits": { + "smithy.api#documentation": "\n The percentage of fraud events correctly predicted as fraudulent as compared to all events predicted as fraudulent.\n
" + } + }, + "tpr": { + "target": "com.amazonaws.frauddetector#float", + "traits": { + "smithy.api#documentation": "\n The true positive rate. This is the percentage of total fraud the model detects. Also known as capture rate.\n
" + } + }, + "threshold": { + "target": "com.amazonaws.frauddetector#float", + "traits": { + "smithy.api#documentation": "\n The model threshold that specifies an acceptable fraud capture rate. For example, a threshold of 500 means any model score 500 or above is labeled as fraud.\n
" + } + } + }, + "traits": { + "smithy.api#documentation": "\n The Online Fraud Insights (OFI) model performance metrics data points. \n
" + } + }, + "com.amazonaws.frauddetector#OFIMetricDataPointsList": { + "type": "list", + "member": { + "target": "com.amazonaws.frauddetector#OFIMetricDataPoint" + } + }, + "com.amazonaws.frauddetector#OFIModelPerformance": { + "type": "structure", + "members": { + "auc": { + "target": "com.amazonaws.frauddetector#float", + "traits": { + "smithy.api#documentation": "\n The area under the curve (auc). This summarizes the total positive rate (tpr) and false positive rate (FPR) across all possible model score thresholds.\n
" + } + } + }, + "traits": { + "smithy.api#documentation": "\n The Online Fraud Insights (OFI) model performance score.\n
" + } + }, + "com.amazonaws.frauddetector#OFITrainingMetricsValue": { + "type": "structure", + "members": { + "metricDataPoints": { + "target": "com.amazonaws.frauddetector#OFIMetricDataPointsList", + "traits": { + "smithy.api#documentation": "\n The model's performance metrics data points.\n
" + } + }, + "modelPerformance": { + "target": "com.amazonaws.frauddetector#OFIModelPerformance", + "traits": { + "smithy.api#documentation": "\n The model's overall performance score.\n
" + } + } + }, + "traits": { + "smithy.api#documentation": "\n The Online Fraud Insights (OFI) model training metric details. \n
" + } + }, "com.amazonaws.frauddetector#Outcome": { "type": "structure", "members": { @@ -5653,6 +5880,12 @@ "traits": { "smithy.api#documentation": "\nThe details of the event variable's impact on the prediction score.\n
" } + }, + "aggregatedVariablesImpactExplanations": { + "target": "com.amazonaws.frauddetector#ListOfAggregatedVariablesImpactExplanations", + "traits": { + "smithy.api#documentation": "\n The details of the aggregated variables impact on the prediction score. \n
\n \nAccount Takeover Insights (ATI) model uses event variables from the login data you \n provide to continuously calculate a set of variables (aggregated variables) based on historical events. For example, your ATI model might calculate the number of times an user has logged in using the same IP address. \n In this case, event variables used to derive the aggregated variables are IP address
and user
.
\n The false positive rate. This is the percentage of total legitimate events that are incorrectly predicted as fraud.\n
" + } + }, + "precision": { + "target": "com.amazonaws.frauddetector#float", + "traits": { + "smithy.api#documentation": "\n The percentage of fraud events correctly predicted as fraudulent as compared to all events predicted as fraudulent.\n
" + } + }, + "tpr": { + "target": "com.amazonaws.frauddetector#float", + "traits": { + "smithy.api#documentation": "\n The true positive rate. This is the percentage of total fraud the model detects. Also known as capture rate.\n
" + } + }, + "threshold": { + "target": "com.amazonaws.frauddetector#float", + "traits": { + "smithy.api#documentation": "\n The model threshold that specifies an acceptable fraud capture rate. For example, a threshold of 500 means any \n model score 500 or above is labeled as fraud.\n
" + } + } + }, + "traits": { + "smithy.api#documentation": "\n The performance metrics data points for Transaction Fraud Insights (TFI) model. \n
" + } + }, + "com.amazonaws.frauddetector#TFIMetricDataPointsList": { + "type": "list", + "member": { + "target": "com.amazonaws.frauddetector#TFIMetricDataPoint" + } + }, + "com.amazonaws.frauddetector#TFIModelPerformance": { + "type": "structure", + "members": { + "auc": { + "target": "com.amazonaws.frauddetector#float", + "traits": { + "smithy.api#documentation": "\n The area under the curve (auc). This summarizes the total positive rate (tpr) and false positive rate (FPR) across all possible model score thresholds.\n
" + } + } + }, + "traits": { + "smithy.api#documentation": "\n The Transaction Fraud Insights (TFI) model performance score.\n
" + } + }, + "com.amazonaws.frauddetector#TFITrainingMetricsValue": { + "type": "structure", + "members": { + "metricDataPoints": { + "target": "com.amazonaws.frauddetector#TFIMetricDataPointsList", + "traits": { + "smithy.api#documentation": "\n The model's performance metrics data points.\n
" + } + }, + "modelPerformance": { + "target": "com.amazonaws.frauddetector#TFIModelPerformance", + "traits": { + "smithy.api#documentation": "\n The model performance score.\n
" + } + } + }, + "traits": { + "smithy.api#documentation": "\n The Transaction Fraud Insights (TFI) model training metric details. \n
" + } + }, "com.amazonaws.frauddetector#Tag": { "type": "structure", "members": { @@ -6512,10 +6817,7 @@ } }, "labelSchema": { - "target": "com.amazonaws.frauddetector#LabelSchema", - "traits": { - "smithy.api#required": {} - } + "target": "com.amazonaws.frauddetector#LabelSchema" } }, "traits": { @@ -6557,6 +6859,32 @@ "smithy.api#documentation": "The training metric details.
" } }, + "com.amazonaws.frauddetector#TrainingMetricsV2": { + "type": "structure", + "members": { + "ofi": { + "target": "com.amazonaws.frauddetector#OFITrainingMetricsValue", + "traits": { + "smithy.api#documentation": "\n The Online Fraud Insights (OFI) model training metric details. \n
" + } + }, + "tfi": { + "target": "com.amazonaws.frauddetector#TFITrainingMetricsValue", + "traits": { + "smithy.api#documentation": "\n The Transaction Fraud Insights (TFI) model training metric details. \n
" + } + }, + "ati": { + "target": "com.amazonaws.frauddetector#ATITrainingMetricsValue", + "traits": { + "smithy.api#documentation": "\n The Account Takeover Insights (ATI) model training metric details. \n
" + } + } + }, + "traits": { + "smithy.api#documentation": "\n The training metrics details.\n
" + } + }, "com.amazonaws.frauddetector#TrainingResult": { "type": "structure", "members": { @@ -6583,6 +6911,32 @@ "smithy.api#documentation": "The training result details.
" } }, + "com.amazonaws.frauddetector#TrainingResultV2": { + "type": "structure", + "members": { + "dataValidationMetrics": { + "target": "com.amazonaws.frauddetector#DataValidationMetrics" + }, + "trainingMetricsV2": { + "target": "com.amazonaws.frauddetector#TrainingMetricsV2", + "traits": { + "smithy.api#documentation": "\n The training metric details.\n
" + } + }, + "variableImportanceMetrics": { + "target": "com.amazonaws.frauddetector#VariableImportanceMetrics" + }, + "aggregatedVariablesImportanceMetrics": { + "target": "com.amazonaws.frauddetector#AggregatedVariablesImportanceMetrics", + "traits": { + "smithy.api#documentation": "\n The variable importance metrics of the aggregated variables.\n
\nAccount Takeover Insights (ATI) model uses event variables from the login data you \n provide to continuously calculate a set of variables (aggregated variables) based on historical events. For example, your ATI model might calculate the number of times an user has logged in using the same IP address. \n In this case, event variables used to derive the aggregated variables are IP address
and user
.
\n The training result details.\n
" + } + }, "com.amazonaws.frauddetector#UnlabeledEventsTreatment": { "type": "string", "traits": { @@ -7750,7 +8104,7 @@ "target": "com.amazonaws.frauddetector#string" }, "value": { - "target": "com.amazonaws.frauddetector#NonEmptyListOfStrings" + "target": "com.amazonaws.frauddetector#ListOfStrings" } }, "com.amazonaws.frauddetector#labelsMaxResults": { @@ -7936,7 +8290,7 @@ "traits": { "smithy.api#length": { "min": 1, - "max": 1024 + "max": 8192 }, "smithy.api#sensitive": {} } From 91730562c1fe693c0270b088e5bd0c02d50372d4 Mon Sep 17 00:00:00 2001 From: awstoolsIdentifies the clone group to which the DB cluster is associated.
+ */ + CloneGroupId?: string; + /** *Specifies the time when the cluster was created, in Universal Coordinated Time * (UTC).
@@ -1986,6 +1991,11 @@ export interface CreateDBInstanceMessage { */ DBClusterIdentifier: string | undefined; + /** + *A value that indicates whether to copy tags from the DB instance to snapshots of the DB instance. By default, tags are not copied.
+ */ + CopyTagsToSnapshot?: boolean; + /** *A value that specifies the order in which an Amazon DocumentDB replica is promoted to the * primary instance after a failure of the existing primary instance.
@@ -2434,6 +2444,11 @@ export interface DBInstance { */ CACertificateIdentifier?: string; + /** + *A value that indicates whether to copy tags from the DB instance to snapshots of the DB instance. By default, tags are not copied.
+ */ + CopyTagsToSnapshot?: boolean; + /** *A value that specifies the order in which an Amazon DocumentDB replica is promoted to the * primary instance after a failure of the existing primary instance.
@@ -5684,6 +5699,11 @@ export interface ModifyDBInstanceMessage { */ CACertificateIdentifier?: string; + /** + *A value that indicates whether to copy all tags from the DB instance to snapshots of the DB instance. By default, tags are not copied.
+ */ + CopyTagsToSnapshot?: boolean; + /** *A value that specifies the order in which an Amazon DocumentDB replica is promoted to the primary instance after a failure of the existing primary instance.
*Default: 1
@@ -6326,6 +6346,26 @@ export interface RestoreDBClusterToPointInTimeMessage { */ DBClusterIdentifier: string | undefined; + /** + *The type of restore to be performed. You can specify one of the following values:
+ *
+ * full-copy
- The new DB cluster is restored as a full copy of the
+ * source DB cluster.
+ * copy-on-write
- The new DB cluster is restored as a clone of the
+ * source DB cluster.
Constraints: You can't specify copy-on-write
if the engine version of the source DB cluster is earlier than 1.11.
If you don't specify a RestoreType
value, then the new DB cluster is
+ * restored as a full copy of the source DB cluster.
The identifier of the source cluster from which to restore.
*Constraints:
diff --git a/clients/client-docdb/src/protocols/Aws_query.ts b/clients/client-docdb/src/protocols/Aws_query.ts index 4583ca7d61601..11bb216ad5a89 100644 --- a/clients/client-docdb/src/protocols/Aws_query.ts +++ b/clients/client-docdb/src/protocols/Aws_query.ts @@ -5068,6 +5068,9 @@ const serializeAws_queryCreateDBInstanceMessage = (input: CreateDBInstanceMessag if (input.DBClusterIdentifier !== undefined && input.DBClusterIdentifier !== null) { entries["DBClusterIdentifier"] = input.DBClusterIdentifier; } + if (input.CopyTagsToSnapshot !== undefined && input.CopyTagsToSnapshot !== null) { + entries["CopyTagsToSnapshot"] = input.CopyTagsToSnapshot; + } if (input.PromotionTier !== undefined && input.PromotionTier !== null) { entries["PromotionTier"] = input.PromotionTier; } @@ -5914,6 +5917,9 @@ const serializeAws_queryModifyDBInstanceMessage = (input: ModifyDBInstanceMessag if (input.CACertificateIdentifier !== undefined && input.CACertificateIdentifier !== null) { entries["CACertificateIdentifier"] = input.CACertificateIdentifier; } + if (input.CopyTagsToSnapshot !== undefined && input.CopyTagsToSnapshot !== null) { + entries["CopyTagsToSnapshot"] = input.CopyTagsToSnapshot; + } if (input.PromotionTier !== undefined && input.PromotionTier !== null) { entries["PromotionTier"] = input.PromotionTier; } @@ -6188,6 +6194,9 @@ const serializeAws_queryRestoreDBClusterToPointInTimeMessage = ( if (input.DBClusterIdentifier !== undefined && input.DBClusterIdentifier !== null) { entries["DBClusterIdentifier"] = input.DBClusterIdentifier; } + if (input.RestoreType !== undefined && input.RestoreType !== null) { + entries["RestoreType"] = input.RestoreType; + } if (input.SourceDBClusterIdentifier !== undefined && input.SourceDBClusterIdentifier !== null) { entries["SourceDBClusterIdentifier"] = input.SourceDBClusterIdentifier; } @@ -6620,6 +6629,7 @@ const deserializeAws_queryDBCluster = (output: any, context: __SerdeContext): DB DbClusterResourceId: undefined, DBClusterArn: undefined, AssociatedRoles: undefined, + CloneGroupId: undefined, ClusterCreateTime: undefined, EnabledCloudwatchLogsExports: undefined, DeletionProtection: undefined, @@ -6742,6 +6752,9 @@ const deserializeAws_queryDBCluster = (output: any, context: __SerdeContext): DB context ); } + if (output["CloneGroupId"] !== undefined) { + contents.CloneGroupId = __expectString(output["CloneGroupId"]); + } if (output["ClusterCreateTime"] !== undefined) { contents.ClusterCreateTime = __expectNonNull(__parseRfc3339DateTime(output["ClusterCreateTime"])); } @@ -7306,6 +7319,7 @@ const deserializeAws_queryDBInstance = (output: any, context: __SerdeContext): D KmsKeyId: undefined, DbiResourceId: undefined, CACertificateIdentifier: undefined, + CopyTagsToSnapshot: undefined, PromotionTier: undefined, DBInstanceArn: undefined, EnabledCloudwatchLogsExports: undefined, @@ -7395,6 +7409,9 @@ const deserializeAws_queryDBInstance = (output: any, context: __SerdeContext): D if (output["CACertificateIdentifier"] !== undefined) { contents.CACertificateIdentifier = __expectString(output["CACertificateIdentifier"]); } + if (output["CopyTagsToSnapshot"] !== undefined) { + contents.CopyTagsToSnapshot = __parseBoolean(output["CopyTagsToSnapshot"]); + } if (output["PromotionTier"] !== undefined) { contents.PromotionTier = __strictParseInt32(output["PromotionTier"]) as number; } diff --git a/codegen/sdk-codegen/aws-models/docdb.json b/codegen/sdk-codegen/aws-models/docdb.json index 416a7f55de24d..7fdd7c09c7503 100644 --- a/codegen/sdk-codegen/aws-models/docdb.json 
+++ b/codegen/sdk-codegen/aws-models/docdb.json @@ -84,6 +84,9 @@ "input": { "target": "com.amazonaws.docdb#AddTagsToResourceMessage" }, + "output": { + "target": "smithy.api#Unit" + }, "errors": [ { "target": "com.amazonaws.docdb#DBClusterNotFoundFault" @@ -1150,6 +1153,12 @@ "smithy.api#required": {} } }, + "CopyTagsToSnapshot": { + "target": "com.amazonaws.docdb#BooleanOptional", + "traits": { + "smithy.api#documentation": "A value that indicates whether to copy tags from the DB instance to snapshots of the DB instance. By default, tags are not copied.
" + } + }, "PromotionTier": { "target": "com.amazonaws.docdb#IntegerOptional", "traits": { @@ -1604,6 +1613,12 @@ "smithy.api#documentation": "Provides a list of the Identity and Access Management (IAM) roles that are associated with the cluster. (IAM) roles that are associated with a cluster grant permission for the cluster to access other Amazon Web Services services on your behalf.
" } }, + "CloneGroupId": { + "target": "com.amazonaws.docdb#String", + "traits": { + "smithy.api#documentation": "Identifies the clone group to which the DB cluster is associated.
" + } + }, "ClusterCreateTime": { "target": "com.amazonaws.docdb#TStamp", "traits": { @@ -2337,6 +2352,12 @@ "smithy.api#documentation": "The identifier of the CA certificate for this DB instance.
" } }, + "CopyTagsToSnapshot": { + "target": "com.amazonaws.docdb#BooleanOptional", + "traits": { + "smithy.api#documentation": "A value that indicates whether to copy tags from the DB instance to snapshots of the DB instance. By default, tags are not copied.
" + } + }, "PromotionTier": { "target": "com.amazonaws.docdb#IntegerOptional", "traits": { @@ -2802,6 +2823,9 @@ "input": { "target": "com.amazonaws.docdb#DeleteDBClusterParameterGroupMessage" }, + "output": { + "target": "smithy.api#Unit" + }, "errors": [ { "target": "com.amazonaws.docdb#DBParameterGroupNotFoundFault" @@ -2937,6 +2961,9 @@ "input": { "target": "com.amazonaws.docdb#DeleteDBSubnetGroupMessage" }, + "output": { + "target": "smithy.api#Unit" + }, "errors": [ { "target": "com.amazonaws.docdb#DBSubnetGroupNotFoundFault" @@ -5411,6 +5438,12 @@ "smithy.api#documentation": "Indicates the certificate that needs to be associated with the instance.
" } }, + "CopyTagsToSnapshot": { + "target": "com.amazonaws.docdb#BooleanOptional", + "traits": { + "smithy.api#documentation": "A value that indicates whether to copy all tags from the DB instance to snapshots of the DB instance. By default, tags are not copied.
" + } + }, "PromotionTier": { "target": "com.amazonaws.docdb#IntegerOptional", "traits": { @@ -6157,6 +6190,9 @@ "input": { "target": "com.amazonaws.docdb#RemoveTagsFromResourceMessage" }, + "output": { + "target": "smithy.api#Unit" + }, "errors": [ { "target": "com.amazonaws.docdb#DBClusterNotFoundFault" @@ -6494,6 +6530,12 @@ "smithy.api#required": {} } }, + "RestoreType": { + "target": "com.amazonaws.docdb#String", + "traits": { + "smithy.api#documentation": "The type of restore to be performed. You can specify one of the following values:
\n\n full-copy
- The new DB cluster is restored as a full copy of the\n source DB cluster.
\n copy-on-write
- The new DB cluster is restored as a clone of the\n source DB cluster.
Constraints: You can't specify copy-on-write
if the engine version of the source DB cluster is earlier than 1.11.
If you don't specify a RestoreType
value, then the new DB cluster is\n restored as a full copy of the source DB cluster.