diff --git a/CHANGELOG.md b/CHANGELOG.md index bd9022a9a2199..a3ec66c91e380 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,67 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. +# [3.135.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.134.0...v3.135.0) (2022-07-21) + + +### Features + +* **client-athena:** This feature allows customers to retrieve runtime statistics for completed queries ([6fe9621](https://github.com/aws/aws-sdk-js-v3/commit/6fe9621621e896fa31ad115c3865a35f00b12d3a)) +* **client-cloudwatch:** Adding support for the suppression of Composite Alarm actions ([e94c855](https://github.com/aws/aws-sdk-js-v3/commit/e94c855cd87e381d4cbf34685fe4f68da1031ffa)) +* **client-docdb:** Enable copy-on-write restore type ([9173056](https://github.com/aws/aws-sdk-js-v3/commit/91730562c1fe693c0270b088e5bd0c02d50372d4)) +* **client-ec2-instance-connect:** This release includes a new exception type "EC2InstanceUnavailableException" for SendSSHPublicKey and SendSerialConsoleSSHPublicKey APIs. ([a2aef86](https://github.com/aws/aws-sdk-js-v3/commit/a2aef86781ea0833877ada3ed3dcdbaab5d700ec)) +* **client-frauddetector:** The release introduces Account Takeover Insights (ATI) model. The ATI model detects fraud relating to account takeover. This release also adds support for new variable types: ARE_CREDENTIALS_VALID and SESSION_ID and adds new structures to Model Version APIs. ([b8837a8](https://github.com/aws/aws-sdk-js-v3/commit/b8837a8fe3ee514eed76a87d54e9165433ea0215)) +* **client-iotsitewise:** Added asynchronous API to ingest bulk historical and current data into IoT SiteWise. ([972d020](https://github.com/aws/aws-sdk-js-v3/commit/972d020ec9d8e713b9306aa8a39b036886127fc0)) +* **client-kendra:** Amazon Kendra now provides Oauth2 support for SharePoint Online. 
For more information, see https://docs.aws.amazon.com/kendra/latest/dg/data-source-sharepoint.html ([de0697a](https://github.com/aws/aws-sdk-js-v3/commit/de0697ad65ae98358ea534d9c2c90eadf4111bce)) +* **client-network-firewall:** Network Firewall now supports referencing dynamic IP sets from stateful rule groups, for IP sets stored in Amazon VPC prefix lists. ([7fb7490](https://github.com/aws/aws-sdk-js-v3/commit/7fb7490a4c093be17407d16f77151f3fa7c3e03a)) + + + + + +# [3.134.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.133.0...v3.134.0) (2022-07-20) + + +### Features + +* **client-iot:** GA release the ability to enable/disable IoT Fleet Indexing for Device Defender and Named Shadow information, and search them through IoT Fleet Indexing APIs. This includes Named Shadow Selection as a part of the UpdateIndexingConfiguration API. ([358a73e](https://github.com/aws/aws-sdk-js-v3/commit/358a73e37eca1782a115e9c304bdc27889fc7026)) +* **clients:** update client endpoints as of 2022-07-20 ([e67364c](https://github.com/aws/aws-sdk-js-v3/commit/e67364c42bb347d50bd98d1a3be547dae722cea8)) + + + + + +# [3.133.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.132.0...v3.133.0) (2022-07-19) + + +### Features + +* **client-devops-guru:** Added new APIs for log anomaly detection feature. ([90ed85b](https://github.com/aws/aws-sdk-js-v3/commit/90ed85b6817d3f035124d62818670b3b4807c492)) +* **client-sagemaker-edge:** Amazon SageMaker Edge Manager provides lightweight model deployment feature to deploy machine learning models on requested devices. ([316a691](https://github.com/aws/aws-sdk-js-v3/commit/316a691491232facc572b8482f9084355702bfbf)) +* **client-sagemaker:** Fixed an issue with cross account QueryLineage ([ed39bc2](https://github.com/aws/aws-sdk-js-v3/commit/ed39bc2c1bd0d2bad021a043b7642fae5ea9263d)) +* **client-workspaces:** Increased the character limit of the login message from 850 to 2000 characters. 
([6481e59](https://github.com/aws/aws-sdk-js-v3/commit/6481e598bf2360dfe18714996867eeed605a6f82)) + + + + + +# [3.132.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.131.0...v3.132.0) (2022-07-18) + + +### Features + +* **client-application-discovery-service:** Add AWS Agentless Collector details to the GetDiscoverySummary API response ([d790bda](https://github.com/aws/aws-sdk-js-v3/commit/d790bda6e7dffcb5a74c12550d4bbd14bdb4051c)) +* **client-elasticache:** Adding AutoMinorVersionUpgrade in the DescribeReplicationGroups API ([e694134](https://github.com/aws/aws-sdk-js-v3/commit/e694134054db9c206aec4b6e6fa26b8ab807d6eb)) +* **client-kms:** Added support for the SM2 KeySpec in China Partition Regions ([250fd19](https://github.com/aws/aws-sdk-js-v3/commit/250fd19d65a83b3d183ced9863bf6aba8f1fb698)) +* **client-mediapackage:** This release adds "IncludeIframeOnlyStream" for Dash endpoints and increases the number of supported video and audio encryption presets for Speke v2 ([1af0368](https://github.com/aws/aws-sdk-js-v3/commit/1af0368f117fcf8c5bc43659183aec028b2ecc8e)) +* **client-sagemaker:** Amazon SageMaker Edge Manager provides lightweight model deployment feature to deploy machine learning models on requested devices. ([3f89139](https://github.com/aws/aws-sdk-js-v3/commit/3f891393cfe5002f24317a340f88ce0e1df7aa44)) +* **client-sso-admin:** AWS SSO now supports attaching customer managed policies and a permissions boundary to your permission sets. This release adds new API operations to manage and view the customer managed policies and the permissions boundary for a given permission set. 
([e65c9b2](https://github.com/aws/aws-sdk-js-v3/commit/e65c9b2e1203291d68d0fdb11d1b467d83e37cf5)) +* **clients:** update client endpoints as of 2022-07-18 ([87e9e29](https://github.com/aws/aws-sdk-js-v3/commit/87e9e2970f5fd24742968462e3944235c5e08380)) + + + + + # [3.131.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.130.0...v3.131.0) (2022-07-15) diff --git a/clients/client-acm-pca/CHANGELOG.md b/clients/client-acm-pca/CHANGELOG.md index 57cdb708def60..a2753394c09df 100644 --- a/clients/client-acm-pca/CHANGELOG.md +++ b/clients/client-acm-pca/CHANGELOG.md @@ -3,6 +3,14 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. +# [3.134.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.133.0...v3.134.0) (2022-07-20) + +**Note:** Version bump only for package @aws-sdk/client-acm-pca + + + + + # [3.131.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.130.0...v3.131.0) (2022-07-15) **Note:** Version bump only for package @aws-sdk/client-acm-pca diff --git a/clients/client-acm-pca/README.md b/clients/client-acm-pca/README.md index af45c44b3ae5d..ef0891e01576f 100644 --- a/clients/client-acm-pca/README.md +++ b/clients/client-acm-pca/README.md @@ -22,7 +22,6 @@ depending on the operation. Throttling means that ACM Private CA rejects an othe request because the request exceeds the operation's quota for the number of requests per second. When a request is throttled, ACM Private CA returns a ThrottlingException error. ACM Private CA does not guarantee a minimum request rate for APIs.
-To see an up-to-date list of your ACM Private CA quotas, or to request a quota increase, log into your Amazon Web Services account and visit the Service Quotas console.
diff --git a/clients/client-acm-pca/package.json b/clients/client-acm-pca/package.json index c676f34ca793b..f7929f7e92acd 100644 --- a/clients/client-acm-pca/package.json +++ b/clients/client-acm-pca/package.json @@ -1,7 +1,7 @@ { "name": "@aws-sdk/client-acm-pca", "description": "AWS SDK for JavaScript Acm Pca Client for Node.js, Browser and React Native", - "version": "3.131.0", + "version": "3.134.0", "scripts": { "build": "concurrently 'yarn:build:cjs' 'yarn:build:es' 'yarn:build:types'", "build:cjs": "tsc -p tsconfig.cjs.json", diff --git a/clients/client-acm-pca/src/ACMPCA.ts b/clients/client-acm-pca/src/ACMPCA.ts index 6531ecdc08ccb..c478e5b669cfd 100644 --- a/clients/client-acm-pca/src/ACMPCA.ts +++ b/clients/client-acm-pca/src/ACMPCA.ts @@ -120,7 +120,6 @@ import { * request because the request exceeds the operation's quota for the number of requests per * second. When a request is throttled, ACM Private CA returns a ThrottlingException error. ACM Private CA does not guarantee a minimum request * rate for APIs. - * *To see an up-to-date list of your ACM Private CA quotas, or to request a quota increase, * log into your Amazon Web Services account and visit the Service Quotas * console.
diff --git a/clients/client-acm-pca/src/ACMPCAClient.ts b/clients/client-acm-pca/src/ACMPCAClient.ts index 177eef6c20351..d4ec18c8d9bef 100644 --- a/clients/client-acm-pca/src/ACMPCAClient.ts +++ b/clients/client-acm-pca/src/ACMPCAClient.ts @@ -334,7 +334,6 @@ export interface ACMPCAClientResolvedConfig extends ACMPCAClientResolvedConfigTy * request because the request exceeds the operation's quota for the number of requests per * second. When a request is throttled, ACM Private CA returns a ThrottlingException error. ACM Private CA does not guarantee a minimum request * rate for APIs. - * *To see an up-to-date list of your ACM Private CA quotas, or to request a quota increase, * log into your Amazon Web Services account and visit the Service Quotas * console.
diff --git a/clients/client-acm-pca/src/models/models_0.ts b/clients/client-acm-pca/src/models/models_0.ts index 31453c3b2c72a..4dc3caedf3c79 100644 --- a/clients/client-acm-pca/src/models/models_0.ts +++ b/clients/client-acm-pca/src/models/models_0.ts @@ -8,17 +8,14 @@ import { ACMPCAServiceException as __BaseException } from "./ACMPCAServiceExcept */ export interface CustomAttribute { /** - *Specifies the object identifier (OID) of the attribute type of - * the - * relative distinguished name - * (RDN).
+ *Specifies the object identifier (OID) of the attribute type of the relative + * distinguished name (RDN).
*/ ObjectIdentifier: string | undefined; /** * - *Specifies the attribute value of relative distinguished name - * (RDN).
+ *Specifies the attribute value of relative distinguished name (RDN).
*/ Value: string | undefined; } @@ -125,16 +122,9 @@ export interface ASN1Subject { /** * - *Contains a sequence of one or more X.500 relative distinguished - * names - * (RDNs), - * each of which consists of an object identifier (OID) and - * a - * value. For more information, see NIST’s definition of - * Object - * Identifier - * (OID).
- * + *Contains a sequence of one or more X.500 relative distinguished names (RDNs), each of + * which consists of an object identifier (OID) and a value. For more information, see + * NIST’s definition of Object Identifier (OID).
*Custom attributes cannot be used in combination with standard attributes.
*A CRL is typically updated approximately 30 minutes after a certificate * is revoked. If for any reason a CRL update fails, ACM Private CA makes further attempts * every 15 minutes.
- * *CRLs contain the following fields:
*Specifies a - * cryptographic key management compliance standard used for handling CA keys.
+ *Specifies a cryptographic key management compliance standard used for handling CA + * keys.
*Default: FIPS_140_2_LEVEL_3_OR_HIGHER
- *Note: FIPS_140_2_LEVEL_3_OR_HIGHER
is not supported in Region
- * ap-northeast-3. When creating a CA in the ap-northeast-3, you must provide
+ *
+ * Note:
+ * FIPS_140_2_LEVEL_3_OR_HIGHER
is not supported in the following
+ * Regions:
ap-northeast-3
+ *ap-southeast-3
+ *When creating a CA in these Regions, you must provide
* FIPS_140_2_LEVEL_2_OR_HIGHER
as the argument for
* KeyStorageSecurityStandard
. Failure to do this results in an
* InvalidArgsException
with the message, "A certificate authority cannot
@@ -1819,8 +1818,7 @@ export namespace PolicyInformation {
/**
*
Specifies the X.509 extension information for a - * certificate.
+ *Specifies the X.509 extension information for a certificate.
*Extensions present in Specifies the object identifier (OID) of the X.509 extension. For more information,
- * see the
- * Global OID reference
- * database.
+ * Specifies the object identifier (OID) of the X.509 extension. For more information, see the
+ * Global OID reference database.
* Specifies the base64-encoded value of the X.509
- * extension. Specifies the base64-encoded value of the X.509 extension. Specifies the critical flag of
- * the
- * X.509
- * extension. Specifies the critical flag of the X.509 extension. Contains a sequence of one or more X.509 extensions, each of which consists of an
- * object identifier (OID), a base64-encoded
- * value,
- * and the
- * critical flag.
- * For
- * more information, see the Global OID reference
+ * object identifier (OID), a base64-encoded value, and the critical flag. For more
+ * information, see the Global OID reference
* database.
* The OID value of a CustomExtension must not
- * match the OID of a predefined extension. Inventory data for installed discovery agents.CustomExtensions
follow the
* ApiPassthrough
* template
@@ -1829,27 +1827,21 @@ export namespace PolicyInformation {
export interface CustomExtension {
/**
*
- *
Returns query execution runtime statistics related to a single execution of a query if you + * have access to the workgroup in which the query ran. The query execution runtime statistics + * is returned only when QueryExecutionStatus$State is in a SUCCEEDED + * or FAILED state.
+ */ + public getQueryRuntimeStatistics( + args: GetQueryRuntimeStatisticsCommandInput, + options?: __HttpHandlerOptions + ): PromiseReturns table metadata for the specified catalog, database, and table.
*/ diff --git a/clients/client-athena/src/AthenaClient.ts b/clients/client-athena/src/AthenaClient.ts index 00fe431ef2d17..0254e0b16fd99 100644 --- a/clients/client-athena/src/AthenaClient.ts +++ b/clients/client-athena/src/AthenaClient.ts @@ -85,6 +85,10 @@ import { } from "./commands/GetPreparedStatementCommand"; import { GetQueryExecutionCommandInput, GetQueryExecutionCommandOutput } from "./commands/GetQueryExecutionCommand"; import { GetQueryResultsCommandInput, GetQueryResultsCommandOutput } from "./commands/GetQueryResultsCommand"; +import { + GetQueryRuntimeStatisticsCommandInput, + GetQueryRuntimeStatisticsCommandOutput, +} from "./commands/GetQueryRuntimeStatisticsCommand"; import { GetTableMetadataCommandInput, GetTableMetadataCommandOutput } from "./commands/GetTableMetadataCommand"; import { GetWorkGroupCommandInput, GetWorkGroupCommandOutput } from "./commands/GetWorkGroupCommand"; import { ListDatabasesCommandInput, ListDatabasesCommandOutput } from "./commands/ListDatabasesCommand"; @@ -139,6 +143,7 @@ export type ServiceInputTypes = | GetPreparedStatementCommandInput | GetQueryExecutionCommandInput | GetQueryResultsCommandInput + | GetQueryRuntimeStatisticsCommandInput | GetTableMetadataCommandInput | GetWorkGroupCommandInput | ListDataCatalogsCommandInput @@ -177,6 +182,7 @@ export type ServiceOutputTypes = | GetPreparedStatementCommandOutput | GetQueryExecutionCommandOutput | GetQueryResultsCommandOutput + | GetQueryRuntimeStatisticsCommandOutput | GetTableMetadataCommandOutput | GetWorkGroupCommandOutput | ListDataCatalogsCommandOutput diff --git a/clients/client-athena/src/commands/GetQueryRuntimeStatisticsCommand.ts b/clients/client-athena/src/commands/GetQueryRuntimeStatisticsCommand.ts new file mode 100644 index 0000000000000..1d8a4ebd78864 --- /dev/null +++ b/clients/client-athena/src/commands/GetQueryRuntimeStatisticsCommand.ts @@ -0,0 +1,102 @@ +// smithy-typescript generated code +import { getSerdePlugin } from 
"@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { AthenaClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../AthenaClient"; +import { GetQueryRuntimeStatisticsInput, GetQueryRuntimeStatisticsOutput } from "../models/models_0"; +import { + deserializeAws_json1_1GetQueryRuntimeStatisticsCommand, + serializeAws_json1_1GetQueryRuntimeStatisticsCommand, +} from "../protocols/Aws_json1_1"; + +export interface GetQueryRuntimeStatisticsCommandInput extends GetQueryRuntimeStatisticsInput {} +export interface GetQueryRuntimeStatisticsCommandOutput extends GetQueryRuntimeStatisticsOutput, __MetadataBearer {} + +/** + *Returns query execution runtime statistics related to a single execution of a query if you + * have access to the workgroup in which the query ran. The query execution runtime statistics + * is returned only when QueryExecutionStatus$State is in a SUCCEEDED + * or FAILED state.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { AthenaClient, GetQueryRuntimeStatisticsCommand } from "@aws-sdk/client-athena"; // ES Modules import + * // const { AthenaClient, GetQueryRuntimeStatisticsCommand } = require("@aws-sdk/client-athena"); // CommonJS import + * const client = new AthenaClient(config); + * const command = new GetQueryRuntimeStatisticsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetQueryRuntimeStatisticsCommandInput} for command's `input` shape. + * @see {@link GetQueryRuntimeStatisticsCommandOutput} for command's `response` shape. + * @see {@link AthenaClientResolvedConfig | config} for AthenaClient's `config` shape. + * + */ +export class GetQueryRuntimeStatisticsCommand extends $Command< + GetQueryRuntimeStatisticsCommandInput, + GetQueryRuntimeStatisticsCommandOutput, + AthenaClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetQueryRuntimeStatisticsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackThe unique ID of the query execution.
+ */ + QueryExecutionId: string | undefined; +} + +export namespace GetQueryRuntimeStatisticsInput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetQueryRuntimeStatisticsInput): any => ({ + ...obj, + }); +} + +/** + *Statistics such as input rows and bytes read by the query, rows and bytes output by the query, + * and the number of rows written by the query.
+ */ +export interface QueryRuntimeStatisticsRows { + /** + *The number of rows read to execute the query.
+ */ + InputRows?: number; + + /** + *The number of bytes read to execute the query.
+ */ + InputBytes?: number; + + /** + *The number of bytes returned by the query.
+ */ + OutputBytes?: number; + + /** + *The number of rows returned by the query.
+ */ + OutputRows?: number; +} + +export namespace QueryRuntimeStatisticsRows { + /** + * @internal + */ + export const filterSensitiveLog = (obj: QueryRuntimeStatisticsRows): any => ({ + ...obj, + }); +} + +/** + *Timeline statistics such as query queue time, planning time, execution time, service processing + * time, and total execution time.
+ */ +export interface QueryRuntimeStatisticsTimeline { + /** + *The number of milliseconds that the query was in your query queue waiting for + * resources. Note that if transient errors occur, Athena might automatically + * add the query back to the queue.
+ */ + QueryQueueTimeInMillis?: number; + + /** + *The number of milliseconds that Athena took to plan the query processing + * flow. This includes the time spent retrieving table partitions from the data source. + * Note that because the query engine performs the query planning, query planning time is a + * subset of engine processing time.
+ */ + QueryPlanningTimeInMillis?: number; + + /** + *The number of milliseconds that the query took to execute.
+ */ + EngineExecutionTimeInMillis?: number; + + /** + *The number of milliseconds that Athena took to finalize and publish the + * query results after the query engine finished running the query.
+ */ + ServiceProcessingTimeInMillis?: number; + + /** + *The number of milliseconds that Athena took to run the query.
+ */ + TotalExecutionTimeInMillis?: number; +} + +export namespace QueryRuntimeStatisticsTimeline { + /** + * @internal + */ + export const filterSensitiveLog = (obj: QueryRuntimeStatisticsTimeline): any => ({ + ...obj, + }); +} + export interface GetTableMetadataInput { /** *The name of the data catalog that contains the database and table metadata to @@ -3198,3 +3295,147 @@ export namespace UpdateWorkGroupOutput { ...obj, }); } + +/** + *
Stage plan information such as name, identifier, sub plans, and remote sources.
+ */ +export interface QueryStagePlanNode { + /** + *Name of the query stage plan that describes the operation this stage is performing as part of + * query execution.
+ */ + Name?: string; + + /** + *Information about the operation this query stage plan node is performing.
+ */ + Identifier?: string; + + /** + *Stage plan information such as name, identifier, sub plans, and remote sources of child plan nodes/
+ */ + Children?: QueryStagePlanNode[]; + + /** + *Source plan node IDs.
+ */ + RemoteSources?: string[]; +} + +export namespace QueryStagePlanNode { + /** + * @internal + */ + export const filterSensitiveLog = (obj: QueryStagePlanNode): any => ({ + ...obj, + }); +} + +/** + *Stage statistics such as input and output rows and bytes, execution time and stage state. This + * information also includes substages and the query stage plan.
+ */ +export interface QueryStage { + /** + *The identifier for a stage.
+ */ + StageId?: number; + + /** + *State of the stage after query execution.
+ */ + State?: string; + + /** + *The number of bytes output from the stage after execution.
+ */ + OutputBytes?: number; + + /** + *The number of rows output from the stage after execution.
+ */ + OutputRows?: number; + + /** + *The number of bytes input into the stage for execution.
+ */ + InputBytes?: number; + + /** + *The number of rows input into the stage for execution.
+ */ + InputRows?: number; + + /** + *Time taken to execute this stage.
+ */ + ExecutionTime?: number; + + /** + *Stage plan information such as name, identifier, sub plans, and source stages.
+ */ + QueryStagePlan?: QueryStagePlanNode; + + /** + *List of sub query stages that form this stage execution plan.
+ */ + SubStages?: QueryStage[]; +} + +export namespace QueryStage { + /** + * @internal + */ + export const filterSensitiveLog = (obj: QueryStage): any => ({ + ...obj, + }); +} + +/** + *The query execution timeline, statistics on input and output rows and bytes, and the different + * query stages that form the query execution plan.
+ */ +export interface QueryRuntimeStatistics { + /** + *Timeline statistics such as query queue time, planning time, execution time, service processing + * time, and total execution time.
+ */ + Timeline?: QueryRuntimeStatisticsTimeline; + + /** + *Statistics such as input rows and bytes read by the query, rows and bytes output by the query, + * and the number of rows written by the query.
+ */ + Rows?: QueryRuntimeStatisticsRows; + + /** + *Stage statistics such as input and output rows and bytes, execution time, and stage state. This + * information also includes substages and the query stage plan.
+ */ + OutputStage?: QueryStage; +} + +export namespace QueryRuntimeStatistics { + /** + * @internal + */ + export const filterSensitiveLog = (obj: QueryRuntimeStatistics): any => ({ + ...obj, + }); +} + +export interface GetQueryRuntimeStatisticsOutput { + /** + *Runtime statistics about the query execution.
+ */ + QueryRuntimeStatistics?: QueryRuntimeStatistics; +} + +export namespace GetQueryRuntimeStatisticsOutput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetQueryRuntimeStatisticsOutput): any => ({ + ...obj, + }); +} diff --git a/clients/client-athena/src/pagination/ListEngineVersionsPaginator.ts b/clients/client-athena/src/pagination/ListEngineVersionsPaginator.ts new file mode 100644 index 0000000000000..52f6913a5ba9b --- /dev/null +++ b/clients/client-athena/src/pagination/ListEngineVersionsPaginator.ts @@ -0,0 +1,61 @@ +// smithy-typescript generated code +import { Paginator } from "@aws-sdk/types"; + +import { Athena } from "../Athena"; +import { AthenaClient } from "../AthenaClient"; +import { + ListEngineVersionsCommand, + ListEngineVersionsCommandInput, + ListEngineVersionsCommandOutput, +} from "../commands/ListEngineVersionsCommand"; +import { AthenaPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: AthenaClient, + input: ListEngineVersionsCommandInput, + ...args: any +): PromiseThe time stamp of the last update to the alarm state.
+ *Tracks the timestamp of any state update, even if StateValue
doesn't change.
The state value for the alarm.
*/ StateValue?: StateValue | string; + + /** + *
+ * The timestamp
+ * of the last change
+ * to the alarm's StateValue
.
+ *
+ * When the value is ALARM
,
+ * it means
+ * that the actions are suppressed
+ * because the suppressor alarm is
+ * in ALARM
+ * When the value is WaitPeriod
,
+ * it means that
+ * the actions are suppressed
+ * because the composite alarm is waiting
+ * for the suppressor alarm
+ * to go
+ * into
+ * into the ALARM
state.
+ * The maximum waiting time is as specified
+ * in ActionsSuppressorWaitPeriod
.
+ * After this time,
+ * the composite alarm performs its actions.
+ * When the value is ExtensionPeriod
,
+ * it means
+ * that the actions are suppressed
+ * because the composite alarm is waiting
+ * after the suppressor alarm went out
+ * of the ALARM
state.
+ * The maximum waiting time is as specified
+ * in ActionsSuppressorExtensionPeriod
.
+ * After this time,
+ * the composite alarm performs its actions.
+ *
+ * Captures the reason for action suppression. + *
+ */ + ActionsSuppressedReason?: string; + + /** + *
+ * Actions will be suppressed
+ * if the suppressor alarm is
+ * in the ALARM
state.
+ * ActionsSuppressor
can be an AlarmName or an Amazon Resource Name (ARN)
+ * from an existing alarm.
+ *
+ * The maximum time
+ * in seconds
+ * that the composite alarm waits
+ * for the suppressor alarm
+ * to go
+ * into the ALARM
state.
+ * After this time,
+ * the composite alarm performs its actions.
+ *
+ * WaitPeriod
+ * is required only
+ * when ActionsSuppressor
is specified.
+ *
+ * The maximum time
+ * in seconds
+ * that the composite alarm waits
+ * after suppressor alarm goes out
+ * of the ALARM
state.
+ * After this time,
+ * the composite alarm performs its actions.
+ *
+ * ExtensionPeriod
+ * is required only
+ * when ActionsSuppressor
is specified.
+ *
+ * Actions will be suppressed
+ * if the suppressor alarm is
+ * in the ALARM
state.
+ * ActionsSuppressor
can be an AlarmName or an Amazon Resource Name (ARN)
+ * from an existing alarm.
+ *
+ * The maximum time
+ * in seconds
+ * that the composite alarm waits
+ * for the suppressor alarm
+ * to go
+ * into the ALARM
state.
+ * After this time,
+ * the composite alarm performs its actions.
+ *
+ * WaitPeriod
+ * is required only
+ * when ActionsSuppressor
is specified.
+ *
+ * The maximum time
+ * in seconds
+ * that the composite alarm waits
+ * after suppressor alarm goes out
+ * of the ALARM
state.
+ * After this time,
+ * the composite alarm performs its actions.
+ *
+ * ExtensionPeriod
+ * is required only
+ * when ActionsSuppressor
is specified.
+ *
EndpointType
value, include "mysql"
, "oracle"
,
* "postgres"
, "mariadb"
, "aurora"
,
* "aurora-postgresql"
, "opensearch"
, "redshift"
, "s3"
,
- * "db2"
, "azuredb"
, "sybase"
, "dynamodb"
, "mongodb"
,
+ * "db2"
, db2-zos
, "azuredb"
, "sybase"
, "dynamodb"
, "mongodb"
,
* "kinesis"
, "kafka"
, "elasticsearch"
, "docdb"
,
- * "sqlserver"
, and "neptune"
.
+ * "sqlserver"
, "neptune"
, and babelfish
.
*/
EngineName: string | undefined;
@@ -8885,7 +8885,7 @@ export interface TableStatistics {
/**
* The state of the tables described.
*Valid states: Table does not exist | Before load | Full load | Table completed | Table - * cancelled | Table error | Table all | Table updates | Table is being reloaded
+ * cancelled | Table error | Table is being reloaded */ TableState?: string; diff --git a/clients/client-devops-guru/CHANGELOG.md b/clients/client-devops-guru/CHANGELOG.md index a734a1c284458..52d4a9ca19ed4 100644 --- a/clients/client-devops-guru/CHANGELOG.md +++ b/clients/client-devops-guru/CHANGELOG.md @@ -3,6 +3,17 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. +# [3.133.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.132.0...v3.133.0) (2022-07-19) + + +### Features + +* **client-devops-guru:** Added new APIs for log anomaly detection feature. ([90ed85b](https://github.com/aws/aws-sdk-js-v3/commit/90ed85b6817d3f035124d62818670b3b4807c492)) + + + + + # [3.131.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.130.0...v3.131.0) (2022-07-15) diff --git a/clients/client-devops-guru/package.json b/clients/client-devops-guru/package.json index ed538c32fa54e..ab6bec2e6e462 100644 --- a/clients/client-devops-guru/package.json +++ b/clients/client-devops-guru/package.json @@ -1,7 +1,7 @@ { "name": "@aws-sdk/client-devops-guru", "description": "AWS SDK for JavaScript Devops Guru Client for Node.js, Browser and React Native", - "version": "3.131.0", + "version": "3.133.0", "scripts": { "build": "concurrently 'yarn:build:cjs' 'yarn:build:es' 'yarn:build:types'", "build:cjs": "tsc -p tsconfig.cjs.json", diff --git a/clients/client-devops-guru/src/DevOpsGuru.ts b/clients/client-devops-guru/src/DevOpsGuru.ts index c63bd81c9e26a..9bf86d21f07be 100644 --- a/clients/client-devops-guru/src/DevOpsGuru.ts +++ b/clients/client-devops-guru/src/DevOpsGuru.ts @@ -81,12 +81,22 @@ import { ListAnomaliesForInsightCommandInput, ListAnomaliesForInsightCommandOutput, } from "./commands/ListAnomaliesForInsightCommand"; +import { + ListAnomalousLogGroupsCommand, + ListAnomalousLogGroupsCommandInput, + ListAnomalousLogGroupsCommandOutput, +} from 
"./commands/ListAnomalousLogGroupsCommand"; import { ListEventsCommand, ListEventsCommandInput, ListEventsCommandOutput } from "./commands/ListEventsCommand"; import { ListInsightsCommand, ListInsightsCommandInput, ListInsightsCommandOutput, } from "./commands/ListInsightsCommand"; +import { + ListMonitoredResourcesCommand, + ListMonitoredResourcesCommandInput, + ListMonitoredResourcesCommandOutput, +} from "./commands/ListMonitoredResourcesCommand"; import { ListNotificationChannelsCommand, ListNotificationChannelsCommandInput, @@ -706,6 +716,40 @@ export class DevOpsGuru extends DevOpsGuruClient { } } + /** + *+ * Returns the list of log groups that contain log anomalies. + *
+ */ + public listAnomalousLogGroups( + args: ListAnomalousLogGroupsCommandInput, + options?: __HttpHandlerOptions + ): PromiseReturns a list of the events emitted by the resources that are evaluated by DevOps Guru. * You can use filters to specify which events are returned.
@@ -764,6 +808,40 @@ export class DevOpsGuru extends DevOpsGuruClient { } } + /** + *+ * Returns the list of all log groups that are being monitored and tagged by DevOps Guru. + *
+ */ + public listMonitoredResources( + args: ListMonitoredResourcesCommandInput, + options?: __HttpHandlerOptions + ): PromiseReturns a list of notification channels configured for DevOps Guru. Each notification * channel is used to notify you when DevOps Guru generates an insight that contains information diff --git a/clients/client-devops-guru/src/DevOpsGuruClient.ts b/clients/client-devops-guru/src/DevOpsGuruClient.ts index fe001b5be4ee2..7ea4e459a2ac6 100644 --- a/clients/client-devops-guru/src/DevOpsGuruClient.ts +++ b/clients/client-devops-guru/src/DevOpsGuruClient.ts @@ -102,8 +102,16 @@ import { ListAnomaliesForInsightCommandInput, ListAnomaliesForInsightCommandOutput, } from "./commands/ListAnomaliesForInsightCommand"; +import { + ListAnomalousLogGroupsCommandInput, + ListAnomalousLogGroupsCommandOutput, +} from "./commands/ListAnomalousLogGroupsCommand"; import { ListEventsCommandInput, ListEventsCommandOutput } from "./commands/ListEventsCommand"; import { ListInsightsCommandInput, ListInsightsCommandOutput } from "./commands/ListInsightsCommand"; +import { + ListMonitoredResourcesCommandInput, + ListMonitoredResourcesCommandOutput, +} from "./commands/ListMonitoredResourcesCommand"; import { ListNotificationChannelsCommandInput, ListNotificationChannelsCommandOutput, @@ -161,8 +169,10 @@ export type ServiceInputTypes = | GetCostEstimationCommandInput | GetResourceCollectionCommandInput | ListAnomaliesForInsightCommandInput + | ListAnomalousLogGroupsCommandInput | ListEventsCommandInput | ListInsightsCommandInput + | ListMonitoredResourcesCommandInput | ListNotificationChannelsCommandInput | ListOrganizationInsightsCommandInput | ListRecommendationsCommandInput @@ -192,8 +202,10 @@ export type ServiceOutputTypes = | GetCostEstimationCommandOutput | GetResourceCollectionCommandOutput | ListAnomaliesForInsightCommandOutput + | ListAnomalousLogGroupsCommandOutput | ListEventsCommandOutput | ListInsightsCommandOutput + | ListMonitoredResourcesCommandOutput 
| ListNotificationChannelsCommandOutput | ListOrganizationInsightsCommandOutput | ListRecommendationsCommandOutput diff --git a/clients/client-devops-guru/src/commands/ListAnomalousLogGroupsCommand.ts b/clients/client-devops-guru/src/commands/ListAnomalousLogGroupsCommand.ts new file mode 100644 index 0000000000000..55fbef7d62b07 --- /dev/null +++ b/clients/client-devops-guru/src/commands/ListAnomalousLogGroupsCommand.ts @@ -0,0 +1,98 @@ +// smithy-typescript generated code +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { DevOpsGuruClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../DevOpsGuruClient"; +import { ListAnomalousLogGroupsRequest, ListAnomalousLogGroupsResponse } from "../models/models_0"; +import { + deserializeAws_restJson1ListAnomalousLogGroupsCommand, + serializeAws_restJson1ListAnomalousLogGroupsCommand, +} from "../protocols/Aws_restJson1"; + +export interface ListAnomalousLogGroupsCommandInput extends ListAnomalousLogGroupsRequest {} +export interface ListAnomalousLogGroupsCommandOutput extends ListAnomalousLogGroupsResponse, __MetadataBearer {} + +/** + *
+ * Returns the list of log groups that contain log anomalies. + *
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { DevOpsGuruClient, ListAnomalousLogGroupsCommand } from "@aws-sdk/client-devops-guru"; // ES Modules import + * // const { DevOpsGuruClient, ListAnomalousLogGroupsCommand } = require("@aws-sdk/client-devops-guru"); // CommonJS import + * const client = new DevOpsGuruClient(config); + * const command = new ListAnomalousLogGroupsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListAnomalousLogGroupsCommandInput} for command's `input` shape. + * @see {@link ListAnomalousLogGroupsCommandOutput} for command's `response` shape. + * @see {@link DevOpsGuruClientResolvedConfig | config} for DevOpsGuruClient's `config` shape. + * + */ +export class ListAnomalousLogGroupsCommand extends $Command< + ListAnomalousLogGroupsCommandInput, + ListAnomalousLogGroupsCommandOutput, + DevOpsGuruClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListAnomalousLogGroupsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack+ * Returns the list of all log groups that are being monitored and tagged by DevOps Guru. + *
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { DevOpsGuruClient, ListMonitoredResourcesCommand } from "@aws-sdk/client-devops-guru"; // ES Modules import + * // const { DevOpsGuruClient, ListMonitoredResourcesCommand } = require("@aws-sdk/client-devops-guru"); // CommonJS import + * const client = new DevOpsGuruClient(config); + * const command = new ListMonitoredResourcesCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListMonitoredResourcesCommandInput} for command's `input` shape. + * @see {@link ListMonitoredResourcesCommandOutput} for command's `response` shape. + * @see {@link DevOpsGuruClientResolvedConfig | config} for DevOpsGuruClient's `config` shape. + * + */ +export class ListMonitoredResourcesCommand extends $Command< + ListMonitoredResourcesCommandInput, + ListMonitoredResourcesCommandOutput, + DevOpsGuruClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListMonitoredResourcesCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack+ * Information about an anomalous log event found within a log group. + *
+ */ +export interface LogAnomalyClass { + /** + *+ * The name of the Amazon CloudWatch log stream that the anomalous log event belongs to. A log stream is a sequence of log events that share the same source. + *
+ */ + LogStreamName?: string; + + /** + *+ * The type of log anomaly that has been detected. + *
+ */ + LogAnomalyType?: LogAnomalyType | string; + + /** + *+ * The token where the anomaly was detected. This may refer to an exception or another location, or it may be blank for log anomalies such as format anomalies. + *
+ */ + LogAnomalyToken?: string; + + /** + *+ * The ID of the log event. + *
+ */ + LogEventId?: string; + + /** + *+ * The explanation for why the log event is considered an anomaly. + *
+ */ + Explanation?: string; + + /** + *+ * The number of log lines where this anomalous log event occurs. + *
+ */ + NumberOfLogLinesOccurrences?: number; + + /** + *+ * The time of the first occurrence of the anomalous log event. + *
+ */ + LogEventTimestamp?: Date; +} + +export namespace LogAnomalyClass { + /** + * @internal + */ + export const filterSensitiveLog = (obj: LogAnomalyClass): any => ({ + ...obj, + }); +} + +/** + *+ * A cluster of similar anomalous log events found within a log group. + *
+ */ +export interface LogAnomalyShowcase { + /** + *+ * A list of anomalous log events that may be related. + *
+ */ + LogAnomalyClasses?: LogAnomalyClass[]; +} + +export namespace LogAnomalyShowcase { + /** + * @internal + */ + export const filterSensitiveLog = (obj: LogAnomalyShowcase): any => ({ + ...obj, + }); +} + +/** + *+ * An Amazon CloudWatch log group that contains log anomalies and is used to generate an insight. + *
+ */ +export interface AnomalousLogGroup { + /** + *+ * The name of the CloudWatch log group. + *
+ */ + LogGroupName?: string; + + /** + *+ * The time the anomalous log events began. The impact start time indicates the time of the first log anomaly event that occurs. + *
+ */ + ImpactStartTime?: Date; + + /** + *+ * The time the anomalous log events stopped. + *
+ */ + ImpactEndTime?: Date; + + /** + *+ * The number of log lines that were scanned for anomalous log events. + *
+ */ + NumberOfLogLinesScanned?: number; + + /** + *+ * The log anomalies in the log group. Each log anomaly displayed represents a cluster of similar anomalous log events. + *
+ */ + LogAnomalyShowcases?: LogAnomalyShowcase[]; +} + +export namespace AnomalousLogGroup { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AnomalousLogGroup): any => ({ + ...obj, + }); +} + /** * A time range that specifies when DevOps Guru opens and then closes an anomaly. This
* is different from AnomalyTimeRange
, which specifies the time range when
@@ -482,7 +632,7 @@ export enum AnomalySeverity {
}
/**
- *
The dimension of am Amazon CloudWatch metric that is used when DevOps Guru analyzes the resources in + *
The dimension of an Amazon CloudWatch metric that is used when DevOps Guru analyzes the resources in * your account for operational problems and anomalous behavior. A dimension is a * name/value pair that is part of the identity of a metric. A metric can have up to 10 * dimensions. For more information, see Dimensions in the Amazon CloudWatch User Guide.
@@ -2594,6 +2744,27 @@ export enum OptInStatus { ENABLED = "ENABLED", } +/** + *+ * Information about the integration of DevOps Guru with CloudWatch log groups for log anomaly detection. + *
+ */ +export interface LogsAnomalyDetectionIntegration { + /** + *Specifies if DevOps Guru is configured to perform log anomaly detection on CloudWatch log groups.
+ */ + OptInStatus?: OptInStatus | string; +} + +export namespace LogsAnomalyDetectionIntegration { + /** + * @internal + */ + export const filterSensitiveLog = (obj: LogsAnomalyDetectionIntegration): any => ({ + ...obj, + }); +} + /** *Information about whether DevOps Guru is configured to create an OpsItem in Amazon Web Services Systems Manager * OpsCenter for each created insight.
@@ -2625,6 +2796,13 @@ export interface ServiceIntegrationConfig { * OpsCenter for each created insight. */ OpsCenter?: OpsCenterIntegration; + + /** + *+ * Information about whether DevOps Guru is configured to perform log anomaly detection on Amazon CloudWatch log groups. + *
+ */ + LogsAnomalyDetection?: LogsAnomalyDetectionIntegration; } export namespace ServiceIntegrationConfig { @@ -3405,6 +3583,67 @@ export namespace ListAnomaliesForInsightResponse { }); } +export interface ListAnomalousLogGroupsRequest { + /** + *+ * The ID of the insight containing the log groups. + *
+ */ + InsightId: string | undefined; + + /** + *The maximum number of results to return with a single call.
+ * To retrieve the remaining results, make another call with the returned nextToken
value.
The pagination token to use to retrieve + * the next page of results for this operation. If this value is null, it retrieves the first page.
+ */ + NextToken?: string; +} + +export namespace ListAnomalousLogGroupsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListAnomalousLogGroupsRequest): any => ({ + ...obj, + }); +} + +export interface ListAnomalousLogGroupsResponse { + /** + *+ * The ID of the insight containing the log groups. + *
+ */ + InsightId: string | undefined; + + /** + *+ * The list of Amazon CloudWatch log groups that are related to an insight. + *
+ */ + AnomalousLogGroups: AnomalousLogGroup[] | undefined; + + /** + *The pagination token to use to retrieve + * the next page of results for this operation. If there are no more pages, this value is null.
+ */ + NextToken?: string; +} + +export namespace ListAnomalousLogGroupsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListAnomalousLogGroupsResponse): any => ({ + ...obj, + }); +} + export enum EventDataSource { AWS_CLOUD_TRAIL = "AWS_CLOUD_TRAIL", AWS_CODE_DEPLOY = "AWS_CODE_DEPLOY", @@ -3985,6 +4224,136 @@ export namespace ListInsightsResponse { }); } +export enum ResourcePermission { + FULL_PERMISSION = "FULL_PERMISSION", + MISSING_PERMISSION = "MISSING_PERMISSION", +} + +export enum ResourceTypeFilter { + LOG_GROUPS = "LOG_GROUPS", +} + +/** + *+ * Filters to determine which monitored resources you want to retrieve. You can filter by resource type or resource permission status. + *
+ */ +export interface ListMonitoredResourcesFilters { + /** + *+ * The permission status of a resource. + *
+ */ + ResourcePermission: ResourcePermission | string | undefined; + + /** + *+ * The type of resource that you wish to retrieve, such as log groups. + *
+ */ + ResourceTypeFilters: (ResourceTypeFilter | string)[] | undefined; +} + +export namespace ListMonitoredResourcesFilters { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListMonitoredResourcesFilters): any => ({ + ...obj, + }); +} + +export interface ListMonitoredResourcesRequest { + /** + *+ * Filters to determine which monitored resources you want to retrieve. You can filter by resource type or resource permission status. + *
+ */ + Filters: ListMonitoredResourcesFilters | undefined; + + /** + *The maximum number of results to return with a single call.
+ * To retrieve the remaining results, make another call with the returned nextToken
value.
The pagination token to use to retrieve + * the next page of results for this operation. If this value is null, it retrieves the first page.
+ */ + NextToken?: string; +} + +export namespace ListMonitoredResourcesRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListMonitoredResourcesRequest): any => ({ + ...obj, + }); +} + +/** + *+ * Information about the resource that is being monitored, including the name of the resource, the type of resource, and whether or not permission is given to DevOps Guru to access that resource. + *
+ */ +export interface MonitoredResourceIdentifier { + /** + *+ * The name of the resource being monitored. + *
+ */ + MonitoredResourceName?: string; + + /** + *+ * The type of resource being monitored. + *
+ */ + Type?: string; + + /** + *+ * The permission status of a resource. + *
+ */ + ResourcePermission?: ResourcePermission | string; +} + +export namespace MonitoredResourceIdentifier { + /** + * @internal + */ + export const filterSensitiveLog = (obj: MonitoredResourceIdentifier): any => ({ + ...obj, + }); +} + +export interface ListMonitoredResourcesResponse { + /** + *+ * Information about the resource that is being monitored, including the name of the resource, the type of resource, and whether or not permission is given to DevOps Guru to access that resource. + *
+ */ + MonitoredResourceIdentifiers: MonitoredResourceIdentifier[] | undefined; + + /** + *The pagination token to use to retrieve + * the next page of results for this operation. If there are no more pages, this value is null.
+ */ + NextToken?: string; +} + +export namespace ListMonitoredResourcesResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListMonitoredResourcesResponse): any => ({ + ...obj, + }); +} + export interface ListNotificationChannelsRequest { /** *The pagination token to use to retrieve @@ -5038,9 +5407,30 @@ export namespace UpdateResourceCollectionResponse { }); } +/** + *
+ * Information about the integration of DevOps Guru with CloudWatch log groups for log anomaly detection. You can use this to update the configuration. + *
+ */ +export interface LogsAnomalyDetectionIntegrationConfig { + /** + *Specifies if DevOps Guru is configured to perform log anomaly detection on CloudWatch log groups.
+ */ + OptInStatus?: OptInStatus | string; +} + +export namespace LogsAnomalyDetectionIntegrationConfig { + /** + * @internal + */ + export const filterSensitiveLog = (obj: LogsAnomalyDetectionIntegrationConfig): any => ({ + ...obj, + }); +} + /** *Information about whether DevOps Guru is configured to create an OpsItem in Amazon Web Services Systems Manager - * OpsCenter for each created insight.
+ * OpsCenter for each created insight. You can use this to update the configuration. */ export interface OpsCenterIntegrationConfig { /** @@ -5066,9 +5456,16 @@ export namespace OpsCenterIntegrationConfig { export interface UpdateServiceIntegrationConfig { /** *Information about whether DevOps Guru is configured to create an OpsItem in Amazon Web Services Systems Manager - * OpsCenter for each created insight.
+ * OpsCenter for each created insight. You can use this to update the configuration. */ OpsCenter?: OpsCenterIntegrationConfig; + + /** + *+ * Information about whether DevOps Guru is configured to perform log anomaly detection on Amazon CloudWatch log groups. + *
+ */ + LogsAnomalyDetection?: LogsAnomalyDetectionIntegrationConfig; } export namespace UpdateServiceIntegrationConfig { diff --git a/clients/client-devops-guru/src/pagination/ListAnomalousLogGroupsPaginator.ts b/clients/client-devops-guru/src/pagination/ListAnomalousLogGroupsPaginator.ts new file mode 100644 index 0000000000000..c3828d32fb4b9 --- /dev/null +++ b/clients/client-devops-guru/src/pagination/ListAnomalousLogGroupsPaginator.ts @@ -0,0 +1,61 @@ +// smithy-typescript generated code +import { Paginator } from "@aws-sdk/types"; + +import { + ListAnomalousLogGroupsCommand, + ListAnomalousLogGroupsCommandInput, + ListAnomalousLogGroupsCommandOutput, +} from "../commands/ListAnomalousLogGroupsCommand"; +import { DevOpsGuru } from "../DevOpsGuru"; +import { DevOpsGuruClient } from "../DevOpsGuruClient"; +import { DevOpsGuruPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: DevOpsGuruClient, + input: ListAnomalousLogGroupsCommandInput, + ...args: any +): PromiseIdentifies the clone group to which the DB cluster is associated.
+ */ + CloneGroupId?: string; + /** *Specifies the time when the cluster was created, in Universal Coordinated Time * (UTC).
@@ -1986,6 +1991,11 @@ export interface CreateDBInstanceMessage { */ DBClusterIdentifier: string | undefined; + /** + *A value that indicates whether to copy tags from the DB instance to snapshots of the DB instance. By default, tags are not copied.
+ */ + CopyTagsToSnapshot?: boolean; + /** *A value that specifies the order in which an Amazon DocumentDB replica is promoted to the * primary instance after a failure of the existing primary instance.
@@ -2434,6 +2444,11 @@ export interface DBInstance { */ CACertificateIdentifier?: string; + /** + *A value that indicates whether to copy tags from the DB instance to snapshots of the DB instance. By default, tags are not copied.
+ */ + CopyTagsToSnapshot?: boolean; + /** *A value that specifies the order in which an Amazon DocumentDB replica is promoted to the * primary instance after a failure of the existing primary instance.
@@ -5684,6 +5699,11 @@ export interface ModifyDBInstanceMessage { */ CACertificateIdentifier?: string; + /** + *A value that indicates whether to copy all tags from the DB instance to snapshots of the DB instance. By default, tags are not copied.
+ */ + CopyTagsToSnapshot?: boolean; + /** *A value that specifies the order in which an Amazon DocumentDB replica is promoted to the primary instance after a failure of the existing primary instance.
*Default: 1
@@ -6326,6 +6346,26 @@ export interface RestoreDBClusterToPointInTimeMessage { */ DBClusterIdentifier: string | undefined; + /** + *The type of restore to be performed. You can specify one of the following values:
+ *
+ * full-copy
- The new DB cluster is restored as a full copy of the
+ * source DB cluster.
+ * copy-on-write
- The new DB cluster is restored as a clone of the
+ * source DB cluster.
Constraints: You can't specify copy-on-write
if the engine version of the source DB cluster is earlier than 1.11.
If you don't specify a RestoreType
value, then the new DB cluster is
+ * restored as a full copy of the source DB cluster.
The identifier of the source cluster from which to restore.
*Constraints:
diff --git a/clients/client-docdb/src/protocols/Aws_query.ts b/clients/client-docdb/src/protocols/Aws_query.ts index 4583ca7d61601..11bb216ad5a89 100644 --- a/clients/client-docdb/src/protocols/Aws_query.ts +++ b/clients/client-docdb/src/protocols/Aws_query.ts @@ -5068,6 +5068,9 @@ const serializeAws_queryCreateDBInstanceMessage = (input: CreateDBInstanceMessag if (input.DBClusterIdentifier !== undefined && input.DBClusterIdentifier !== null) { entries["DBClusterIdentifier"] = input.DBClusterIdentifier; } + if (input.CopyTagsToSnapshot !== undefined && input.CopyTagsToSnapshot !== null) { + entries["CopyTagsToSnapshot"] = input.CopyTagsToSnapshot; + } if (input.PromotionTier !== undefined && input.PromotionTier !== null) { entries["PromotionTier"] = input.PromotionTier; } @@ -5914,6 +5917,9 @@ const serializeAws_queryModifyDBInstanceMessage = (input: ModifyDBInstanceMessag if (input.CACertificateIdentifier !== undefined && input.CACertificateIdentifier !== null) { entries["CACertificateIdentifier"] = input.CACertificateIdentifier; } + if (input.CopyTagsToSnapshot !== undefined && input.CopyTagsToSnapshot !== null) { + entries["CopyTagsToSnapshot"] = input.CopyTagsToSnapshot; + } if (input.PromotionTier !== undefined && input.PromotionTier !== null) { entries["PromotionTier"] = input.PromotionTier; } @@ -6188,6 +6194,9 @@ const serializeAws_queryRestoreDBClusterToPointInTimeMessage = ( if (input.DBClusterIdentifier !== undefined && input.DBClusterIdentifier !== null) { entries["DBClusterIdentifier"] = input.DBClusterIdentifier; } + if (input.RestoreType !== undefined && input.RestoreType !== null) { + entries["RestoreType"] = input.RestoreType; + } if (input.SourceDBClusterIdentifier !== undefined && input.SourceDBClusterIdentifier !== null) { entries["SourceDBClusterIdentifier"] = input.SourceDBClusterIdentifier; } @@ -6620,6 +6629,7 @@ const deserializeAws_queryDBCluster = (output: any, context: __SerdeContext): DB DbClusterResourceId: undefined, DBClusterArn: 
undefined, AssociatedRoles: undefined, + CloneGroupId: undefined, ClusterCreateTime: undefined, EnabledCloudwatchLogsExports: undefined, DeletionProtection: undefined, @@ -6742,6 +6752,9 @@ const deserializeAws_queryDBCluster = (output: any, context: __SerdeContext): DB context ); } + if (output["CloneGroupId"] !== undefined) { + contents.CloneGroupId = __expectString(output["CloneGroupId"]); + } if (output["ClusterCreateTime"] !== undefined) { contents.ClusterCreateTime = __expectNonNull(__parseRfc3339DateTime(output["ClusterCreateTime"])); } @@ -7306,6 +7319,7 @@ const deserializeAws_queryDBInstance = (output: any, context: __SerdeContext): D KmsKeyId: undefined, DbiResourceId: undefined, CACertificateIdentifier: undefined, + CopyTagsToSnapshot: undefined, PromotionTier: undefined, DBInstanceArn: undefined, EnabledCloudwatchLogsExports: undefined, @@ -7395,6 +7409,9 @@ const deserializeAws_queryDBInstance = (output: any, context: __SerdeContext): D if (output["CACertificateIdentifier"] !== undefined) { contents.CACertificateIdentifier = __expectString(output["CACertificateIdentifier"]); } + if (output["CopyTagsToSnapshot"] !== undefined) { + contents.CopyTagsToSnapshot = __parseBoolean(output["CopyTagsToSnapshot"]); + } if (output["PromotionTier"] !== undefined) { contents.PromotionTier = __strictParseInt32(output["PromotionTier"]) as number; } diff --git a/clients/client-ec2-instance-connect/CHANGELOG.md b/clients/client-ec2-instance-connect/CHANGELOG.md index 5fb77175a88be..b96e8a290707e 100644 --- a/clients/client-ec2-instance-connect/CHANGELOG.md +++ b/clients/client-ec2-instance-connect/CHANGELOG.md @@ -3,6 +3,17 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. 
+# [3.135.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.134.0...v3.135.0) (2022-07-21) + + +### Features + +* **client-ec2-instance-connect:** This release includes a new exception type "EC2InstanceUnavailableException" for SendSSHPublicKey and SendSerialConsoleSSHPublicKey APIs. ([a2aef86](https://github.com/aws/aws-sdk-js-v3/commit/a2aef86781ea0833877ada3ed3dcdbaab5d700ec)) + + + + + # [3.131.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.130.0...v3.131.0) (2022-07-15) **Note:** Version bump only for package @aws-sdk/client-ec2-instance-connect diff --git a/clients/client-ec2-instance-connect/package.json b/clients/client-ec2-instance-connect/package.json index 7b92af3809b1b..9f13fdfc8c4a2 100644 --- a/clients/client-ec2-instance-connect/package.json +++ b/clients/client-ec2-instance-connect/package.json @@ -1,7 +1,7 @@ { "name": "@aws-sdk/client-ec2-instance-connect", "description": "AWS SDK for JavaScript Ec2 Instance Connect Client for Node.js, Browser and React Native", - "version": "3.131.0", + "version": "3.135.0", "scripts": { "build": "concurrently 'yarn:build:cjs' 'yarn:build:es' 'yarn:build:types'", "build:cjs": "tsc -p tsconfig.cjs.json", diff --git a/clients/client-ec2-instance-connect/src/models/models_0.ts b/clients/client-ec2-instance-connect/src/models/models_0.ts index 705135210a20a..6e8f25d2ae34d 100644 --- a/clients/client-ec2-instance-connect/src/models/models_0.ts +++ b/clients/client-ec2-instance-connect/src/models/models_0.ts @@ -90,6 +90,27 @@ export class EC2InstanceTypeInvalidException extends __BaseException { } } +/** + *The instance is currently unavailable. Wait a few minutes and try again.
+ */ +export class EC2InstanceUnavailableException extends __BaseException { + readonly name: "EC2InstanceUnavailableException" = "EC2InstanceUnavailableException"; + readonly $fault: "server" = "server"; + Message?: string; + /** + * @internal + */ + constructor(opts: __ExceptionOptionTypeOne of the parameters is not valid.
*/ diff --git a/clients/client-ec2-instance-connect/src/protocols/Aws_json1_1.ts b/clients/client-ec2-instance-connect/src/protocols/Aws_json1_1.ts index 287c7db997dae..c677abb234266 100644 --- a/clients/client-ec2-instance-connect/src/protocols/Aws_json1_1.ts +++ b/clients/client-ec2-instance-connect/src/protocols/Aws_json1_1.ts @@ -23,6 +23,7 @@ import { EC2InstanceNotFoundException, EC2InstanceStateInvalidException, EC2InstanceTypeInvalidException, + EC2InstanceUnavailableException, InvalidArgsException, SendSerialConsoleSSHPublicKeyRequest, SendSerialConsoleSSHPublicKeyResponse, @@ -101,6 +102,9 @@ const deserializeAws_json1_1SendSerialConsoleSSHPublicKeyCommandError = async ( case "EC2InstanceTypeInvalidException": case "com.amazonaws.ec2instanceconnect#EC2InstanceTypeInvalidException": throw await deserializeAws_json1_1EC2InstanceTypeInvalidExceptionResponse(parsedOutput, context); + case "EC2InstanceUnavailableException": + case "com.amazonaws.ec2instanceconnect#EC2InstanceUnavailableException": + throw await deserializeAws_json1_1EC2InstanceUnavailableExceptionResponse(parsedOutput, context); case "InvalidArgsException": case "com.amazonaws.ec2instanceconnect#InvalidArgsException": throw await deserializeAws_json1_1InvalidArgsExceptionResponse(parsedOutput, context); @@ -169,6 +173,9 @@ const deserializeAws_json1_1SendSSHPublicKeyCommandError = async ( case "EC2InstanceStateInvalidException": case "com.amazonaws.ec2instanceconnect#EC2InstanceStateInvalidException": throw await deserializeAws_json1_1EC2InstanceStateInvalidExceptionResponse(parsedOutput, context); + case "EC2InstanceUnavailableException": + case "com.amazonaws.ec2instanceconnect#EC2InstanceUnavailableException": + throw await deserializeAws_json1_1EC2InstanceUnavailableExceptionResponse(parsedOutput, context); case "InvalidArgsException": case "com.amazonaws.ec2instanceconnect#InvalidArgsException": throw await deserializeAws_json1_1InvalidArgsExceptionResponse(parsedOutput, context); @@ 
-243,6 +250,19 @@ const deserializeAws_json1_1EC2InstanceTypeInvalidExceptionResponse = async ( return __decorateServiceException(exception, body); }; +const deserializeAws_json1_1EC2InstanceUnavailableExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): PromiseAllocate a CIDR from an IPAM pool. In IPAM, an allocation is a CIDR assignment from an IPAM pool to another resource or IPAM pool. For more information, see Allocate CIDRs in the Amazon VPC IPAM User Guide. + *
Allocate a CIDR from an IPAM pool. In IPAM, an allocation is a CIDR assignment from an IPAM pool to another resource or IPAM pool. For more information, see Allocate CIDRs in the Amazon VPC IPAM User Guide. *
*/ public allocateIpamPoolCidr( @@ -5146,7 +5146,7 @@ export class EC2 extends EC2Client { * to automate your IP address management workflows including assigning, tracking, * troubleshooting, and auditing IP addresses across Amazon Web Services Regions and accounts * throughout your Amazon Web Services Organization. - *For more information, see Create an IPAM in the Amazon VPC IPAM User Guide. + *
For more information, see Create an IPAM in the Amazon VPC IPAM User Guide. *
*/ public createIpam(args: CreateIpamCommandInput, options?: __HttpHandlerOptions): PromiseCreate an IP address pool for Amazon VPC IP Address Manager (IPAM). In IPAM, a pool is a collection of contiguous IP addresses CIDRs. Pools enable you to organize your IP addresses according to your routing and security needs. For example, if you have separate routing and security needs for development and production applications, you can create a pool for each.
- *For more information, see Create a top-level pool in the Amazon VPC IPAM User Guide. + *
For more information, see Create a top-level pool in the Amazon VPC IPAM User Guide. *
*/ public createIpamPool( @@ -5208,7 +5208,7 @@ export class EC2 extends EC2Client { /** *Create an IPAM scope. In IPAM, a scope is the highest-level container within IPAM. An IPAM contains two default scopes. Each scope represents the IP space for a single network. The private scope is intended for all private IP address space. The public scope is intended for all public IP address space. Scopes enable you to reuse IP addresses across multiple unconnected networks without causing IP address overlap or conflict.
- *For more information, see Add a scope in the Amazon VPC IPAM User Guide.
+ *For more information, see Add a scope in the Amazon VPC IPAM User Guide.
*/ public createIpamScope( args: CreateIpamScopeCommandInput, @@ -5323,13 +5323,13 @@ export class EC2 extends EC2Client { } /** - *Creates a new version for a launch template. You can specify an existing version of + *
Creates a new version of a launch template. You can specify an existing version of * launch template from which to base the new version.
*Launch template versions are numbered in the order in which they are created. You * cannot specify, change, or replace the numbering of launch template versions.
*Launch templates are immutable; after you create a launch template, you can't modify it. * Instead, you can create a new version of the launch template that includes any changes you require.
- *For more information, see Modify a launch template (manage launch template versions)in the + *
For more information, see Modify a launch template (manage launch template versions) in the * Amazon Elastic Compute Cloud User Guide.
*/ public createLaunchTemplateVersion( @@ -6286,10 +6286,10 @@ export class EC2 extends EC2Client { * the new value. Each resource can have a maximum of 50 tags. Each tag consists of a key and * optional value. Tag keys must be unique per resource. * - *For more information about tags, see Tagging Your Resources in the + *
For more information about tags, see Tag your Amazon EC2 resources in the * Amazon Elastic Compute Cloud User Guide. For more information about * creating IAM policies that control users' access to resources based on tags, see Supported - * Resource-Level Permissions for Amazon EC2 API Actions in the Amazon + * resource-level permissions for Amazon EC2 API actions in the Amazon * Elastic Compute Cloud User Guide.
*/ public createTags(args: CreateTagsCommandInput, options?: __HttpHandlerOptions): PromiseRequests a transit gateway peering attachment between the specified transit gateway - * (requester) and a peer transit gateway (accepter). The transit gateways must be in - * different Regions. The peer transit gateway can be in your account or a different - * Amazon Web Services account.
+ * (requester) and a peer transit gateway (accepter). The peer transit gateway can be in + * your account or a different Amazon Web Services account. *After you create the peering attachment, the owner of the accepter transit gateway * must accept the attachment request.
*/ @@ -7596,7 +7595,7 @@ export class EC2 extends EC2Client { /** *Delete an IPAM. Deleting an IPAM removes all monitored data associated with the IPAM including the historical data for CIDRs.
- *For more information, see Delete an IPAM in the Amazon VPC IPAM User Guide. + *
For more information, see Delete an IPAM in the Amazon VPC IPAM User Guide. *
*/ public deleteIpam(args: DeleteIpamCommandInput, options?: __HttpHandlerOptions): PromiseFor more information, see Delete a pool in the Amazon VPC IPAM User Guide. + *
For more information, see Delete a pool in the Amazon VPC IPAM User Guide. *
*/ public deleteIpamPool( @@ -7663,7 +7662,7 @@ export class EC2 extends EC2Client { /** *Delete the scope for an IPAM. You cannot delete the default scopes.
- *For more information, see Delete a scope in the Amazon VPC IPAM User Guide. + *
For more information, see Delete a scope in the Amazon VPC IPAM User Guide. *
*/ public deleteIpamScope( @@ -8516,9 +8515,10 @@ export class EC2 extends EC2Client { /** *Deletes the specified set of tags from the specified set of resources.
- *To list the current tags, use DescribeTags. For more information about tags, see - * Tagging Your Resources - * in the Amazon Elastic Compute Cloud User Guide.
+ *To list the current tags, use DescribeTags. For more information about + * tags, see Tag + * your Amazon EC2 resources in the Amazon Elastic Compute Cloud User + * Guide.
*/ public deleteTags(args: DeleteTagsCommandInput, options?: __HttpHandlerOptions): PromiseDeprovision a CIDR provisioned from an IPAM pool. If you deprovision a CIDR from a pool that has a source pool, the CIDR is recycled back into the source pool. For more information, see Deprovision pool CIDRs in the Amazon VPC IPAM User Guide.
+ *Deprovision a CIDR provisioned from an IPAM pool. If you deprovision a CIDR from a pool that has a source pool, the CIDR is recycled back into the source pool. For more information, see Deprovision pool CIDRs in the Amazon VPC IPAM User Guide.
*/ public deprovisionIpamPoolCidr( args: DeprovisionIpamPoolCidrCommandInput, @@ -11504,7 +11504,7 @@ export class EC2 extends EC2Client { /** *Get information about your IPAM pools.
- *For more information, see What is IPAM? in the Amazon VPC IPAM User Guide. + *
For more information, see What is IPAM? in the Amazon VPC IPAM User Guide. *
*/ public describeIpams( @@ -13302,8 +13302,8 @@ export class EC2 extends EC2Client { /** *Describes the specified tags for your EC2 resources.
- *For more information about tags, see Tagging Your Resources in the - * Amazon Elastic Compute Cloud User Guide.
+ *For more information about tags, see Tag your Amazon EC2 resources in the + * Amazon Elastic Compute Cloud User Guide.
*/ public describeTags( args: DescribeTagsCommandInput, @@ -14722,7 +14722,7 @@ export class EC2 extends EC2Client { } /** - *Disable the IPAM account. For more information, see Enable integration with Organizations in the Amazon VPC IPAM User Guide. + *
Disable the IPAM account. For more information, see Enable integration with Organizations in the Amazon VPC IPAM User Guide. *
*/ public disableIpamOrganizationAdminAccount( @@ -15510,7 +15510,7 @@ export class EC2 extends EC2Client { } /** - *Enable an Organizations member account as the IPAM admin account. You cannot select the Organizations management account as the IPAM admin account. For more information, see Enable integration with Organizations in the Amazon VPC IPAM User Guide. + *
Enable an Organizations member account as the IPAM admin account. You cannot select the Organizations management account as the IPAM admin account. For more information, see Enable integration with Organizations in the Amazon VPC IPAM User Guide. *
*/ public enableIpamOrganizationAdminAccount( @@ -16406,7 +16406,7 @@ export class EC2 extends EC2Client { } /** - *Retrieve historical information about a CIDR within an IPAM scope. For more information, see View the history of IP addresses in the Amazon VPC IPAM User Guide.
+ *Retrieve historical information about a CIDR within an IPAM scope. For more information, see View the history of IP addresses in the Amazon VPC IPAM User Guide.
*/ public getIpamAddressHistory( args: GetIpamAddressHistoryCommandInput, @@ -16538,8 +16538,8 @@ export class EC2 extends EC2Client { * create a launch template. *This action calls on other describe actions to get instance information. Depending on
* your instance configuration, you may need to allow the following actions in your IAM
- * policy: DescribeSpotInstanceRequests, DescribeInstanceCreditSpecifications,
- * DescribeVolumes, DescribeInstanceAttribute, and DescribeElasticGpus. Or, you can allow
+ * policy: DescribeSpotInstanceRequests
, DescribeInstanceCreditSpecifications
,
+ * DescribeVolumes
, DescribeInstanceAttribute
, and DescribeElasticGpus
. Or, you can allow
* describe*
depending on your instance requirements.
Modify the configurations of an IPAM pool.
- *For more information, see Modify a pool in the Amazon VPC IPAM User Guide. + *
For more information, see Modify a pool in the Amazon VPC IPAM User Guide. *
*/ public modifyIpamPool( @@ -18354,7 +18354,7 @@ export class EC2 extends EC2Client { /** *Modify a resource CIDR. You can use this action to transfer resource CIDRs between scopes and ignore resource CIDRs that you do not want to manage. If set to false, the resource will not be tracked for overlap, it cannot be auto-imported into a pool, and it will be removed from any pool it has an allocation in.
- *For more information, see Move resource CIDRs between scopes and Change the monitoring state of resource CIDRs in the Amazon VPC IPAM User Guide.
+ *For more information, see Move resource CIDRs between scopes and Change the monitoring state of resource CIDRs in the Amazon VPC IPAM User Guide.
*/ public modifyIpamResourceCidr( args: ModifyIpamResourceCidrCommandInput, @@ -19649,7 +19649,7 @@ export class EC2 extends EC2Client { /** *Move an BYOIP IPv4 CIDR to IPAM from a public IPv4 pool.
- *If you already have an IPv4 BYOIP CIDR with Amazon Web Services, you can move the CIDR to IPAM from a public IPv4 pool. You cannot move an IPv6 CIDR to IPAM. If you are bringing a new IP address to Amazon Web Services for the first time, complete the steps in Tutorial: BYOIP address CIDRs to IPAM.
+ *If you already have an IPv4 BYOIP CIDR with Amazon Web Services, you can move the CIDR to IPAM from a public IPv4 pool. You cannot move an IPv6 CIDR to IPAM. If you are bringing a new IP address to Amazon Web Services for the first time, complete the steps in Tutorial: BYOIP address CIDRs to IPAM.
*/ public moveByoipCidrToIpam( args: MoveByoipCidrToIpamCommandInput, @@ -19725,7 +19725,7 @@ export class EC2 extends EC2Client { /** *Provision a CIDR to an IPAM pool. You can use this action to provision new CIDRs to a top-level pool or to transfer a CIDR from a top-level pool to a pool within it.
- *For more information, see Provision CIDRs to pools in the Amazon VPC IPAM User Guide. + *
For more information, see Provision CIDRs to pools in the Amazon VPC IPAM User Guide. *
*/ public provisionIpamPoolCidr( @@ -19759,7 +19759,7 @@ export class EC2 extends EC2Client { /** *Provision a CIDR to a public IPv4 pool.
- *For more information about IPAM, see What is IPAM? in the Amazon VPC IPAM User Guide.
+ *For more information about IPAM, see What is IPAM? in the Amazon VPC IPAM User Guide.
*/ public provisionPublicIpv4PoolCidr( args: ProvisionPublicIpv4PoolCidrCommandInput, @@ -19948,8 +19948,10 @@ export class EC2 extends EC2Client { * Creating your * own AMIs in the Amazon Elastic Compute Cloud User Guide. *For Amazon EBS-backed instances, CreateImage creates and registers - * the AMI in a single request, so you don't have to register the AMI yourself.
+ *For Amazon EBS-backed instances, CreateImage creates and registers the AMI + * in a single request, so you don't have to register the AMI yourself. We recommend that you + * always use CreateImage unless you have a specific reason to use + * RegisterImage.
*If needed, you can deregister an AMI at any time. Any modifications you make to an AMI backed by an instance store volume invalidates its registration. @@ -20392,7 +20394,7 @@ export class EC2 extends EC2Client { } /** - *
Release an allocation within an IPAM pool. You can only use this action to release manual allocations. To remove an allocation for a resource without deleting the resource, set its monitored state to false using ModifyIpamResourceCidr. For more information, see Release an allocation in the Amazon VPC IPAM User Guide. + *
Release an allocation within an IPAM pool. You can only use this action to release manual allocations. To remove an allocation for a resource without deleting the resource, set its monitored state to false using ModifyIpamResourceCidr. For more information, see Release an allocation in the Amazon VPC IPAM User Guide. *
*/ public releaseIpamPoolAllocation( diff --git a/clients/client-ec2/src/commands/AllocateIpamPoolCidrCommand.ts b/clients/client-ec2/src/commands/AllocateIpamPoolCidrCommand.ts index 7245ccb26a0b9..a4e25ddb28091 100644 --- a/clients/client-ec2/src/commands/AllocateIpamPoolCidrCommand.ts +++ b/clients/client-ec2/src/commands/AllocateIpamPoolCidrCommand.ts @@ -23,7 +23,7 @@ export interface AllocateIpamPoolCidrCommandInput extends AllocateIpamPoolCidrRe export interface AllocateIpamPoolCidrCommandOutput extends AllocateIpamPoolCidrResult, __MetadataBearer {} /** - *Allocate a CIDR from an IPAM pool. In IPAM, an allocation is a CIDR assignment from an IPAM pool to another resource or IPAM pool. For more information, see Allocate CIDRs in the Amazon VPC IPAM User Guide. + *
Allocate a CIDR from an IPAM pool. In IPAM, an allocation is a CIDR assignment from an IPAM pool to another resource or IPAM pool. For more information, see Allocate CIDRs in the Amazon VPC IPAM User Guide. *
* @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-ec2/src/commands/CreateIpamCommand.ts b/clients/client-ec2/src/commands/CreateIpamCommand.ts index 52ec06d931a34..cb3c4a7216b5e 100644 --- a/clients/client-ec2/src/commands/CreateIpamCommand.ts +++ b/clients/client-ec2/src/commands/CreateIpamCommand.ts @@ -24,7 +24,7 @@ export interface CreateIpamCommandOutput extends CreateIpamResult, __MetadataBea * to automate your IP address management workflows including assigning, tracking, * troubleshooting, and auditing IP addresses across Amazon Web Services Regions and accounts * throughout your Amazon Web Services Organization. - *For more information, see Create an IPAM in the Amazon VPC IPAM User Guide. + *
For more information, see Create an IPAM in the Amazon VPC IPAM User Guide. *
* @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-ec2/src/commands/CreateIpamPoolCommand.ts b/clients/client-ec2/src/commands/CreateIpamPoolCommand.ts index 5b2346dfefc1c..5a084d597d02e 100644 --- a/clients/client-ec2/src/commands/CreateIpamPoolCommand.ts +++ b/clients/client-ec2/src/commands/CreateIpamPoolCommand.ts @@ -21,7 +21,7 @@ export interface CreateIpamPoolCommandOutput extends CreateIpamPoolResult, __Met /** *Create an IP address pool for Amazon VPC IP Address Manager (IPAM). In IPAM, a pool is a collection of contiguous IP addresses CIDRs. Pools enable you to organize your IP addresses according to your routing and security needs. For example, if you have separate routing and security needs for development and production applications, you can create a pool for each.
- *For more information, see Create a top-level pool in the Amazon VPC IPAM User Guide. + *
For more information, see Create a top-level pool in the Amazon VPC IPAM User Guide. *
* @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-ec2/src/commands/CreateIpamScopeCommand.ts b/clients/client-ec2/src/commands/CreateIpamScopeCommand.ts index c4584b845b713..95797f4754a7e 100644 --- a/clients/client-ec2/src/commands/CreateIpamScopeCommand.ts +++ b/clients/client-ec2/src/commands/CreateIpamScopeCommand.ts @@ -21,7 +21,7 @@ export interface CreateIpamScopeCommandOutput extends CreateIpamScopeResult, __M /** *Create an IPAM scope. In IPAM, a scope is the highest-level container within IPAM. An IPAM contains two default scopes. Each scope represents the IP space for a single network. The private scope is intended for all private IP address space. The public scope is intended for all public IP address space. Scopes enable you to reuse IP addresses across multiple unconnected networks without causing IP address overlap or conflict.
- *For more information, see Add a scope in the Amazon VPC IPAM User Guide.
+ *For more information, see Add a scope in the Amazon VPC IPAM User Guide.
* @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-ec2/src/commands/CreateLaunchTemplateVersionCommand.ts b/clients/client-ec2/src/commands/CreateLaunchTemplateVersionCommand.ts index 4aec70bb89dff..0419e38ac49b7 100644 --- a/clients/client-ec2/src/commands/CreateLaunchTemplateVersionCommand.ts +++ b/clients/client-ec2/src/commands/CreateLaunchTemplateVersionCommand.ts @@ -23,13 +23,13 @@ export interface CreateLaunchTemplateVersionCommandInput extends CreateLaunchTem export interface CreateLaunchTemplateVersionCommandOutput extends CreateLaunchTemplateVersionResult, __MetadataBearer {} /** - *Creates a new version for a launch template. You can specify an existing version of + *
Creates a new version of a launch template. You can specify an existing version of * launch template from which to base the new version.
*Launch template versions are numbered in the order in which they are created. You * cannot specify, change, or replace the numbering of launch template versions.
*Launch templates are immutable; after you create a launch template, you can't modify it. * Instead, you can create a new version of the launch template that includes any changes you require.
- *For more information, see Modify a launch template (manage launch template versions)in the + *
For more information, see Modify a launch template (manage launch template versions) in the * Amazon Elastic Compute Cloud User Guide.
* @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-ec2/src/commands/CreateTagsCommand.ts b/clients/client-ec2/src/commands/CreateTagsCommand.ts index f0a80958765cd..c4f28195405b6 100644 --- a/clients/client-ec2/src/commands/CreateTagsCommand.ts +++ b/clients/client-ec2/src/commands/CreateTagsCommand.ts @@ -25,10 +25,10 @@ export interface CreateTagsCommandOutput extends __MetadataBearer {} * the new value. Each resource can have a maximum of 50 tags. Each tag consists of a key and * optional value. Tag keys must be unique per resource. * - *For more information about tags, see Tagging Your Resources in the + *
For more information about tags, see Tag your Amazon EC2 resources in the * Amazon Elastic Compute Cloud User Guide. For more information about * creating IAM policies that control users' access to resources based on tags, see Supported - * Resource-Level Permissions for Amazon EC2 API Actions in the Amazon + * resource-level permissions for Amazon EC2 API actions in the Amazon * Elastic Compute Cloud User Guide.
* @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-ec2/src/commands/CreateTransitGatewayPeeringAttachmentCommand.ts b/clients/client-ec2/src/commands/CreateTransitGatewayPeeringAttachmentCommand.ts index 056f79e4cf015..fb2928fa4b951 100644 --- a/clients/client-ec2/src/commands/CreateTransitGatewayPeeringAttachmentCommand.ts +++ b/clients/client-ec2/src/commands/CreateTransitGatewayPeeringAttachmentCommand.ts @@ -30,9 +30,8 @@ export interface CreateTransitGatewayPeeringAttachmentCommandOutput /** *Requests a transit gateway peering attachment between the specified transit gateway - * (requester) and a peer transit gateway (accepter). The transit gateways must be in - * different Regions. The peer transit gateway can be in your account or a different - * Amazon Web Services account.
+ * (requester) and a peer transit gateway (accepter). The peer transit gateway can be in + * your account or a different Amazon Web Services account. *After you create the peering attachment, the owner of the accepter transit gateway * must accept the attachment request.
* @example diff --git a/clients/client-ec2/src/commands/DeleteIpamCommand.ts b/clients/client-ec2/src/commands/DeleteIpamCommand.ts index cb4bcd974423a..e1ff203682cbc 100644 --- a/clients/client-ec2/src/commands/DeleteIpamCommand.ts +++ b/clients/client-ec2/src/commands/DeleteIpamCommand.ts @@ -21,7 +21,7 @@ export interface DeleteIpamCommandOutput extends DeleteIpamResult, __MetadataBea /** *Delete an IPAM. Deleting an IPAM removes all monitored data associated with the IPAM including the historical data for CIDRs.
- *For more information, see Delete an IPAM in the Amazon VPC IPAM User Guide. + *
For more information, see Delete an IPAM in the Amazon VPC IPAM User Guide. *
* @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-ec2/src/commands/DeleteIpamPoolCommand.ts b/clients/client-ec2/src/commands/DeleteIpamPoolCommand.ts index d7b5ad4b838cd..269ea5ff1147e 100644 --- a/clients/client-ec2/src/commands/DeleteIpamPoolCommand.ts +++ b/clients/client-ec2/src/commands/DeleteIpamPoolCommand.ts @@ -26,7 +26,7 @@ export interface DeleteIpamPoolCommandOutput extends DeleteIpamPoolResult, __Met * allocations, see ReleaseIpamPoolAllocation. To deprovision pool * CIDRs, see DeprovisionIpamPoolCidr. * - *For more information, see Delete a pool in the Amazon VPC IPAM User Guide. + *
For more information, see Delete a pool in the Amazon VPC IPAM User Guide. *
* @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-ec2/src/commands/DeleteIpamScopeCommand.ts b/clients/client-ec2/src/commands/DeleteIpamScopeCommand.ts index f2498be5adbab..ac1256fbf562e 100644 --- a/clients/client-ec2/src/commands/DeleteIpamScopeCommand.ts +++ b/clients/client-ec2/src/commands/DeleteIpamScopeCommand.ts @@ -21,7 +21,7 @@ export interface DeleteIpamScopeCommandOutput extends DeleteIpamScopeResult, __M /** *Delete the scope for an IPAM. You cannot delete the default scopes.
- *For more information, see Delete a scope in the Amazon VPC IPAM User Guide. + *
For more information, see Delete a scope in the Amazon VPC IPAM User Guide. *
* @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-ec2/src/commands/DeleteTagsCommand.ts b/clients/client-ec2/src/commands/DeleteTagsCommand.ts index 132dc05288827..e68c0ae21bcf2 100644 --- a/clients/client-ec2/src/commands/DeleteTagsCommand.ts +++ b/clients/client-ec2/src/commands/DeleteTagsCommand.ts @@ -21,9 +21,10 @@ export interface DeleteTagsCommandOutput extends __MetadataBearer {} /** *Deletes the specified set of tags from the specified set of resources.
- *To list the current tags, use DescribeTags. For more information about tags, see - * Tagging Your Resources - * in the Amazon Elastic Compute Cloud User Guide.
+ *To list the current tags, use DescribeTags. For more information about + * tags, see Tag + * your Amazon EC2 resources in the Amazon Elastic Compute Cloud User + * Guide.
* @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-ec2/src/commands/DeprovisionIpamPoolCidrCommand.ts b/clients/client-ec2/src/commands/DeprovisionIpamPoolCidrCommand.ts index 4d3de1f4c0cf6..c7f145eacb27b 100644 --- a/clients/client-ec2/src/commands/DeprovisionIpamPoolCidrCommand.ts +++ b/clients/client-ec2/src/commands/DeprovisionIpamPoolCidrCommand.ts @@ -23,7 +23,7 @@ export interface DeprovisionIpamPoolCidrCommandInput extends DeprovisionIpamPool export interface DeprovisionIpamPoolCidrCommandOutput extends DeprovisionIpamPoolCidrResult, __MetadataBearer {} /** - *Deprovision a CIDR provisioned from an IPAM pool. If you deprovision a CIDR from a pool that has a source pool, the CIDR is recycled back into the source pool. For more information, see Deprovision pool CIDRs in the Amazon VPC IPAM User Guide.
+ *Deprovision a CIDR provisioned from an IPAM pool. If you deprovision a CIDR from a pool that has a source pool, the CIDR is recycled back into the source pool. For more information, see Deprovision pool CIDRs in the Amazon VPC IPAM User Guide.
* @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-ec2/src/commands/DescribeIpamsCommand.ts b/clients/client-ec2/src/commands/DescribeIpamsCommand.ts index bf476b314c11d..47d82ffd217bf 100644 --- a/clients/client-ec2/src/commands/DescribeIpamsCommand.ts +++ b/clients/client-ec2/src/commands/DescribeIpamsCommand.ts @@ -21,7 +21,7 @@ export interface DescribeIpamsCommandOutput extends DescribeIpamsResult, __Metad /** *Get information about your IPAM pools.
- *For more information, see What is IPAM? in the Amazon VPC IPAM User Guide. + *
For more information, see What is IPAM? in the Amazon VPC IPAM User Guide. *
* @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-ec2/src/commands/DescribeTagsCommand.ts b/clients/client-ec2/src/commands/DescribeTagsCommand.ts index 19bdc46058764..d54c5c45ee1e0 100644 --- a/clients/client-ec2/src/commands/DescribeTagsCommand.ts +++ b/clients/client-ec2/src/commands/DescribeTagsCommand.ts @@ -21,8 +21,8 @@ export interface DescribeTagsCommandOutput extends DescribeTagsResult, __Metadat /** *Describes the specified tags for your EC2 resources.
- *For more information about tags, see Tagging Your Resources in the - * Amazon Elastic Compute Cloud User Guide.
+ *For more information about tags, see Tag your Amazon EC2 resources in the + * Amazon Elastic Compute Cloud User Guide.
* @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-ec2/src/commands/DisableIpamOrganizationAdminAccountCommand.ts b/clients/client-ec2/src/commands/DisableIpamOrganizationAdminAccountCommand.ts index adb0f599a6996..d6b27338d1054 100644 --- a/clients/client-ec2/src/commands/DisableIpamOrganizationAdminAccountCommand.ts +++ b/clients/client-ec2/src/commands/DisableIpamOrganizationAdminAccountCommand.ts @@ -28,7 +28,7 @@ export interface DisableIpamOrganizationAdminAccountCommandOutput __MetadataBearer {} /** - *Disable the IPAM account. For more information, see Enable integration with Organizations in the Amazon VPC IPAM User Guide. + *
Disable the IPAM account. For more information, see Enable integration with Organizations in the Amazon VPC IPAM User Guide. *
* @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-ec2/src/commands/EnableIpamOrganizationAdminAccountCommand.ts b/clients/client-ec2/src/commands/EnableIpamOrganizationAdminAccountCommand.ts index e0de927221e7c..142f6bcbe8175 100644 --- a/clients/client-ec2/src/commands/EnableIpamOrganizationAdminAccountCommand.ts +++ b/clients/client-ec2/src/commands/EnableIpamOrganizationAdminAccountCommand.ts @@ -28,7 +28,7 @@ export interface EnableIpamOrganizationAdminAccountCommandOutput __MetadataBearer {} /** - *Enable an Organizations member account as the IPAM admin account. You cannot select the Organizations management account as the IPAM admin account. For more information, see Enable integration with Organizations in the Amazon VPC IPAM User Guide. + *
Enable an Organizations member account as the IPAM admin account. You cannot select the Organizations management account as the IPAM admin account. For more information, see Enable integration with Organizations in the Amazon VPC IPAM User Guide. *
* @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-ec2/src/commands/GetIpamAddressHistoryCommand.ts b/clients/client-ec2/src/commands/GetIpamAddressHistoryCommand.ts index 760cbf3ea650b..2bb51b0511a4c 100644 --- a/clients/client-ec2/src/commands/GetIpamAddressHistoryCommand.ts +++ b/clients/client-ec2/src/commands/GetIpamAddressHistoryCommand.ts @@ -23,7 +23,7 @@ export interface GetIpamAddressHistoryCommandInput extends GetIpamAddressHistory export interface GetIpamAddressHistoryCommandOutput extends GetIpamAddressHistoryResult, __MetadataBearer {} /** - *Retrieve historical information about a CIDR within an IPAM scope. For more information, see View the history of IP addresses in the Amazon VPC IPAM User Guide.
+ *Retrieve historical information about a CIDR within an IPAM scope. For more information, see View the history of IP addresses in the Amazon VPC IPAM User Guide.
* @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-ec2/src/commands/GetLaunchTemplateDataCommand.ts b/clients/client-ec2/src/commands/GetLaunchTemplateDataCommand.ts index f6c2f0a3a6162..7e193507816fa 100644 --- a/clients/client-ec2/src/commands/GetLaunchTemplateDataCommand.ts +++ b/clients/client-ec2/src/commands/GetLaunchTemplateDataCommand.ts @@ -27,8 +27,8 @@ export interface GetLaunchTemplateDataCommandOutput extends GetLaunchTemplateDat * create a launch template. *This action calls on other describe actions to get instance information. Depending on
* your instance configuration, you may need to allow the following actions in your IAM
- * policy: DescribeSpotInstanceRequests, DescribeInstanceCreditSpecifications,
- * DescribeVolumes, DescribeInstanceAttribute, and DescribeElasticGpus. Or, you can allow
+ * policy: DescribeSpotInstanceRequests
, DescribeInstanceCreditSpecifications
,
+ * DescribeVolumes
, DescribeInstanceAttribute
, and DescribeElasticGpus
. Or, you can allow
* describe*
depending on your instance requirements.
Modify the configurations of an IPAM pool.
- *For more information, see Modify a pool in the Amazon VPC IPAM User Guide. + *
For more information, see Modify a pool in the Amazon VPC IPAM User Guide. *
* @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-ec2/src/commands/ModifyIpamResourceCidrCommand.ts b/clients/client-ec2/src/commands/ModifyIpamResourceCidrCommand.ts index 7744b4467feb9..cf70ea1e5ed18 100644 --- a/clients/client-ec2/src/commands/ModifyIpamResourceCidrCommand.ts +++ b/clients/client-ec2/src/commands/ModifyIpamResourceCidrCommand.ts @@ -24,7 +24,7 @@ export interface ModifyIpamResourceCidrCommandOutput extends ModifyIpamResourceC /** *Modify a resource CIDR. You can use this action to transfer resource CIDRs between scopes and ignore resource CIDRs that you do not want to manage. If set to false, the resource will not be tracked for overlap, it cannot be auto-imported into a pool, and it will be removed from any pool it has an allocation in.
- *For more information, see Move resource CIDRs between scopes and Change the monitoring state of resource CIDRs in the Amazon VPC IPAM User Guide.
+ *For more information, see Move resource CIDRs between scopes and Change the monitoring state of resource CIDRs in the Amazon VPC IPAM User Guide.
* @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-ec2/src/commands/MoveByoipCidrToIpamCommand.ts b/clients/client-ec2/src/commands/MoveByoipCidrToIpamCommand.ts index e6ba50796729f..9e36aa6fa7deb 100644 --- a/clients/client-ec2/src/commands/MoveByoipCidrToIpamCommand.ts +++ b/clients/client-ec2/src/commands/MoveByoipCidrToIpamCommand.ts @@ -24,7 +24,7 @@ export interface MoveByoipCidrToIpamCommandOutput extends MoveByoipCidrToIpamRes /** *Move an BYOIP IPv4 CIDR to IPAM from a public IPv4 pool.
- *If you already have an IPv4 BYOIP CIDR with Amazon Web Services, you can move the CIDR to IPAM from a public IPv4 pool. You cannot move an IPv6 CIDR to IPAM. If you are bringing a new IP address to Amazon Web Services for the first time, complete the steps in Tutorial: BYOIP address CIDRs to IPAM.
+ *If you already have an IPv4 BYOIP CIDR with Amazon Web Services, you can move the CIDR to IPAM from a public IPv4 pool. You cannot move an IPv6 CIDR to IPAM. If you are bringing a new IP address to Amazon Web Services for the first time, complete the steps in Tutorial: BYOIP address CIDRs to IPAM.
* @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-ec2/src/commands/ProvisionIpamPoolCidrCommand.ts b/clients/client-ec2/src/commands/ProvisionIpamPoolCidrCommand.ts index a30192bc4fa88..a9f58a7048305 100644 --- a/clients/client-ec2/src/commands/ProvisionIpamPoolCidrCommand.ts +++ b/clients/client-ec2/src/commands/ProvisionIpamPoolCidrCommand.ts @@ -24,7 +24,7 @@ export interface ProvisionIpamPoolCidrCommandOutput extends ProvisionIpamPoolCid /** *Provision a CIDR to an IPAM pool. You can use this action to provision new CIDRs to a top-level pool or to transfer a CIDR from a top-level pool to a pool within it.
- *For more information, see Provision CIDRs to pools in the Amazon VPC IPAM User Guide. + *
For more information, see Provision CIDRs to pools in the Amazon VPC IPAM User Guide. *
* @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-ec2/src/commands/ProvisionPublicIpv4PoolCidrCommand.ts b/clients/client-ec2/src/commands/ProvisionPublicIpv4PoolCidrCommand.ts index 4adbe3dea85ee..ca84e64a92bd8 100644 --- a/clients/client-ec2/src/commands/ProvisionPublicIpv4PoolCidrCommand.ts +++ b/clients/client-ec2/src/commands/ProvisionPublicIpv4PoolCidrCommand.ts @@ -24,7 +24,7 @@ export interface ProvisionPublicIpv4PoolCidrCommandOutput extends ProvisionPubli /** *Provision a CIDR to a public IPv4 pool.
- *For more information about IPAM, see What is IPAM? in the Amazon VPC IPAM User Guide.
+ *For more information about IPAM, see What is IPAM? in the Amazon VPC IPAM User Guide.
* @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-ec2/src/commands/RegisterImageCommand.ts b/clients/client-ec2/src/commands/RegisterImageCommand.ts index ebef959c42dbe..97f01755be2f3 100644 --- a/clients/client-ec2/src/commands/RegisterImageCommand.ts +++ b/clients/client-ec2/src/commands/RegisterImageCommand.ts @@ -25,8 +25,10 @@ export interface RegisterImageCommandOutput extends RegisterImageResult, __Metad * Creating your * own AMIs in the Amazon Elastic Compute Cloud User Guide. *For Amazon EBS-backed instances, CreateImage creates and registers - * the AMI in a single request, so you don't have to register the AMI yourself.
+ *For Amazon EBS-backed instances, CreateImage creates and registers the AMI + * in a single request, so you don't have to register the AMI yourself. We recommend that you + * always use CreateImage unless you have a specific reason to use + * RegisterImage.
*If needed, you can deregister an AMI at any time. Any modifications you make to an AMI backed by an instance store volume invalidates its registration. diff --git a/clients/client-ec2/src/commands/ReleaseIpamPoolAllocationCommand.ts b/clients/client-ec2/src/commands/ReleaseIpamPoolAllocationCommand.ts index 40d31b28e4cc7..05248fc54f5e4 100644 --- a/clients/client-ec2/src/commands/ReleaseIpamPoolAllocationCommand.ts +++ b/clients/client-ec2/src/commands/ReleaseIpamPoolAllocationCommand.ts @@ -23,7 +23,7 @@ export interface ReleaseIpamPoolAllocationCommandInput extends ReleaseIpamPoolAl export interface ReleaseIpamPoolAllocationCommandOutput extends ReleaseIpamPoolAllocationResult, __MetadataBearer {} /** - *
Release an allocation within an IPAM pool. You can only use this action to release manual allocations. To remove an allocation for a resource without deleting the resource, set its monitored state to false using ModifyIpamResourceCidr. For more information, see Release an allocation in the Amazon VPC IPAM User Guide. + *
Release an allocation within an IPAM pool. You can only use this action to release manual allocations. To remove an allocation for a resource without deleting the resource, set its monitored state to false using ModifyIpamResourceCidr. For more information, see Release an allocation in the Amazon VPC IPAM User Guide. *
* @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-ec2/src/models/models_0.ts b/clients/client-ec2/src/models/models_0.ts index c6200fe546eea..5f0df6b1fb4e6 100644 --- a/clients/client-ec2/src/models/models_0.ts +++ b/clients/client-ec2/src/models/models_0.ts @@ -373,7 +373,11 @@ export interface PeeringTgwInfo { */ TransitGatewayId?: string; + /** + *The ID of the core network where the transit gateway peer is located.
+ */ CoreNetworkId?: string; + /** *The ID of the Amazon Web Services account that owns the transit gateway.
*/ @@ -399,7 +403,13 @@ export enum DynamicRoutingValue { enable = "enable", } +/** + *Describes dynamic routing for the transit gateway peering attachment.
+ */ export interface TransitGatewayPeeringAttachmentOptions { + /** + *Describes whether dynamic routing is enabled or disabled for the transit gateway peering attachment.
+ */ DynamicRouting?: DynamicRoutingValue | string; } @@ -502,7 +512,11 @@ export interface TransitGatewayPeeringAttachment { */ AccepterTgwInfo?: PeeringTgwInfo; + /** + *Details about the transit gateway peering attachment.
+ */ Options?: TransitGatewayPeeringAttachmentOptions; + /** *The status of the transit gateway peering attachment.
*/ @@ -1866,7 +1880,7 @@ export enum ActivityStatus { /** *Add an operating Region to an IPAM. Operating Regions are Amazon Web Services Regions where the IPAM is allowed to manage IP address CIDRs. IPAM only * discovers and monitors resources in the Amazon Web Services Regions you select as operating Regions.
- *For more information about operating Regions, see Create an IPAM in the Amazon VPC IPAM User Guide. + *
For more information about operating Regions, see Create an IPAM in the Amazon VPC IPAM User Guide. *
*/ export interface AddIpamOperatingRegion { @@ -2230,6 +2244,12 @@ export type ResourceType = /** *The tags to apply to a resource when the resource is being created.
+ *The Valid Values
lists all the resource types that can be tagged.
+ * However, the action you're using might not support tagging all of these resource types.
+ * If you try to tag a resource type that is unsupported for the action you're using,
+ * you'll get an error.
Associate a CIDR allocated from an IPv4 IPAM pool to a VPC. For more information about Amazon VPC IP Address Manager (IPAM), see What is IPAM? in the Amazon VPC IPAM User Guide.
+ *Associate a CIDR allocated from an IPv4 IPAM pool to a VPC. For more information about Amazon VPC IP Address Manager (IPAM), see What is IPAM? in the Amazon VPC IPAM User Guide.
*/ Ipv4IpamPoolId?: string; /** - *The netmask length of the IPv4 CIDR you would like to associate from an Amazon VPC IP Address Manager (IPAM) pool. For more information about IPAM, see What is IPAM? in the Amazon VPC IPAM User Guide. + *
The netmask length of the IPv4 CIDR you would like to associate from an Amazon VPC IP Address Manager (IPAM) pool. For more information about IPAM, see What is IPAM? in the Amazon VPC IPAM User Guide. *
*/ Ipv4NetmaskLength?: number; /** - *Associates a CIDR allocated from an IPv6 IPAM pool to a VPC. For more information about Amazon VPC IP Address Manager (IPAM), see What is IPAM? in the Amazon VPC IPAM User Guide.
+ *Associates a CIDR allocated from an IPv6 IPAM pool to a VPC. For more information about Amazon VPC IP Address Manager (IPAM), see What is IPAM? in the Amazon VPC IPAM User Guide.
*/ Ipv6IpamPoolId?: string; /** - *The netmask length of the IPv6 CIDR you would like to associate from an Amazon VPC IP Address Manager (IPAM) pool. For more information about IPAM, see What is IPAM? in the Amazon VPC IPAM User Guide.
+ *The netmask length of the IPv6 CIDR you would like to associate from an Amazon VPC IP Address Manager (IPAM) pool. For more information about IPAM, see What is IPAM? in the Amazon VPC IPAM User Guide.
*/ Ipv6NetmaskLength?: number; } diff --git a/clients/client-ec2/src/models/models_1.ts b/clients/client-ec2/src/models/models_1.ts index 6459ec109f167..d74f74f9f62a5 100644 --- a/clients/client-ec2/src/models/models_1.ts +++ b/clients/client-ec2/src/models/models_1.ts @@ -37,19 +37,22 @@ import { } from "./models_0"; /** - *Describes the Amazon EC2 launch template and the launch template version that can be used by
- * an EC2 Fleet to configure Amazon EC2 instances. For information about launch templates, see Launching
+ * The Amazon EC2 launch template that can be used by
+ * an EC2 Fleet to configure Amazon EC2 instances. You must specify either the ID or name of the launch template in the request, but not both. For information about launch templates, see Launch
* an instance from a launch template in the
* Amazon EC2 User Guide. The ID of the launch template. If you specify the template ID, you can't specify the template name. The ID of the launch template. You must specify the The name of the launch template. If you specify the template name, you can't specify the template ID. The name of the launch template. You must specify the The price protection threshold for Spot Instance. This is the maximum you’ll pay for an Spot Instance,
- * expressed as a percentage above the cheapest M, C, or R instance type with your specified
+ * expressed as a percentage above the least expensive current generation M, C, or R instance type with your specified
* attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance
* types priced above your threshold. The parameter accepts an integer, which Amazon EC2 interprets as a percentage. The price protection threshold for On-Demand Instances. This is the maximum you’ll pay for an On-Demand Instance,
- * expressed as a percentage above the cheapest M, C, or R instance type with your specified
+ * expressed as a percentage above the least expensive current generation M, C, or R instance type with your specified
* attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance
* types priced above your threshold. The parameter accepts an integer, which Amazon EC2 interprets as a percentage. The maximum price per unit hour that you are willing to pay for a Spot Instance. The maximum price per unit hour that you are willing to pay for a Spot Instance. We do not recommend using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price.
+ * If you specify a maximum price, your instances will be interrupted more frequently than if you do not specify this parameter. The maximum amount per hour for Spot Instances that you're willing to pay. The maximum amount per hour for Spot Instances that you're willing to pay. We do not recommend
+ * using this parameter because it can lead to increased interruptions. If you do not specify
+ * this parameter, you will pay the current Spot price. If you specify a maximum price, your Spot Instances will be interrupted more frequently than if you do not specify this parameter. Describes the Amazon EC2 launch template and the launch template version that can be used
- * by a Spot Fleet request to configure Amazon EC2 instances. For information about launch templates,
- * see Launching an instance from a launch template in the
+ * The Amazon EC2 launch template that can be used by
+ * a Spot Fleet to configure Amazon EC2 instances. You must specify either the ID or name of the launch template in the request, but not both. For information about launch templates,
+ * see Launch an instance from a launch template in the
* Amazon EC2 User Guide for Linux Instances. The ID of the launch template. If you specify the template ID, you can't specify the
- * template name. The ID of the launch template. You must specify the The name of the launch template. If you specify the template name, you can't specify
- * the template ID. The name of the launch template. You must specify the The price protection threshold for Spot Instances. This is the maximum you’ll pay for a Spot Instance,
- * expressed as a percentage above the cheapest M, C, or R instance type with your specified
+ * expressed as a percentage above the least expensive current generation M, C, or R instance type with your specified
* attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance
* types priced above your threshold. The parameter accepts an integer, which Amazon EC2 interprets as a percentage. The price protection threshold for On-Demand Instances. This is the maximum you’ll pay for an On-Demand Instance,
- * expressed as a percentage above the cheapest M, C, or R instance type with your specified
+ * expressed as a percentage above the least expensive current generation M, C, or R instance type with your specified
* attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance
* types priced above your threshold. The parameter accepts an integer, which Amazon EC2 interprets as a percentage. The maximum price per unit hour that you are willing to pay for a Spot Instance. The maximum price per unit hour that you are willing to pay for a Spot Instance. We do not recommend using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price.
+ * If you specify a maximum price, your instances will be interrupted more frequently than if you do not specify this parameter. The operating Regions for the IPAM. Operating Regions are Amazon Web Services Regions where the IPAM is allowed to manage IP address CIDRs. IPAM only
* discovers and monitors resources in the Amazon Web Services Regions you select as operating Regions. For more information about operating Regions, see Create an IPAM in the Amazon VPC IPAM User Guide.
+ * For more information about operating Regions, see Create an IPAM in the Amazon VPC IPAM User Guide.
* The operating Regions for an IPAM. Operating Regions are Amazon Web Services Regions where the IPAM is allowed to manage IP address CIDRs. IPAM only
* discovers and monitors resources in the Amazon Web Services Regions you select as operating Regions. For more information about operating Regions, see Create an IPAM in the Amazon VPC IPAM User Guide. For more information about operating Regions, see Create an IPAM in the Amazon VPC IPAM User Guide. IPAM is a VPC feature that you can use to automate your IP address management workflows including assigning, tracking, troubleshooting, and auditing IP addresses across Amazon Web Services Regions and accounts throughout your Amazon Web Services Organization. For more information, see What is IPAM? in the Amazon VPC IPAM User Guide. IPAM is a VPC feature that you can use to automate your IP address management workflows including assigning, tracking, troubleshooting, and auditing IP addresses across Amazon Web Services Regions and accounts throughout your Amazon Web Services Organization. For more information, see What is IPAM? in the Amazon VPC IPAM User Guide. The number of scopes in the IPAM. The scope quota is 5. For more information on quotas, see Quotas in IPAM in the Amazon VPC IPAM User Guide.
+ * The number of scopes in the IPAM. The scope quota is 5. For more information on quotas, see Quotas in IPAM in the Amazon VPC IPAM User Guide.
* The operating Regions for an IPAM. Operating Regions are Amazon Web Services Regions where the IPAM is allowed to manage IP address CIDRs. IPAM only
* discovers and monitors resources in the Amazon Web Services Regions you select as operating Regions. For more information about operating Regions, see Create an IPAM in the Amazon VPC IPAM User Guide. For more information about operating Regions, see Create an IPAM in the Amazon VPC IPAM User Guide. The depth of pools in your IPAM pool. The pool depth quota is 10. For more information, see Quotas in IPAM in the Amazon VPC IPAM User Guide.
+ * The depth of pools in your IPAM pool. The pool depth quota is 10. For more information, see Quotas in IPAM in the Amazon VPC IPAM User Guide.
* In IPAM, a scope is the highest-level container within IPAM. An IPAM contains two default scopes. Each scope represents the IP space for a single network. The private scope is intended for all private IP address space. The public scope is intended for all public IP address space. Scopes enable you to reuse IP addresses across multiple unconnected networks without causing IP address overlap or conflict. For more information, see How IPAM works in the Amazon VPC IPAM User Guide. For more information, see How IPAM works in the Amazon VPC IPAM User Guide. The number of threads per CPU core. To disable multithreading for the instance,
- * specify a value of 1. Otherwise, specify the default value of 2.LaunchTemplateId
or the LaunchTemplateName
, but not both.LaunchTemplateName
or the LaunchTemplateId
, but not both.LaunchTemplateId
or the LaunchTemplateName
, but not both.LaunchTemplateName
or the LaunchTemplateId
, but not both.1
. Otherwise, specify the default value of 2
.
The credit option for CPU usage of a T2, T3, or T3a instance.
+ *The credit option for CPU usage of a T instance.
*/ export interface CreditSpecificationRequest { /** - *The credit option for CPU usage of a T2, T3, or T3a instance. Valid values are
- * standard
and unlimited
.
The credit option for CPU usage of a T instance.
+ *Valid values: standard
| unlimited
+ *
The maximum hourly price you're willing to pay for the Spot Instances.
+ *The maximum hourly price you're willing to pay for the Spot Instances. We do not recommend + * using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price.
+ *If you specify a maximum price, your Spot Instances will be interrupted more frequently than if you do not specify this parameter.
+ *The required duration for the Spot Instances (also known as Spot blocks), in minutes. - * This value must be a multiple of 60 (60, 120, 180, 240, 300, or 360).
+ *Deprecated.
*/ BlockDurationMinutes?: number; /** - *The end date of the request. For a one-time request, the request remains active until - * all instances launch, the request is canceled, or this date is reached. If the request - * is persistent, it remains active until it is canceled or this date and time is reached. - * The default end date is 7 days from the current date.
+ *The end date of the request, in UTC format + * (YYYY-MM-DDTHH:MM:SSZ). Supported only for + * persistent requests.
+ *For a persistent request, the request remains active until the ValidUntil
+ * date and time is reached. Otherwise, the request remains active until you cancel it.
For a one-time request, ValidUntil
is not supported. The request remains active until
+ * all instances launch or you cancel the request.
Default: 7 days from the current date
*/ ValidUntil?: Date; @@ -4141,7 +4172,7 @@ export enum LaunchTemplateInstanceMetadataTagsState { } /** - *The metadata options for the instance. For more information, see Instance Metadata and User Data in the + *
The metadata options for the instance. For more information, see Instance metadata and user data in the * Amazon Elastic Compute Cloud User Guide.
*/ export interface LaunchTemplateInstanceMetadataOptionsRequest { @@ -4163,7 +4194,8 @@ export interface LaunchTemplateInstanceMetadataOptionsRequest { /** *The desired HTTP PUT response hop limit for instance metadata requests. The larger the * number, the further instance metadata requests can travel.
- *Default: 1
+ *Default: 1
+ *
Possible values: Integers from 1 to 64
*/ HttpPutResponseHopLimit?: number; @@ -5002,14 +5034,14 @@ export interface CreateLaunchTemplateVersionRequest { ClientToken?: string; /** - *The ID of the launch template. You must specify either the launch template ID or - * launch template name in the request.
+ *The ID of the launch template.
+ *You must specify either the LaunchTemplateId
or the LaunchTemplateName
, but not both.
The name of the launch template. You must specify either the launch template ID or - * launch template name in the request.
+ *The name of the launch template.
+ *You must specify the LaunchTemplateName
or the LaunchTemplateId
, but not both.
Describes the credit option for CPU usage of a T2, T3, or T3a instance.
+ *Describes the credit option for CPU usage of a T instance.
*/ export interface CreditSpecification { /** - *The credit option for CPU usage of a T2, T3, or T3a instance. Valid values are
- * standard
and unlimited
.
The credit option for CPU usage of a T instance.
+ *Valid values: standard
| unlimited
+ *
The maximum hourly price you're willing to pay for the Spot Instances.
+ *The maximum hourly price you're willing to pay for the Spot Instances. We do not recommend + * using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price.
+ *If you specify a maximum price, your Spot Instances will be interrupted more frequently than if you do not specify this parameter.
+ *The metadata options for the instance. For more information, see Instance Metadata and User Data in the + *
The metadata options for the instance. For more information, see Instance metadata and user data in the * Amazon Elastic Compute Cloud User Guide.
*/ export interface LaunchTemplateInstanceMetadataOptions { diff --git a/clients/client-ec2/src/models/models_2.ts b/clients/client-ec2/src/models/models_2.ts index 5c82044f3fcb1..7adc0e0860988 100644 --- a/clients/client-ec2/src/models/models_2.ts +++ b/clients/client-ec2/src/models/models_2.ts @@ -712,7 +712,13 @@ export namespace CreateTransitGatewayMulticastDomainResult { }); } +/** + *Describes whether dynamic routing is enabled or disabled for the transit gateway peering request.
+ */ export interface CreateTransitGatewayPeeringAttachmentRequestOptions { + /** + *Indicates whether dynamic routing is enabled or disabled.
+ */ DynamicRouting?: DynamicRoutingValue | string; } @@ -746,7 +752,11 @@ export interface CreateTransitGatewayPeeringAttachmentRequest { */ PeerRegion: string | undefined; + /** + *Requests a transit gateway peering attachment.
+ */ Options?: CreateTransitGatewayPeeringAttachmentRequestOptions; + /** *The tags to apply to the transit gateway peering attachment.
*/ @@ -1090,7 +1100,11 @@ export interface TransitGatewayRoute { */ PrefixListId?: string; + /** + *The ID of the transit gateway route table announcement.
+ */ TransitGatewayRouteTableAnnouncementId?: string; + /** *The attachments.
*/ @@ -1288,13 +1302,21 @@ export interface TransitGatewayRouteTableAnnouncement { */ TransitGatewayId?: string; + /** + *The ID of the core network for the transit gateway route table announcement.
+ */ CoreNetworkId?: string; + /** *The ID of the peer transit gateway.
*/ PeerTransitGatewayId?: string; + /** + *<p>The ID of the core network for the peer.</p>
+ */ PeerCoreNetworkId?: string; + /** *The ID of the peering attachment.
*/ @@ -1748,24 +1770,24 @@ export interface CreateVpcRequest { Ipv6CidrBlock?: string; /** - *The ID of an IPv4 IPAM pool you want to use for allocating this VPC's CIDR. For more information, see What is IPAM? in the Amazon VPC IPAM User Guide. + *
The ID of an IPv4 IPAM pool you want to use for allocating this VPC's CIDR. For more information, see What is IPAM? in the Amazon VPC IPAM User Guide. * *
*/ Ipv4IpamPoolId?: string; /** - *The netmask length of the IPv4 CIDR you want to allocate to this VPC from an Amazon VPC IP Address Manager (IPAM) pool. For more information about IPAM, see What is IPAM? in the Amazon VPC IPAM User Guide.
+ *The netmask length of the IPv4 CIDR you want to allocate to this VPC from an Amazon VPC IP Address Manager (IPAM) pool. For more information about IPAM, see What is IPAM? in the Amazon VPC IPAM User Guide.
*/ Ipv4NetmaskLength?: number; /** - *The ID of an IPv6 IPAM pool which will be used to allocate this VPC an IPv6 CIDR. IPAM is a VPC feature that you can use to automate your IP address management workflows including assigning, tracking, troubleshooting, and auditing IP addresses across Amazon Web Services Regions and accounts throughout your Amazon Web Services Organization. For more information, see What is IPAM? in the Amazon VPC IPAM User Guide.
+ *The ID of an IPv6 IPAM pool which will be used to allocate this VPC an IPv6 CIDR. IPAM is a VPC feature that you can use to automate your IP address management workflows including assigning, tracking, troubleshooting, and auditing IP addresses across Amazon Web Services Regions and accounts throughout your Amazon Web Services Organization. For more information, see What is IPAM? in the Amazon VPC IPAM User Guide.
*/ Ipv6IpamPoolId?: string; /** - *The netmask length of the IPv6 CIDR you want to allocate to this VPC from an Amazon VPC IP Address Manager (IPAM) pool. For more information about IPAM, see What is IPAM? in the Amazon VPC IPAM User Guide.
+ *The netmask length of the IPv6 CIDR you want to allocate to this VPC from an Amazon VPC IP Address Manager (IPAM) pool. For more information about IPAM, see What is IPAM? in the Amazon VPC IPAM User Guide.
*/ Ipv6NetmaskLength?: number; @@ -4515,14 +4537,14 @@ export interface DeleteLaunchTemplateRequest { DryRun?: boolean; /** - *The ID of the launch template. You must specify either the launch template ID or - * launch template name in the request.
+ *The ID of the launch template.
+ *You must specify either the LaunchTemplateId
or the LaunchTemplateName
, but not both.
The name of the launch template. You must specify either the launch template ID or - * launch template name in the request.
+ *The name of the launch template.
+ *You must specify either the LaunchTemplateName
or the LaunchTemplateId
, but not both.
The ID of the launch template. You must specify either the launch template ID or - * launch template name in the request.
+ *The ID of the launch template.
+ *You must specify either the LaunchTemplateId
or the LaunchTemplateName
, but not both.
The name of the launch template. You must specify either the launch template ID or - * launch template name in the request.
+ *The name of the launch template.
+ *You must specify either the LaunchTemplateName
or the LaunchTemplateId
, but not both.
The maximum amount per hour for Spot Instances that you're willing to pay.
+ *The maximum amount per hour for Spot Instances that you're willing to pay. We do not recommend + * using this parameter because it can lead to increased interruptions. If you do not specify + * this parameter, you will pay the current Spot price.
+ *If you specify a maximum price, your Spot Instances will be interrupted more frequently than if you do not specify this parameter.
+ *The ID of the launch template. To describe one or more versions of a specified launch - * template, you must specify either the launch template ID or the launch template name in - * the request. To describe all the latest or default launch template versions in your + *
The ID of the launch template.
+ *To describe one or more versions of a specified launch
+ * template, you must specify either the LaunchTemplateId
or the LaunchTemplateName
, but not both.
To describe all the latest or default launch template versions in your * account, you must omit this parameter.
*/ LaunchTemplateId?: string; /** - *The name of the launch template. To describe one or more versions of a specified - * launch template, you must specify either the launch template ID or the launch template - * name in the request. To describe all the latest or default launch template versions in + *
The name of the launch template.
+ *To describe one or more versions of a specified
+ * launch template, you must specify either the LaunchTemplateName
or the LaunchTemplateId
, but not both.
To describe all the latest or default launch template versions in * your account, you must omit this parameter.
*/ LaunchTemplateName?: string; @@ -10027,7 +10034,7 @@ export interface DescribeLaunchTemplateVersionsRequest { * version, the valid value is$Latest
. To describe all launch templates in
* your account that are defined as the default version, the valid value is
* $Default
. You can specify $Latest
and
- * $Default
in the same call. You cannot specify numbers.
+ * $Default
in the same request. You cannot specify numbers.
*/
Versions?: string[];
@@ -10075,7 +10082,7 @@ export interface DescribeLaunchTemplateVersionsRequest {
*
* http-protocol-ipv4
- Indicates whether the IPv4 endpoint for the
* instance metadata service is enabled (enabled
|
- * disabled
).
disabled
).
* diff --git a/clients/client-ec2/src/models/models_4.ts b/clients/client-ec2/src/models/models_4.ts index 364a3ef8814cf..d8f31298259f2 100644 --- a/clients/client-ec2/src/models/models_4.ts +++ b/clients/client-ec2/src/models/models_4.ts @@ -4511,10 +4511,11 @@ export interface SpotFleetLaunchSpecification { RamdiskId?: string; /** - *
The maximum price per unit hour that you are willing to pay for a Spot Instance.
- * If this value is not specified, the default is the Spot price specified for the fleet.
- * To determine the Spot price per unit hour, divide the Spot price by the
- * value of WeightedCapacity
.
The maximum price per unit hour that you are willing to pay for a Spot Instance. We do not recommend using this parameter because it can lead to + * increased interruptions. If you do not specify this parameter, you will pay the current Spot price.
+ *If you specify a maximum price, your instances will be interrupted more frequently than if you do not specify this parameter.
+ *The maximum price per unit hour that you are willing to pay for a Spot - * Instance.
+ *The maximum price per unit hour that you are willing to pay for a Spot Instance. We do not recommend using this parameter because it can lead to + * increased interruptions. If you do not specify this parameter, you will pay the current Spot price.
+ *If you specify a maximum price, your instances will be interrupted more frequently than if you do not specify this parameter.
+ *The maximum price per unit hour that you are willing to pay for a Spot Instance. The - * default is the On-Demand price.
+ *The maximum price per unit hour that you are willing to pay for a Spot Instance. We do not recommend + * using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price.
+ *If you specify a maximum price, your instances will be interrupted more frequently than if you do not specify this parameter.
+ *The maximum price per hour that you are willing to pay for a Spot Instance.
+ *The maximum price per unit hour that you are willing to pay for a Spot Instance. We do not recommend + * using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price.
+ *If you specify a maximum price, your instances will be interrupted more frequently than if you do not specify this parameter.
+ *Describes the maximum price per hour that you are willing to pay for a Spot - * Instance.
+ *The maximum price per unit hour that you are willing to pay for a Spot Instance. We do not recommend + * using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price.
+ *If you specify a maximum price, your instances will be interrupted more frequently than if you do not specify this parameter.
+ *The maximum price per hour that you are willing to pay for a Spot Instance.
+ *The maximum price per unit hour that you are willing to pay for a Spot Instance. We do not recommend + * using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price.
+ *If you specify a maximum price, your instances will be interrupted more frequently than if you do not specify this parameter.
+ *The Amazon Resource Name (ARN) of the service.
+ *The name of the service.
*/ ServiceName?: string; diff --git a/clients/client-ec2/src/models/models_5.ts b/clients/client-ec2/src/models/models_5.ts index 97c931ac44d7d..0126f2f548476 100644 --- a/clients/client-ec2/src/models/models_5.ts +++ b/clients/client-ec2/src/models/models_5.ts @@ -1479,7 +1479,7 @@ export enum IpamAddressHistoryResourceType { } /** - *The historical record of a CIDR within an IPAM scope. For more information, see View the history of IP addresses in the Amazon VPC IPAM User Guide. + *
The historical record of a CIDR within an IPAM scope. For more information, see View the history of IP addresses in the Amazon VPC IPAM User Guide. *
*/ export interface IpamAddressHistoryRecord { @@ -1514,12 +1514,12 @@ export interface IpamAddressHistoryRecord { ResourceName?: string; /** - *The compliance status of a resource. For more information on compliance statuses, see Monitor CIDR usage by resource in the Amazon VPC IPAM User Guide.
+ *The compliance status of a resource. For more information on compliance statuses, see Monitor CIDR usage by resource in the Amazon VPC IPAM User Guide.
*/ ResourceComplianceStatus?: IpamComplianceStatus | string; /** - *The overlap status of an IPAM resource. The overlap status tells you if the CIDR for a resource overlaps with another CIDR in the scope. For more information on overlap statuses, see Monitor CIDR usage by resource in the Amazon VPC IPAM User Guide.
+ *The overlap status of an IPAM resource. The overlap status tells you if the CIDR for a resource overlaps with another CIDR in the scope. For more information on overlap statuses, see Monitor CIDR usage by resource in the Amazon VPC IPAM User Guide.
*/ ResourceOverlapStatus?: IpamOverlapStatus | string; @@ -1824,22 +1824,36 @@ export interface IpamResourceCidr { ResourceTags?: IpamResourceTag[]; /** - *The IP address space in the IPAM pool that is allocated to this resource. To convert the decimal to a percentage, multiply the decimal by 100.
+ *The percentage of IP address space in use. To convert the decimal to a percentage, multiply the decimal by 100. Note the following:
+ *For a resources that are VPCs, this is the percentage of IP address space in the VPC that's taken up by subnet CIDRs. + *
+ *For resources that are subnets, if the subnet has an IPv4 CIDR provisioned to it, this is the percentage of IPv4 address space in the subnet that's in use. If the subnet has an IPv6 CIDR provisioned to it, the percentage of IPv6 address space in use is not represented. The percentage of IPv6 address space in use cannot currently be calculated. + *
+ *For resources that are public IPv4 pools, this is the percentage of IP address space in the pool that's been allocated to Elastic IP addresses (EIPs). + *
+ *The compliance status of the IPAM resource. For more information on compliance statuses, see Monitor CIDR usage by resource in the Amazon VPC IPAM User Guide.
+ *The compliance status of the IPAM resource. For more information on compliance statuses, see Monitor CIDR usage by resource in the Amazon VPC IPAM User Guide.
*/ ComplianceStatus?: IpamComplianceStatus | string; /** - *The management state of the resource. For more information about management states, see Monitor CIDR usage by resource in the Amazon VPC IPAM User Guide.
+ *The management state of the resource. For more information about management states, see Monitor CIDR usage by resource in the Amazon VPC IPAM User Guide.
*/ ManagementState?: IpamManagementState | string; /** - *The overlap status of an IPAM resource. The overlap status tells you if the CIDR for a resource overlaps with another CIDR in the scope. For more information on overlap statuses, see Monitor CIDR usage by resource in the Amazon VPC IPAM User Guide.
+ *The overlap status of an IPAM resource. The overlap status tells you if the CIDR for a resource overlaps with another CIDR in the scope. For more information on overlap statuses, see Monitor CIDR usage by resource in the Amazon VPC IPAM User Guide.
*/ OverlapStatus?: IpamOverlapStatus | string; @@ -2834,12 +2848,10 @@ export interface GetTransitGatewayMulticastDomainAssociationsRequest { *resource-type
- The type of resource. The valid value is: vpc
.
*
* state
- The state of the subnet association. Valid values are
- * associated
|
- * associating
- * | disassociated
| disassociating
.
associated
| associating
|
+ * disassociated
| disassociating
.
* @@ -6461,7 +6473,7 @@ export namespace ModifyInstancePlacementResult { /** *
Remove an operating Region from an IPAM. Operating Regions are Amazon Web Services Regions where the IPAM is allowed to manage IP address CIDRs. IPAM only * discovers and monitors resources in the Amazon Web Services Regions you select as operating Regions.
- *For more information about operating Regions, see Create an IPAM in the Amazon VPC IPAM User Guide + *
For more information about operating Regions, see Create an IPAM in the Amazon VPC IPAM User Guide *
*/ export interface RemoveIpamOperatingRegion { @@ -6501,7 +6513,7 @@ export interface ModifyIpamRequest { /** *Choose the operating Regions for the IPAM. Operating Regions are Amazon Web Services Regions where the IPAM is allowed to manage IP address CIDRs. IPAM only * discovers and monitors resources in the Amazon Web Services Regions you select as operating Regions.
- *For more information about operating Regions, see Create an IPAM in the Amazon VPC IPAM User Guide.
+ *For more information about operating Regions, see Create an IPAM in the Amazon VPC IPAM User Guide.
*/ AddOperatingRegions?: AddIpamOperatingRegion[]; @@ -6591,7 +6603,7 @@ export interface ModifyIpamPoolRequest { ClearAllocationDefaultNetmaskLength?: boolean; /** - *Add tag allocation rules to a pool. For more information about allocation rules, see Create a top-level pool in the Amazon VPC IPAM User Guide.
+ *Add tag allocation rules to a pool. For more information about allocation rules, see Create a top-level pool in the Amazon VPC IPAM User Guide.
*/ AddAllocationResourceTags?: RequestIpamResourceTag[]; @@ -6752,14 +6764,14 @@ export interface ModifyLaunchTemplateRequest { ClientToken?: string; /** - *The ID of the launch template. You must specify either the launch template ID or - * launch template name in the request.
+ *The ID of the launch template.
+ *You must specify either the LaunchTemplateId
or the LaunchTemplateName
, but not both.
The name of the launch template. You must specify either the launch template ID or - * launch template name in the request.
+ *The name of the launch template.
+ *You must specify either the LaunchTemplateName
or the LaunchTemplateId
, but not both.
The maximum price per hour that you are willing to pay for a Spot Instance. The - * default is the On-Demand price.
+ *The maximum price per unit hour that you are willing to pay for a Spot Instance. We do not recommend + * using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price.
+ *If you specify a maximum price, your instances will be interrupted more frequently than if you do not specify this parameter.
+ *The maximum hourly price you're willing to pay for the Spot Instances. The default is - * the On-Demand price.
+ *The maximum hourly price that you're willing to pay for a Spot Instance. We do not recommend + * using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price.
+ *If you specify a maximum price, your Spot Instances will be interrupted more frequently than if you do not specify this parameter.
+ *The ID of the launch template.
+ *You must specify the LaunchTemplateId
or the LaunchTemplateName
, but not both.
The name of the launch template.
+ *You must specify the LaunchTemplateName
or the LaunchTemplateId
, but not both.
The tags to apply to the resources during launch. You can only tag instances and - * volumes on launch. The specified tags are applied to all instances or volumes that are - * created during launch. To tag a resource after it has been created, see CreateTags.
+ *The tags to apply to the resources that are created during instance launch.
+ *You can specify tags for the following resources only:
+ *Instances
+ *Volumes
+ *Elastic graphics
+ *Spot Instance requests
+ *Network interfaces
+ *To tag a resource after it has been created, see CreateTags.
*/ TagSpecifications?: TagSpecification[]; /** *The launch template to use to launch the instances. Any parameters that you specify in - * RunInstances override the same parameters in the launch template. + * RunInstances override the same parameters in the launch template. * You can specify either the name or ID of a launch template, but not both.
*/ LaunchTemplate?: LaunchTemplateSpecification; @@ -2757,10 +2782,9 @@ export interface RunInstancesRequest { * arestandard
and unlimited
. To change this attribute after
* launch, use
* ModifyInstanceCreditSpecification. For more information, see Burstable
- * performance instances in the Amazon EC2 User Guide.
+ * performance instances in the Amazon EC2 User Guide.
* Default: standard
(T2 instances) or unlimited
(T3/T3a
* instances)
For T3 instances with host
tenancy, only standard
is
* supported.
Indicates whether an instance is enabled for hibernation. For more information, see - * Hibernate + * Hibernate * your instance in the Amazon EC2 User Guide.
*You can't enable hibernation and Amazon Web Services Nitro Enclaves on the same * instance.
@@ -2802,7 +2826,7 @@ export interface RunInstancesRequest { *Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. For * more information, see What is Amazon Web Services Nitro * Enclaves? in the Amazon Web Services Nitro Enclaves User - * Guide.
+ * Guide. *You can't enable Amazon Web Services Nitro Enclaves and hibernation on the same * instance.
*/ @@ -2822,7 +2846,7 @@ export interface RunInstancesRequest { /** *Indicates whether an instance is enabled for stop protection. For more information, * see Stop - * Protection. + * protection. *
*/ DisableApiStop?: boolean; diff --git a/clients/client-elasticache/CHANGELOG.md b/clients/client-elasticache/CHANGELOG.md index be995af834c7f..8757b2e08972b 100644 --- a/clients/client-elasticache/CHANGELOG.md +++ b/clients/client-elasticache/CHANGELOG.md @@ -3,6 +3,17 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. +# [3.132.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.131.0...v3.132.0) (2022-07-18) + + +### Features + +* **client-elasticache:** Adding AutoMinorVersionUpgrade in the DescribeReplicationGroups API ([e694134](https://github.com/aws/aws-sdk-js-v3/commit/e694134054db9c206aec4b6e6fa26b8ab807d6eb)) + + + + + # [3.131.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.130.0...v3.131.0) (2022-07-15) **Note:** Version bump only for package @aws-sdk/client-elasticache diff --git a/clients/client-elasticache/package.json b/clients/client-elasticache/package.json index a60daee08e202..d4b236a6c0cd1 100644 --- a/clients/client-elasticache/package.json +++ b/clients/client-elasticache/package.json @@ -1,7 +1,7 @@ { "name": "@aws-sdk/client-elasticache", "description": "AWS SDK for JavaScript Elasticache Client for Node.js, Browser and React Native", - "version": "3.131.0", + "version": "3.132.0", "scripts": { "build": "concurrently 'yarn:build:cjs' 'yarn:build:es' 'yarn:build:types'", "build:cjs": "tsc -p tsconfig.cjs.json", diff --git a/clients/client-elasticache/src/models/models_0.ts b/clients/client-elasticache/src/models/models_0.ts index cc0009c505e0b..9b0e70093349f 100644 --- a/clients/client-elasticache/src/models/models_0.ts +++ b/clients/client-elasticache/src/models/models_0.ts @@ -1380,6 +1380,12 @@ export interface ReplicationGroup { * For more information, see Data tiering. 
*/ DataTiering?: DataTieringStatus | string; + + /** + *If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. + *
+ */ + AutoMinorVersionUpgrade?: boolean; } export namespace ReplicationGroup { @@ -1708,7 +1714,7 @@ export interface Snapshot { * *Previous generation: (not recommended)
+ *Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
*
* T1 node types:
* cache.t1.micro
@@ -1738,7 +1744,7 @@ export interface Snapshot {
*
*
Previous generation: (not recommended)
+ *Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
*
* C1 node types:
* cache.c1.xlarge
@@ -1853,7 +1859,7 @@ export interface Snapshot {
*
*
Previous generation: (not recommended)
+ *Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
*
* M2 node types:
* cache.m2.xlarge
,
@@ -2385,7 +2391,7 @@ export interface CreateCacheClusterMessage {
*
*
Previous generation: (not recommended)
+ *Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
*
* T1 node types:
* cache.t1.micro
@@ -2415,7 +2421,7 @@ export interface CreateCacheClusterMessage {
*
*
Previous generation: (not recommended)
+ *Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
*
* C1 node types:
* cache.c1.xlarge
@@ -2488,7 +2494,7 @@ export interface CreateCacheClusterMessage {
*
*
Previous generation: (not recommended)
+ *Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
*
* M2 node types:
* cache.m2.xlarge
,
@@ -2806,7 +2812,7 @@ export namespace CreateCacheClusterMessage {
*
*
Previous generation: (not recommended)
+ *Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
*
* T1 node types:
* cache.t1.micro
@@ -2836,7 +2842,7 @@ export namespace CreateCacheClusterMessage {
*
*
Previous generation: (not recommended)
+ *Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
*
* C1 node types:
* cache.c1.xlarge
@@ -2940,7 +2946,7 @@ export namespace CreateCacheClusterMessage {
*
*
Previous generation: (not recommended)
+ *Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
*
* M2 node types:
* cache.m2.xlarge
,
@@ -3296,7 +3302,7 @@ export interface CacheCluster {
*
*
Previous generation: (not recommended)
+ *Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
*
* T1 node types:
* cache.t1.micro
@@ -3326,7 +3332,7 @@ export interface CacheCluster {
*
*
Previous generation: (not recommended)
+ *Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
*
* C1 node types:
* cache.c1.xlarge
@@ -3431,7 +3437,7 @@ export interface CacheCluster {
*
*
Previous generation: (not recommended)
+ *Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
*
* M2 node types:
* cache.m2.xlarge
,
@@ -4799,7 +4805,7 @@ export interface CreateReplicationGroupMessage {
*
*
Previous generation: (not recommended)
+ *Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
*
* T1 node types:
* cache.t1.micro
@@ -4829,7 +4835,7 @@ export interface CreateReplicationGroupMessage {
*
*
Previous generation: (not recommended)
+ *Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
*
* C1 node types:
* cache.c1.xlarge
@@ -4934,7 +4940,7 @@ export interface CreateReplicationGroupMessage {
*
*
Previous generation: (not recommended)
+ *Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
*
* M2 node types:
* cache.m2.xlarge
,
@@ -7459,7 +7465,7 @@ export interface DescribeReservedCacheNodesMessage {
*
*
Previous generation: (not recommended)
+ *Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
*
* T1 node types:
* cache.t1.micro
@@ -7489,7 +7495,7 @@ export interface DescribeReservedCacheNodesMessage {
*
*
Previous generation: (not recommended)
+ *Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
*
* C1 node types:
* cache.c1.xlarge
@@ -7593,7 +7599,7 @@ export interface DescribeReservedCacheNodesMessage {
*
*
Previous generation: (not recommended)
+ *Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
*
* M2 node types:
* cache.m2.xlarge
,
@@ -7808,7 +7814,7 @@ export interface ReservedCacheNode {
*
*
Previous generation: (not recommended)
+ *Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
*
* T1 node types:
* cache.t1.micro
@@ -7838,7 +7844,7 @@ export interface ReservedCacheNode {
*
*
Previous generation: (not recommended)
+ *Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
*
* C1 node types:
* cache.c1.xlarge
@@ -7942,7 +7948,7 @@ export interface ReservedCacheNode {
*
*
Previous generation: (not recommended)
+ *Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
*
* M2 node types:
* cache.m2.xlarge
,
@@ -8171,7 +8177,7 @@ export interface DescribeReservedCacheNodesOfferingsMessage {
*
*
Previous generation: (not recommended)
+ *Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
*
* T1 node types:
* cache.t1.micro
@@ -8201,7 +8207,7 @@ export interface DescribeReservedCacheNodesOfferingsMessage {
*
*
Previous generation: (not recommended)
+ *Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
*
* C1 node types:
* cache.c1.xlarge
@@ -8307,7 +8313,7 @@ export interface DescribeReservedCacheNodesOfferingsMessage {
*
*
Previous generation: (not recommended)
+ *Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
*
* M2 node types:
* cache.m2.xlarge
,
@@ -8494,7 +8500,7 @@ export interface ReservedCacheNodesOffering {
*
*
Previous generation: (not recommended)
+ *Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
*
* T1 node types:
* cache.t1.micro
@@ -8524,7 +8530,7 @@ export interface ReservedCacheNodesOffering {
*
*
Previous generation: (not recommended)
+ *Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
*
* C1 node types:
* cache.c1.xlarge
@@ -8630,7 +8636,7 @@ export interface ReservedCacheNodesOffering {
*
*
Previous generation: (not recommended)
+ *Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
*
* M2 node types:
* cache.m2.xlarge
,
diff --git a/clients/client-elasticache/src/protocols/Aws_query.ts b/clients/client-elasticache/src/protocols/Aws_query.ts
index 42d4d5e960cd1..3c7261af9158c 100644
--- a/clients/client-elasticache/src/protocols/Aws_query.ts
+++ b/clients/client-elasticache/src/protocols/Aws_query.ts
@@ -11326,6 +11326,7 @@ const deserializeAws_queryReplicationGroup = (output: any, context: __SerdeConte
LogDeliveryConfigurations: undefined,
ReplicationGroupCreateTime: undefined,
DataTiering: undefined,
+ AutoMinorVersionUpgrade: undefined,
};
if (output["ReplicationGroupId"] !== undefined) {
contents.ReplicationGroupId = __expectString(output["ReplicationGroupId"]);
@@ -11442,6 +11443,9 @@ const deserializeAws_queryReplicationGroup = (output: any, context: __SerdeConte
if (output["DataTiering"] !== undefined) {
contents.DataTiering = __expectString(output["DataTiering"]);
}
+ if (output["AutoMinorVersionUpgrade"] !== undefined) {
+ contents.AutoMinorVersionUpgrade = __parseBoolean(output["AutoMinorVersionUpgrade"]);
+ }
return contents;
};
diff --git a/clients/client-frauddetector/CHANGELOG.md b/clients/client-frauddetector/CHANGELOG.md
index 2f49979b816b9..c8b36ca6d4161 100644
--- a/clients/client-frauddetector/CHANGELOG.md
+++ b/clients/client-frauddetector/CHANGELOG.md
@@ -3,6 +3,17 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+# [3.135.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.134.0...v3.135.0) (2022-07-21)
+
+
+### Features
+
+* **client-frauddetector:** The release introduces Account Takeover Insights (ATI) model. The ATI model detects fraud relating to account takeover. This release also adds support for new variable types: ARE_CREDENTIALS_VALID and SESSION_ID and adds new structures to Model Version APIs. ([b8837a8](https://github.com/aws/aws-sdk-js-v3/commit/b8837a8fe3ee514eed76a87d54e9165433ea0215))
+
+
+
+
+
# [3.131.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.130.0...v3.131.0) (2022-07-15)
**Note:** Version bump only for package @aws-sdk/client-frauddetector
diff --git a/clients/client-frauddetector/package.json b/clients/client-frauddetector/package.json
index afa31f6fed997..235b3b8bde02b 100644
--- a/clients/client-frauddetector/package.json
+++ b/clients/client-frauddetector/package.json
@@ -1,7 +1,7 @@
{
"name": "@aws-sdk/client-frauddetector",
"description": "AWS SDK for JavaScript Frauddetector Client for Node.js, Browser and React Native",
- "version": "3.131.0",
+ "version": "3.135.0",
"scripts": {
"build": "concurrently 'yarn:build:cjs' 'yarn:build:es' 'yarn:build:types'",
"build:cjs": "tsc -p tsconfig.cjs.json",
diff --git a/clients/client-frauddetector/src/models/models_0.ts b/clients/client-frauddetector/src/models/models_0.ts
index 955a7ebe5e2e6..63f36c3c85ba6 100644
--- a/clients/client-frauddetector/src/models/models_0.ts
+++ b/clients/client-frauddetector/src/models/models_0.ts
@@ -22,6 +22,110 @@ export class AccessDeniedException extends __BaseException {
}
}
+/**
+ *
The log odds metric details.
+ * + *Account Takeover Insights (ATI) model uses event variables from the login data you
+ * provide to continuously calculate a set of variables (aggregated variables) based on historical events. For example, your ATI model might calculate the number of times a user has logged in using the same IP address.
+ * In this case, event variables used to derive the aggregated variables are IP address
and user
.
+ * The names of all the variables. + *
+ */ + variableNames: string[] | undefined; + + /** + *+ * The relative importance of the variables in the list to the other event variable. + *
+ */ + aggregatedVariablesImportance: number | undefined; +} + +export namespace AggregatedLogOddsMetric { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AggregatedLogOddsMetric): any => ({ + ...obj, + }); +} + +/** + *+ * The details of the impact of aggregated variables on the prediction score.
+ * + *Account Takeover Insights (ATI) model uses the login data you
+ * provide to continuously calculate a set of variables (aggregated variables) based on historical events. For example, the model might calculate the number of times a user has logged in using the same IP address.
+ * In this case, event variables used to derive the aggregated variables are IP address
and user
.
+ * The names of all the event variables that were used to derive the aggregated variables. + *
+ */ + eventVariableNames?: string[]; + + /** + *+ * The relative impact of the aggregated variables in terms of magnitude on the prediction scores. + *
+ */ + relativeImpact?: string; + + /** + *+ * The raw, uninterpreted value represented as log-odds of the fraud. These values are usually between -10 to +10, but range from -infinity to +infinity.
+ *A positive value indicates that the variables drove the risk score up.
+ *A negative value indicates that the variables drove the risk score down.
+ *The details of the relative importance of the aggregated variables.
+ * + *Account Takeover Insights (ATI) model uses event variables from the login data you
+ * provide to continuously calculate a set of variables (aggregated variables) based on historical events. For example, your ATI model might calculate the number of times a user has logged in using the same IP address.
+ * In this case, event variables used to derive the aggregated variables are IP address
and user
.
+ * List of variables' metrics. + *
+ */ + logOddsMetrics?: AggregatedLogOddsMetric[]; +} + +export namespace AggregatedVariablesImportanceMetrics { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AggregatedVariablesImportanceMetrics): any => ({ + ...obj, + }); +} + export enum AsyncJobStatus { CANCELED = "CANCELED", CANCEL_IN_PROGRESS = "CANCEL_IN_PROGRESS", @@ -31,6 +135,110 @@ export enum AsyncJobStatus { IN_PROGRESS_INITIALIZING = "IN_PROGRESS_INITIALIZING", } +/** + *+ * The Account Takeover Insights (ATI) model performance metrics data points. + *
+ */ +export interface ATIMetricDataPoint { + /** + *+ * The challenge rate. This indicates the percentage of login events that the model recommends to challenge such as + * one-time password, multi-factor authentication, and investigations. + *
+ */ + cr?: number; + + /** + *+ * The anomaly discovery rate. This metric quantifies the percentage of anomalies that can be detected by the model at the selected score threshold. + * A lower score threshold increases the percentage of anomalies captured by the model, but would also require challenging a larger percentage of + * login events, leading to a higher customer friction. + *
+ */ + adr?: number; + + /** + *+ * The model's threshold that specifies an acceptable fraud capture rate. For example, a threshold of 500 means any model score 500 or above is + * labeled as fraud. + *
+ */ + threshold?: number; + + /** + *+ * The account takeover discovery rate. This metric quantifies the percentage of account compromise events that can be detected by the model at the selected score threshold. + * This metric is only available if 50 or more entities with at-least one labeled account takeover event is present in the ingested dataset. + *
+ */ + atodr?: number; +} + +export namespace ATIMetricDataPoint { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ATIMetricDataPoint): any => ({ + ...obj, + }); +} + +/** + *+ * The Account Takeover Insights (ATI) model performance score. + *
+ */ +export interface ATIModelPerformance { + /** + *+ * The anomaly separation index (ASI) score. This metric summarizes the overall ability of the model to separate anomalous activities from the normal behavior. Depending on the business, a + * large fraction of these anomalous activities can be malicious and correspond to the account takeover attacks. A model with no separability power will have the lowest possible + * ASI score of 0.5, whereas the a model with a high separability power will have the highest possible ASI score of 1.0 + *
+ */ + asi?: number; +} + +export namespace ATIModelPerformance { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ATIModelPerformance): any => ({ + ...obj, + }); +} + +/** + *+ * The Account Takeover Insights (ATI) model training metric details. + *
+ */ +export interface ATITrainingMetricsValue { + /** + *+ * The model's performance metrics data points. + *
+ */ + metricDataPoints?: ATIMetricDataPoint[]; + + /** + *+ * The model's overall performance scores. + *
+ */ + modelPerformance?: ATIModelPerformance; +} + +export namespace ATITrainingMetricsValue { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ATITrainingMetricsValue): any => ({ + ...obj, + }); +} + /** *A key and value pair.
*/ @@ -554,6 +762,7 @@ export namespace CreateBatchPredictionJobResult { } export enum ModelTypeEnum { + ACCOUNT_TAKEOVER_INSIGHTS = "ACCOUNT_TAKEOVER_INSIGHTS", ONLINE_FRAUD_INSIGHTS = "ONLINE_FRAUD_INSIGHTS", TRANSACTION_FRAUD_INSIGHTS = "TRANSACTION_FRAUD_INSIGHTS", } @@ -836,7 +1045,7 @@ export interface LabelSchema { *The label mapper maps the Amazon Fraud Detector supported model classification labels (FRAUD
, LEGIT
) to the appropriate event type labels. For example, if "FRAUD
" and "LEGIT
" are Amazon Fraud Detector supported labels, this mapper could be: {"FRAUD" => ["0"]
, "LEGIT" => ["1"]}
or {"FRAUD" => ["false"]
, "LEGIT" => ["true"]}
or {"FRAUD" => ["fraud", "abuse"]
, "LEGIT" => ["legit", "safe"]}
. The value part of the mapper is a list, because you may have multiple label variants from your event type for a single Amazon Fraud Detector label.
*
The action to take for unlabeled events.
@@ -865,7 +1074,7 @@ export interface TrainingDataSchema { /** *The label schema.
*/ - labelSchema: LabelSchema | undefined; + labelSchema?: LabelSchema; } export namespace TrainingDataSchema { @@ -1746,11 +1955,11 @@ export namespace FileValidationMessage { } /** - *The model training validation messages.
+ *The model training data validation metrics.
*/ export interface DataValidationMetrics { /** - *The file-specific model training validation messages.
+ *The file-specific model training data validation messages.
*/ fileLevelMessages?: FileValidationMessage[]; @@ -1904,6 +2113,281 @@ export namespace TrainingResult { }); } +/** + *+ * The Online Fraud Insights (OFI) model performance metrics data points. + *
+ */ +export interface OFIMetricDataPoint { + /** + *+ * The false positive rate. This is the percentage of total legitimate events that are incorrectly predicted as fraud. + *
+ */ + fpr?: number; + + /** + *+ * The percentage of fraud events correctly predicted as fraudulent as compared to all events predicted as fraudulent. + *
+ */ + precision?: number; + + /** + *+ * The true positive rate. This is the percentage of total fraud the model detects. Also known as capture rate. + *
+ */ + tpr?: number; + + /** + *+ * The model threshold that specifies an acceptable fraud capture rate. For example, a threshold of 500 means any model score 500 or above is labeled as fraud. + *
+ */ + threshold?: number; +} + +export namespace OFIMetricDataPoint { + /** + * @internal + */ + export const filterSensitiveLog = (obj: OFIMetricDataPoint): any => ({ + ...obj, + }); +} + +/** + *+ * The Online Fraud Insights (OFI) model performance score. + *
+ */ +export interface OFIModelPerformance { + /** + *+ * The area under the curve (auc). This summarizes the total positive rate (tpr) and false positive rate (FPR) across all possible model score thresholds. + *
+ */ + auc?: number; +} + +export namespace OFIModelPerformance { + /** + * @internal + */ + export const filterSensitiveLog = (obj: OFIModelPerformance): any => ({ + ...obj, + }); +} + +/** + *+ * The Online Fraud Insights (OFI) model training metric details. + *
+ */ +export interface OFITrainingMetricsValue { + /** + *+ * The model's performance metrics data points. + *
+ */ + metricDataPoints?: OFIMetricDataPoint[]; + + /** + *+ * The model's overall performance score. + *
+ */ + modelPerformance?: OFIModelPerformance; +} + +export namespace OFITrainingMetricsValue { + /** + * @internal + */ + export const filterSensitiveLog = (obj: OFITrainingMetricsValue): any => ({ + ...obj, + }); +} + +/** + *+ * The performance metrics data points for Transaction Fraud Insights (TFI) model. + *
+ */ +export interface TFIMetricDataPoint { + /** + *+ * The false positive rate. This is the percentage of total legitimate events that are incorrectly predicted as fraud. + *
+ */ + fpr?: number; + + /** + *+ * The percentage of fraud events correctly predicted as fraudulent as compared to all events predicted as fraudulent. + *
+ */ + precision?: number; + + /** + *+ * The true positive rate. This is the percentage of total fraud the model detects. Also known as capture rate. + *
+ */ + tpr?: number; + + /** + *+ * The model threshold that specifies an acceptable fraud capture rate. For example, a threshold of 500 means any + * model score 500 or above is labeled as fraud. + *
+ */ + threshold?: number; +} + +export namespace TFIMetricDataPoint { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TFIMetricDataPoint): any => ({ + ...obj, + }); +} + +/** + *+ * The Transaction Fraud Insights (TFI) model performance score. + *
+ */ +export interface TFIModelPerformance { + /** + *+ * The area under the curve (auc). This summarizes the total positive rate (tpr) and false positive rate (FPR) across all possible model score thresholds. + *
+ */ + auc?: number; +} + +export namespace TFIModelPerformance { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TFIModelPerformance): any => ({ + ...obj, + }); +} + +/** + *+ * The Transaction Fraud Insights (TFI) model training metric details. + *
+ */ +export interface TFITrainingMetricsValue { + /** + *+ * The model's performance metrics data points. + *
+ */ + metricDataPoints?: TFIMetricDataPoint[]; + + /** + *+ * The model performance score. + *
+ */ + modelPerformance?: TFIModelPerformance; +} + +export namespace TFITrainingMetricsValue { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TFITrainingMetricsValue): any => ({ + ...obj, + }); +} + +/** + *+ * The training metrics details. + *
+ */ +export interface TrainingMetricsV2 { + /** + *+ * The Online Fraud Insights (OFI) model training metric details. + *
+ */ + ofi?: OFITrainingMetricsValue; + + /** + *+ * The Transaction Fraud Insights (TFI) model training metric details. + *
+ */ + tfi?: TFITrainingMetricsValue; + + /** + *+ * The Account Takeover Insights (ATI) model training metric details. + *
+ */ + ati?: ATITrainingMetricsValue; +} + +export namespace TrainingMetricsV2 { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TrainingMetricsV2): any => ({ + ...obj, + }); +} + +/** + *+ * The training result details. + *
+ */ +export interface TrainingResultV2 { + /** + *The model training data validation metrics.
+ */ + dataValidationMetrics?: DataValidationMetrics; + + /** + *+ * The training metric details. + *
+ */ + trainingMetricsV2?: TrainingMetricsV2; + + /** + *The variable importance metrics details.
+ */ + variableImportanceMetrics?: VariableImportanceMetrics; + + /** + *+ * The variable importance metrics of the aggregated variables. + *
+ *Account Takeover Insights (ATI) model uses event variables from the login data you
+ * provide to continuously calculate a set of variables (aggregated variables) based on historical events. For example, your ATI model might calculate the number of times an user has logged in using the same IP address.
+ * In this case, event variables used to derive the aggregated variables are IP address
and user
.
The details of the model version.
*/ @@ -1967,6 +2451,13 @@ export interface ModelVersionDetail { *The model version ARN.
*/ arn?: string; + + /** + *+ * The training result details. The details include the relative importance of the variables. + *
+ */ + trainingResultV2?: TrainingResultV2; } export namespace ModelVersionDetail { @@ -3071,6 +3562,17 @@ export interface PredictionExplanations { * */ variableImpactExplanations?: VariableImpactExplanation[]; + + /** + *+ * The details of the aggregated variables impact on the prediction score. + *
+ * + *Account Takeover Insights (ATI) model uses event variables from the login data you
+ * provide to continuously calculate a set of variables (aggregated variables) based on historical events. For example, your ATI model might calculate the number of times an user has logged in using the same IP address.
+ * In this case, event variables used to derive the aggregated variables are IP address
and user
.
The JobRun
timeout in minutes. This is the maximum time that a job run can
- * consume resources before it is terminated and enters TIMEOUT
status. The default
- * is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
TIMEOUT
status. This value overrides the timeout value set in the parent job.
+ *
+ * Streaming jobs do not have a timeout. The default for non-streaming jobs is 2,880 minutes (48 hours).
*/ Timeout?: number; @@ -5726,7 +5727,7 @@ export interface JobRun { GlueVersion?: string; /** - *This field populates only when an Auto Scaling job run completes, and represents the total time each executor ran during the lifecycle of a job run in seconds, multiplied by a DPU factor (1 for G.1X
and 2 for G.2X
workers). This value may be different than the executionEngineRuntime
* MaxCapacity
as in the case of Auto Scaling jobs, as the number of executors running at a given time may be less than the MaxCapacity
. Therefore, it is possible that the value of DPUSeconds
is less than executionEngineRuntime
* MaxCapacity
.
This field populates only for Auto Scaling job runs, and represents the total time each executor ran during the lifecycle of a job run in seconds, multiplied by a DPU factor (1 for G.1X
, 2 for G.2X
, or 0.25 for G.025X
workers). This value may be different than the executionEngineRuntime
* MaxCapacity
as in the case of Auto Scaling jobs, as the number of executors running at a given time may be less than the MaxCapacity
. Therefore, it is possible that the value of DPUSeconds
is less than executionEngineRuntime
* MaxCapacity
.
The JobRun
timeout in minutes. This is the maximum time that a job run can
- * consume resources before it is terminated and enters TIMEOUT
status. The default
- * is 2,880 minutes (48 hours). This overrides the timeout value set in the parent job.
TIMEOUT
status. This value overrides the timeout value set in the parent job.
+ *
+ * Streaming jobs do not have a timeout. The default for non-streaming jobs is 2,880 minutes (48 hours).
*/ Timeout?: number; diff --git a/clients/client-health/CHANGELOG.md b/clients/client-health/CHANGELOG.md index 56382fe1cb8a8..8b5c171ca2e94 100644 --- a/clients/client-health/CHANGELOG.md +++ b/clients/client-health/CHANGELOG.md @@ -3,6 +3,17 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. +# [3.132.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.131.0...v3.132.0) (2022-07-18) + + +### Features + +* **clients:** update client endpoints as of 2022-07-18 ([87e9e29](https://github.com/aws/aws-sdk-js-v3/commit/87e9e2970f5fd24742968462e3944235c5e08380)) + + + + + # [3.131.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.130.0...v3.131.0) (2022-07-15) **Note:** Version bump only for package @aws-sdk/client-health diff --git a/clients/client-health/package.json b/clients/client-health/package.json index 8da1e7699280a..88c183a09cfa7 100644 --- a/clients/client-health/package.json +++ b/clients/client-health/package.json @@ -1,7 +1,7 @@ { "name": "@aws-sdk/client-health", "description": "AWS SDK for JavaScript Health Client for Node.js, Browser and React Native", - "version": "3.131.0", + "version": "3.132.0", "scripts": { "build": "concurrently 'yarn:build:cjs' 'yarn:build:es' 'yarn:build:types'", "build:cjs": "tsc -p tsconfig.cjs.json", diff --git a/clients/client-health/src/endpoints.ts b/clients/client-health/src/endpoints.ts index 0aef8959ef55d..4f40723d052a3 100644 --- a/clients/client-health/src/endpoints.ts +++ b/clients/client-health/src/endpoints.ts @@ -2,7 +2,26 @@ import { getRegionInfo, PartitionHash, RegionHash } from "@aws-sdk/config-resolver"; import { RegionInfoProvider, RegionInfoProviderOptions } from "@aws-sdk/types"; -const regionHash: RegionHash = {}; +const regionHash: RegionHash = { + "aws-cn-global": { + variants: [ + { + hostname: "global.health.amazonaws.com.cn", + tags: [], + }, + ], + signingRegion: "cn-northwest-1", + }, + 
"aws-global": { + variants: [ + { + hostname: "global.health.amazonaws.com", + tags: [], + }, + ], + signingRegion: "us-east-1", + }, +}; const partitionHash: PartitionHash = { aws: { @@ -16,6 +35,7 @@ const partitionHash: PartitionHash = { "ap-southeast-1", "ap-southeast-2", "ap-southeast-3", + "aws-global", "ca-central-1", "eu-central-1", "eu-north-1", @@ -50,9 +70,10 @@ const partitionHash: PartitionHash = { tags: ["dualstack"], }, ], + endpoint: "aws-global", }, "aws-cn": { - regions: ["cn-north-1", "cn-northwest-1"], + regions: ["aws-cn-global", "cn-north-1", "cn-northwest-1"], regionRegex: "^cn\\-\\w+\\-\\d+$", variants: [ { @@ -72,6 +93,7 @@ const partitionHash: PartitionHash = { tags: ["dualstack"], }, ], + endpoint: "aws-cn-global", }, "aws-iso": { regions: ["us-iso-east-1", "us-iso-west-1"], diff --git a/clients/client-iot/CHANGELOG.md b/clients/client-iot/CHANGELOG.md index 56d7b02c0ec7e..626154b0ca7a5 100644 --- a/clients/client-iot/CHANGELOG.md +++ b/clients/client-iot/CHANGELOG.md @@ -3,6 +3,17 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. +# [3.134.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.133.0...v3.134.0) (2022-07-20) + + +### Features + +* **client-iot:** GA release the ability to enable/disable IoT Fleet Indexing for Device Defender and Named Shadow information, and search them through IoT Fleet Indexing APIs. This includes Named Shadow Selection as a part of the UpdateIndexingConfiguration API. 
([358a73e](https://github.com/aws/aws-sdk-js-v3/commit/358a73e37eca1782a115e9c304bdc27889fc7026)) + + + + + # [3.131.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.130.0...v3.131.0) (2022-07-15) **Note:** Version bump only for package @aws-sdk/client-iot diff --git a/clients/client-iot/package.json b/clients/client-iot/package.json index b2ea0fcfa7bf9..1e72c9da328f7 100644 --- a/clients/client-iot/package.json +++ b/clients/client-iot/package.json @@ -1,7 +1,7 @@ { "name": "@aws-sdk/client-iot", "description": "AWS SDK for JavaScript Iot Client for Node.js, Browser and React Native", - "version": "3.131.0", + "version": "3.134.0", "scripts": { "build": "concurrently 'yarn:build:cjs' 'yarn:build:es' 'yarn:build:types'", "build:cjs": "tsc -p tsconfig.cjs.json", diff --git a/clients/client-iot/src/IoT.ts b/clients/client-iot/src/IoT.ts index 17928fb3d91e7..2c89b5ba21741 100644 --- a/clients/client-iot/src/IoT.ts +++ b/clients/client-iot/src/IoT.ts @@ -1331,7 +1331,8 @@ export class IoT extends IoTClient { *Attaches the specified policy to the specified principal (certificate or other * credential).
*- * Note: This action is deprecated. Please use AttachPolicy instead.
+ * Note: This action is deprecated and works as + * expected for backward compatibility, but we won't add enhancements. Use AttachPolicy instead. *Requires permission to access the AttachPrincipalPolicy action.
*/ public attachPrincipalPolicy( diff --git a/clients/client-iot/src/commands/AttachPrincipalPolicyCommand.ts b/clients/client-iot/src/commands/AttachPrincipalPolicyCommand.ts index 8726d82600320..603940ea7676a 100644 --- a/clients/client-iot/src/commands/AttachPrincipalPolicyCommand.ts +++ b/clients/client-iot/src/commands/AttachPrincipalPolicyCommand.ts @@ -28,7 +28,8 @@ export interface AttachPrincipalPolicyCommandOutput extends __MetadataBearer {} *Attaches the specified policy to the specified principal (certificate or other * credential).
*- * Note: This action is deprecated. Please use AttachPolicy instead.
+ * Note: This action is deprecated and works as + * expected for backward compatibility, but we won't add enhancements. Use AttachPolicy instead. *Requires permission to access the AttachPrincipalPolicy action.
* @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-iot/src/commands/ListThingGroupsForThingCommand.ts b/clients/client-iot/src/commands/ListThingGroupsForThingCommand.ts index ef010b34bb60f..bd513d1b7f893 100644 --- a/clients/client-iot/src/commands/ListThingGroupsForThingCommand.ts +++ b/clients/client-iot/src/commands/ListThingGroupsForThingCommand.ts @@ -13,7 +13,8 @@ import { } from "@aws-sdk/types"; import { IoTClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../IoTClient"; -import { ListThingGroupsForThingRequest, ListThingGroupsForThingResponse } from "../models/models_1"; +import { ListThingGroupsForThingRequest } from "../models/models_1"; +import { ListThingGroupsForThingResponse } from "../models/models_2"; import { deserializeAws_restJson1ListThingGroupsForThingCommand, serializeAws_restJson1ListThingGroupsForThingCommand, diff --git a/clients/client-iot/src/models/models_1.ts b/clients/client-iot/src/models/models_1.ts index bce1915d84561..4abf0e237ff7e 100644 --- a/clients/client-iot/src/models/models_1.ts +++ b/clients/client-iot/src/models/models_1.ts @@ -4181,6 +4181,30 @@ export enum DeviceDefenderIndexingMode { VIOLATIONS = "VIOLATIONS", } +/** + *Provides additional filters for specific data sources. Named shadow is the only data source that currently supports and requires a filter.
+ * To add named shadows to your fleet indexing configuration, set namedShadowIndexingMode
to be ON
and
+ * specify your shadow names in filter
.
The shadow names that you select to index. The default maximum number of shadow names for indexing is 10. To increase + * the limit, see Amazon Web Services IoT Device Management + * Quotas in the Amazon Web Services General Reference. + *
+ */ + namedShadowNames?: string[]; +} + +export namespace IndexingFilter { + /** + * @internal + */ + export const filterSensitiveLog = (obj: IndexingFilter): any => ({ + ...obj, + }); +} + export enum NamedShadowIndexingMode { OFF = "OFF", ON = "ON", @@ -4277,6 +4301,13 @@ export interface ThingIndexingConfiguration { *Contains custom field names and their data type.
*/ customFields?: Field[]; + + /** + *Provides additional filters for specific data sources. Named shadow is the only data source that currently supports and requires a filter.
+ * To add named shadows to your fleet indexing configuration, set namedShadowIndexingMode
to be ON
and
+ * specify your shadow names in filter
.
The thing groups.
- */ - thingGroups?: GroupNameAndArn[]; - - /** - *The token to use to get the next set of results, or null if there are no additional results.
- */ - nextToken?: string; -} - -export namespace ListThingGroupsForThingResponse { - /** - * @internal - */ - export const filterSensitiveLog = (obj: ListThingGroupsForThingResponse): any => ({ - ...obj, - }); -} diff --git a/clients/client-iot/src/models/models_2.ts b/clients/client-iot/src/models/models_2.ts index 2f65553f98bb6..2e3cd056246d8 100644 --- a/clients/client-iot/src/models/models_2.ts +++ b/clients/client-iot/src/models/models_2.ts @@ -49,6 +49,7 @@ import { Configuration, DetectMitigationActionsTaskTarget, DomainConfigurationStatus, + GroupNameAndArn, LogTargetType, RegistrationConfig, Status, @@ -58,6 +59,27 @@ import { ViolationEventOccurrenceRange, } from "./models_1"; +export interface ListThingGroupsForThingResponse { + /** + *The thing groups.
+ */ + thingGroups?: GroupNameAndArn[]; + + /** + *The token to use to get the next set of results, or null if there are no additional results.
+ */ + nextToken?: string; +} + +export namespace ListThingGroupsForThingResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListThingGroupsForThingResponse): any => ({ + ...obj, + }); +} + /** *The input for the ListThingPrincipal operation.
*/ diff --git a/clients/client-iot/src/protocols/Aws_restJson1.ts b/clients/client-iot/src/protocols/Aws_restJson1.ts index 64c4ced0aab26..630e41d22bffe 100644 --- a/clients/client-iot/src/protocols/Aws_restJson1.ts +++ b/clients/client-iot/src/protocols/Aws_restJson1.ts @@ -837,6 +837,7 @@ import { Field, FleetMetricNameAndArn, GroupNameAndArn, + IndexingFilter, InternalServerException, Job, JobExecution, @@ -24326,6 +24327,14 @@ const serializeAws_restJson1HttpUrlDestinationConfiguration = ( }; }; +const serializeAws_restJson1IndexingFilter = (input: IndexingFilter, context: __SerdeContext): any => { + return { + ...(input.namedShadowNames != null && { + namedShadowNames: serializeAws_restJson1NamedShadowNamesFilter(input.namedShadowNames, context), + }), + }; +}; + const serializeAws_restJson1IotAnalyticsAction = (input: IotAnalyticsAction, context: __SerdeContext): any => { return { ...(input.batchMode != null && { batchMode: input.batchMode }), @@ -24523,6 +24532,17 @@ const serializeAws_restJson1MqttContext = (input: MqttContext, context: __SerdeC }; }; +const serializeAws_restJson1NamedShadowNamesFilter = (input: string[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + const serializeAws_restJson1NumberList = (input: number[], context: __SerdeContext): any => { return input .filter((e: any) => e != null) @@ -25095,6 +25115,7 @@ const serializeAws_restJson1ThingIndexingConfiguration = ( return { ...(input.customFields != null && { customFields: serializeAws_restJson1Fields(input.customFields, context) }), ...(input.deviceDefenderIndexingMode != null && { deviceDefenderIndexingMode: input.deviceDefenderIndexingMode }), + ...(input.filter != null && { filter: serializeAws_restJson1IndexingFilter(input.filter, context) }), ...(input.managedFields != null && { managedFields: serializeAws_restJson1Fields(input.managedFields, 
context) }), ...(input.namedShadowIndexingMode != null && { namedShadowIndexingMode: input.namedShadowIndexingMode }), ...(input.thingConnectivityIndexingMode != null && { @@ -26777,6 +26798,15 @@ const deserializeAws_restJson1ImplicitDeny = (output: any, context: __SerdeConte } as any; }; +const deserializeAws_restJson1IndexingFilter = (output: any, context: __SerdeContext): IndexingFilter => { + return { + namedShadowNames: + output.namedShadowNames != null + ? deserializeAws_restJson1NamedShadowNamesFilter(output.namedShadowNames, context) + : undefined, + } as any; +}; + const deserializeAws_restJson1IndexNamesList = (output: any, context: __SerdeContext): string[] => { const retVal = (output || []) .filter((e: any) => e != null) @@ -27353,6 +27383,18 @@ const deserializeAws_restJson1MitigationActionParams = ( } as any; }; +const deserializeAws_restJson1NamedShadowNamesFilter = (output: any, context: __SerdeContext): string[] => { + const retVal = (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return __expectString(entry) as any; + }); + return retVal; +}; + const deserializeAws_restJson1NonCompliantResource = (output: any, context: __SerdeContext): NonCompliantResource => { return { additionalInfo: @@ -28581,6 +28623,7 @@ const deserializeAws_restJson1ThingIndexingConfiguration = ( customFields: output.customFields != null ? deserializeAws_restJson1Fields(output.customFields, context) : undefined, deviceDefenderIndexingMode: __expectString(output.deviceDefenderIndexingMode), + filter: output.filter != null ? deserializeAws_restJson1IndexingFilter(output.filter, context) : undefined, managedFields: output.managedFields != null ? 
deserializeAws_restJson1Fields(output.managedFields, context) : undefined, namedShadowIndexingMode: __expectString(output.namedShadowIndexingMode), diff --git a/clients/client-iotsitewise/CHANGELOG.md b/clients/client-iotsitewise/CHANGELOG.md index 927d9b9d3af7c..5ed8e52e60d50 100644 --- a/clients/client-iotsitewise/CHANGELOG.md +++ b/clients/client-iotsitewise/CHANGELOG.md @@ -3,6 +3,17 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. +# [3.135.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.134.0...v3.135.0) (2022-07-21) + + +### Features + +* **client-iotsitewise:** Added asynchronous API to ingest bulk historical and current data into IoT SiteWise. ([972d020](https://github.com/aws/aws-sdk-js-v3/commit/972d020ec9d8e713b9306aa8a39b036886127fc0)) + + + + + # [3.131.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.130.0...v3.131.0) (2022-07-15) **Note:** Version bump only for package @aws-sdk/client-iotsitewise diff --git a/clients/client-iotsitewise/package.json b/clients/client-iotsitewise/package.json index d4587f6d7e388..e21b29ae39248 100644 --- a/clients/client-iotsitewise/package.json +++ b/clients/client-iotsitewise/package.json @@ -1,7 +1,7 @@ { "name": "@aws-sdk/client-iotsitewise", "description": "AWS SDK for JavaScript Iotsitewise Client for Node.js, Browser and React Native", - "version": "3.131.0", + "version": "3.135.0", "scripts": { "build": "concurrently 'yarn:build:cjs' 'yarn:build:es' 'yarn:build:types'", "build:cjs": "tsc -p tsconfig.cjs.json", diff --git a/clients/client-iotsitewise/src/IoTSiteWise.ts b/clients/client-iotsitewise/src/IoTSiteWise.ts index c6c9fbfe6cb2e..85e2a7de3b071 100644 --- a/clients/client-iotsitewise/src/IoTSiteWise.ts +++ b/clients/client-iotsitewise/src/IoTSiteWise.ts @@ -52,6 +52,11 @@ import { CreateAssetModelCommandInput, CreateAssetModelCommandOutput, } from "./commands/CreateAssetModelCommand"; +import { + 
CreateBulkImportJobCommand, + CreateBulkImportJobCommandInput, + CreateBulkImportJobCommandOutput, +} from "./commands/CreateBulkImportJobCommand"; import { CreateDashboardCommand, CreateDashboardCommandInput, @@ -128,6 +133,11 @@ import { DescribeAssetPropertyCommandInput, DescribeAssetPropertyCommandOutput, } from "./commands/DescribeAssetPropertyCommand"; +import { + DescribeBulkImportJobCommand, + DescribeBulkImportJobCommandInput, + DescribeBulkImportJobCommandOutput, +} from "./commands/DescribeBulkImportJobCommand"; import { DescribeDashboardCommand, DescribeDashboardCommandInput, @@ -224,6 +234,11 @@ import { ListAssociatedAssetsCommandInput, ListAssociatedAssetsCommandOutput, } from "./commands/ListAssociatedAssetsCommand"; +import { + ListBulkImportJobsCommand, + ListBulkImportJobsCommandInput, + ListBulkImportJobsCommandOutput, +} from "./commands/ListBulkImportJobsCommand"; import { ListDashboardsCommand, ListDashboardsCommandInput, @@ -706,6 +721,49 @@ export class IoTSiteWise extends IoTSiteWiseClient { } } + /** + *This API operation is in preview release for IoT SiteWise and is subject to change. + * We recommend that you use this operation only with test data, and not in production environments.
+ *Defines a job to ingest data to IoT SiteWise from Amazon S3. For more information, + * see Create a bulk import job (CLI) + * in the Amazon Simple Storage Service User Guide.
+ *You must enable IoT SiteWise to export data to Amazon S3 before you create a bulk import job. + * For more information about how to configure storage settings, + * see PutStorageConfiguration.
+ *Creates a dashboard in an IoT SiteWise Monitor project.
*/ @@ -1259,6 +1317,44 @@ export class IoTSiteWise extends IoTSiteWiseClient { } } + /** + *This API operation is in preview release for IoT SiteWise and is subject to change. + * We recommend that you use this operation only with test data, and not in production environments.
+ *Retrieves information about a bulk import job request. For more information, + * see Describe a bulk import job (CLI) + * in the Amazon Simple Storage Service User Guide.
+ */ + public describeBulkImportJob( + args: DescribeBulkImportJobCommandInput, + options?: __HttpHandlerOptions + ): PromiseRetrieves information about a dashboard.
*/ @@ -1996,6 +2092,44 @@ export class IoTSiteWise extends IoTSiteWiseClient { } } + /** + *This API operation is in preview release for IoT SiteWise and is subject to change. + * We recommend that you use this operation only with test data, and not in production environments.
+ *Retrieves a paginated list of bulk import job requests. For more information, + * see List bulk import jobs (CLI) + * in the Amazon Simple Storage Service User Guide.
+ */ + public listBulkImportJobs( + args: ListBulkImportJobsCommandInput, + options?: __HttpHandlerOptions + ): PromiseRetrieves a paginated list of dashboards for an IoT SiteWise Monitor project.
*/ diff --git a/clients/client-iotsitewise/src/IoTSiteWiseClient.ts b/clients/client-iotsitewise/src/IoTSiteWiseClient.ts index 0370cfa593875..dc828a327c221 100644 --- a/clients/client-iotsitewise/src/IoTSiteWiseClient.ts +++ b/clients/client-iotsitewise/src/IoTSiteWiseClient.ts @@ -85,6 +85,10 @@ import { import { CreateAccessPolicyCommandInput, CreateAccessPolicyCommandOutput } from "./commands/CreateAccessPolicyCommand"; import { CreateAssetCommandInput, CreateAssetCommandOutput } from "./commands/CreateAssetCommand"; import { CreateAssetModelCommandInput, CreateAssetModelCommandOutput } from "./commands/CreateAssetModelCommand"; +import { + CreateBulkImportJobCommandInput, + CreateBulkImportJobCommandOutput, +} from "./commands/CreateBulkImportJobCommand"; import { CreateDashboardCommandInput, CreateDashboardCommandOutput } from "./commands/CreateDashboardCommand"; import { CreateGatewayCommandInput, CreateGatewayCommandOutput } from "./commands/CreateGatewayCommand"; import { CreatePortalCommandInput, CreatePortalCommandOutput } from "./commands/CreatePortalCommand"; @@ -107,6 +111,10 @@ import { DescribeAssetPropertyCommandInput, DescribeAssetPropertyCommandOutput, } from "./commands/DescribeAssetPropertyCommand"; +import { + DescribeBulkImportJobCommandInput, + DescribeBulkImportJobCommandOutput, +} from "./commands/DescribeBulkImportJobCommand"; import { DescribeDashboardCommandInput, DescribeDashboardCommandOutput } from "./commands/DescribeDashboardCommand"; import { DescribeDefaultEncryptionConfigurationCommandInput, @@ -160,6 +168,7 @@ import { ListAssociatedAssetsCommandInput, ListAssociatedAssetsCommandOutput, } from "./commands/ListAssociatedAssetsCommand"; +import { ListBulkImportJobsCommandInput, ListBulkImportJobsCommandOutput } from "./commands/ListBulkImportJobsCommand"; import { ListDashboardsCommandInput, ListDashboardsCommandOutput } from "./commands/ListDashboardsCommand"; import { ListGatewaysCommandInput, ListGatewaysCommandOutput } from 
"./commands/ListGatewaysCommand"; import { ListPortalsCommandInput, ListPortalsCommandOutput } from "./commands/ListPortalsCommand"; @@ -210,6 +219,7 @@ export type ServiceInputTypes = | CreateAccessPolicyCommandInput | CreateAssetCommandInput | CreateAssetModelCommandInput + | CreateBulkImportJobCommandInput | CreateDashboardCommandInput | CreateGatewayCommandInput | CreatePortalCommandInput @@ -226,6 +236,7 @@ export type ServiceInputTypes = | DescribeAssetCommandInput | DescribeAssetModelCommandInput | DescribeAssetPropertyCommandInput + | DescribeBulkImportJobCommandInput | DescribeDashboardCommandInput | DescribeDefaultEncryptionConfigurationCommandInput | DescribeGatewayCapabilityConfigurationCommandInput @@ -246,6 +257,7 @@ export type ServiceInputTypes = | ListAssetRelationshipsCommandInput | ListAssetsCommandInput | ListAssociatedAssetsCommandInput + | ListBulkImportJobsCommandInput | ListDashboardsCommandInput | ListGatewaysCommandInput | ListPortalsCommandInput @@ -280,6 +292,7 @@ export type ServiceOutputTypes = | CreateAccessPolicyCommandOutput | CreateAssetCommandOutput | CreateAssetModelCommandOutput + | CreateBulkImportJobCommandOutput | CreateDashboardCommandOutput | CreateGatewayCommandOutput | CreatePortalCommandOutput @@ -296,6 +309,7 @@ export type ServiceOutputTypes = | DescribeAssetCommandOutput | DescribeAssetModelCommandOutput | DescribeAssetPropertyCommandOutput + | DescribeBulkImportJobCommandOutput | DescribeDashboardCommandOutput | DescribeDefaultEncryptionConfigurationCommandOutput | DescribeGatewayCapabilityConfigurationCommandOutput @@ -316,6 +330,7 @@ export type ServiceOutputTypes = | ListAssetRelationshipsCommandOutput | ListAssetsCommandOutput | ListAssociatedAssetsCommandOutput + | ListBulkImportJobsCommandOutput | ListDashboardsCommandOutput | ListGatewaysCommandOutput | ListPortalsCommandOutput diff --git a/clients/client-iotsitewise/src/commands/CreateBulkImportJobCommand.ts 
b/clients/client-iotsitewise/src/commands/CreateBulkImportJobCommand.ts new file mode 100644 index 0000000000000..3c7442b17a38c --- /dev/null +++ b/clients/client-iotsitewise/src/commands/CreateBulkImportJobCommand.ts @@ -0,0 +1,107 @@ +// smithy-typescript generated code +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { IoTSiteWiseClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../IoTSiteWiseClient"; +import { CreateBulkImportJobRequest, CreateBulkImportJobResponse } from "../models/models_0"; +import { + deserializeAws_restJson1CreateBulkImportJobCommand, + serializeAws_restJson1CreateBulkImportJobCommand, +} from "../protocols/Aws_restJson1"; + +export interface CreateBulkImportJobCommandInput extends CreateBulkImportJobRequest {} +export interface CreateBulkImportJobCommandOutput extends CreateBulkImportJobResponse, __MetadataBearer {} + +/** + *This API operation is in preview release for IoT SiteWise and is subject to change. + * We recommend that you use this operation only with test data, and not in production environments.
+ *Defines a job to ingest data to IoT SiteWise from Amazon S3. For more information, + * see Create a bulk import job (CLI) + * in the Amazon Simple Storage Service User Guide.
+ *You must enable IoT SiteWise to export data to Amazon S3 before you create a bulk import job. + * For more information about how to configure storage settings, + * see PutStorageConfiguration.
+ *This API operation is in preview release for IoT SiteWise and is subject to change. + * We recommend that you use this operation only with test data, and not in production environments.
+ *Retrieves information about a bulk import job request. For more information, + * see Describe a bulk import job (CLI) + * in the Amazon Simple Storage Service User Guide.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { IoTSiteWiseClient, DescribeBulkImportJobCommand } from "@aws-sdk/client-iotsitewise"; // ES Modules import + * // const { IoTSiteWiseClient, DescribeBulkImportJobCommand } = require("@aws-sdk/client-iotsitewise"); // CommonJS import + * const client = new IoTSiteWiseClient(config); + * const command = new DescribeBulkImportJobCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DescribeBulkImportJobCommandInput} for command's `input` shape. + * @see {@link DescribeBulkImportJobCommandOutput} for command's `response` shape. + * @see {@link IoTSiteWiseClientResolvedConfig | config} for IoTSiteWiseClient's `config` shape. + * + */ +export class DescribeBulkImportJobCommand extends $Command< + DescribeBulkImportJobCommandInput, + DescribeBulkImportJobCommandOutput, + IoTSiteWiseClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DescribeBulkImportJobCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackThis API operation is in preview release for IoT SiteWise and is subject to change. + * We recommend that you use this operation only with test data, and not in production environments.
+ *Retrieves a paginated list of bulk import job requests. For more information, + * see List bulk import jobs (CLI) + * in the Amazon Simple Storage Service User Guide.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { IoTSiteWiseClient, ListBulkImportJobsCommand } from "@aws-sdk/client-iotsitewise"; // ES Modules import + * // const { IoTSiteWiseClient, ListBulkImportJobsCommand } = require("@aws-sdk/client-iotsitewise"); // CommonJS import + * const client = new IoTSiteWiseClient(config); + * const command = new ListBulkImportJobsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListBulkImportJobsCommandInput} for command's `input` shape. + * @see {@link ListBulkImportJobsCommandOutput} for command's `response` shape. + * @see {@link IoTSiteWiseClientResolvedConfig | config} for IoTSiteWiseClient's `config` shape. + * + */ +export class ListBulkImportJobsCommand extends $Command< + ListBulkImportJobsCommandInput, + ListBulkImportJobsCommandOutput, + IoTSiteWiseClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListBulkImportJobsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackThe Amazon S3 destination where errors associated with the job creation request are saved.
+ */ +export interface ErrorReportLocation { + /** + *The name of the Amazon S3 bucket to which errors associated with the bulk import job are sent.
+ */ + bucket: string | undefined; + + /** + *Amazon S3 uses the prefix as a folder name to organize data in the bucket. + * Each Amazon S3 object has a key that is its unique identifier in the bucket. + * Each object in a bucket has exactly one key. The prefix must end with a forward slash (/). + * For more information, see Organizing objects using prefixes + * in the Amazon Simple Storage Service User Guide.
+ */ + prefix: string | undefined; +} + +export namespace ErrorReportLocation { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ErrorReportLocation): any => ({ + ...obj, + }); +} + +/** + *The file in Amazon S3 where your data is saved.
+ */ +export interface File { + /** + *The name of the Amazon S3 bucket from which data is imported.
+ */ + bucket: string | undefined; + + /** + *The key of the Amazon S3 object that contains your data. Each object has a key that is a + * unique identifier. Each object has exactly one key.
+ */ + key: string | undefined; + + /** + *The version ID to identify a specific version of the Amazon S3 object that contains your data.
+ */ + versionId?: string; +} + +export namespace File { + /** + * @internal + */ + export const filterSensitiveLog = (obj: File): any => ({ + ...obj, + }); +} + +export enum ColumnName { + ALIAS = "ALIAS", + ASSET_ID = "ASSET_ID", + DATA_TYPE = "DATA_TYPE", + PROPERTY_ID = "PROPERTY_ID", + QUALITY = "QUALITY", + TIMESTAMP_NANO_OFFSET = "TIMESTAMP_NANO_OFFSET", + TIMESTAMP_SECONDS = "TIMESTAMP_SECONDS", + VALUE = "VALUE", +} + +/** + *A .csv file.
+ */ +export interface Csv { + /** + *The column names specified in the .csv file.
+ */ + columnNames?: (ColumnName | string)[]; +} + +export namespace Csv { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Csv): any => ({ + ...obj, + }); +} + +/** + *The file format of the data.
+ */ +export interface FileFormat { + /** + *The .csv file format.
+ */ + csv?: Csv; +} + +export namespace FileFormat { + /** + * @internal + */ + export const filterSensitiveLog = (obj: FileFormat): any => ({ + ...obj, + }); +} + +/** + *Contains the configuration information of a job, such as the file format used to save data in Amazon S3.
+ */ +export interface JobConfiguration { + /** + *The file format of the data in Amazon S3.
+ */ + fileFormat: FileFormat | undefined; +} + +export namespace JobConfiguration { + /** + * @internal + */ + export const filterSensitiveLog = (obj: JobConfiguration): any => ({ + ...obj, + }); +} + +export interface CreateBulkImportJobRequest { + /** + *The unique name that helps identify the job request.
+ */ + jobName: string | undefined; + + /** + *The ARN of the IAM role that allows IoT SiteWise to read Amazon S3 data.
+ */ + jobRoleArn: string | undefined; + + /** + *The files in the specified Amazon S3 bucket that contain your data.
+ */ + files: File[] | undefined; + + /** + *The Amazon S3 destination where errors associated with the job creation request are saved.
+ */ + errorReportLocation: ErrorReportLocation | undefined; + + /** + *Contains the configuration information of a job, such as the file format used to save data in Amazon S3.
+ */ + jobConfiguration: JobConfiguration | undefined; +} + +export namespace CreateBulkImportJobRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateBulkImportJobRequest): any => ({ + ...obj, + }); +} + +export enum JobStatus { + CANCELLED = "CANCELLED", + COMPLETED = "COMPLETED", + COMPLETED_WITH_FAILURES = "COMPLETED_WITH_FAILURES", + FAILED = "FAILED", + PENDING = "PENDING", + RUNNING = "RUNNING", +} + +export interface CreateBulkImportJobResponse { + /** + *The ID of the job.
+ */ + jobId: string | undefined; + + /** + *The unique name that helps identify the job request.
+ */ + jobName: string | undefined; + + /** + *<p>The status of the bulk import job can be one of the following values.</p>
+ *
+ * PENDING
– IoT SiteWise is waiting for the current bulk import job to finish.
+ * CANCELLED
– The bulk import job has been canceled.
+ * RUNNING
– IoT SiteWise is processing your request to import your data from Amazon S3.
+ * COMPLETED
– IoT SiteWise successfully completed your request to import data from Amazon S3.
+ * FAILED
– IoT SiteWise couldn't process your request to import data from Amazon S3.
+ * You can use logs saved in the specified error report location in Amazon S3 to troubleshoot issues.
+ * COMPLETED_WITH_FAILURES
– IoT SiteWise completed your request to import data from Amazon S3 with errors.
+ * You can use logs saved in the specified error report location in Amazon S3 to troubleshoot issues.
The ID of the project in which to create the dashboard.
@@ -4241,6 +4466,106 @@ export namespace DescribeAssetPropertyResponse { }); } +export interface DescribeBulkImportJobRequest { + /** + *The ID of the job.
+ */ + jobId: string | undefined; +} + +export namespace DescribeBulkImportJobRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DescribeBulkImportJobRequest): any => ({ + ...obj, + }); +} + +export interface DescribeBulkImportJobResponse { + /** + *The ID of the job.
+ */ + jobId: string | undefined; + + /** + *The unique name that helps identify the job request.
+ */ + jobName: string | undefined; + + /** + *<p>The status of the bulk import job can be one of the following values.</p>
+ *
+ * PENDING
– IoT SiteWise is waiting for the current bulk import job to finish.
+ * CANCELLED
– The bulk import job has been canceled.
+ * RUNNING
– IoT SiteWise is processing your request to import your data from Amazon S3.
+ * COMPLETED
– IoT SiteWise successfully completed your request to import data from Amazon S3.
+ * FAILED
– IoT SiteWise couldn't process your request to import data from Amazon S3.
+ * You can use logs saved in the specified error report location in Amazon S3 to troubleshoot issues.
+ * COMPLETED_WITH_FAILURES
– IoT SiteWise completed your request to import data from Amazon S3 with errors.
+ * You can use logs saved in the specified error report location in Amazon S3 to troubleshoot issues.
The ARN of the IAM role that allows IoT SiteWise to read Amazon S3 data.
+ */ + jobRoleArn: string | undefined; + + /** + *The files in the specified Amazon S3 bucket that contain your data.
+ */ + files: File[] | undefined; + + /** + *The Amazon S3 destination where errors associated with the job creation request are saved.
+ */ + errorReportLocation: ErrorReportLocation | undefined; + + /** + *Contains the configuration information of a job, such as the file format used to save data in Amazon S3.
+ */ + jobConfiguration: JobConfiguration | undefined; + + /** + *<p>The date the job was created, in Unix epoch time.</p>
+ */ + jobCreationDate: Date | undefined; + + /** + *The date the job was last updated, in Unix epoch time.
+ */ + jobLastUpdateDate: Date | undefined; +} + +export namespace DescribeBulkImportJobResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DescribeBulkImportJobResponse): any => ({ + ...obj, + }); +} + export interface DescribeDashboardRequest { /** *The ID of the dashboard.
@@ -5911,6 +6236,120 @@ export namespace ListAssociatedAssetsResponse { }); } +export enum ListBulkImportJobsFilter { + ALL = "ALL", + CANCELLED = "CANCELLED", + COMPLETED = "COMPLETED", + COMPLETED_WITH_FAILURES = "COMPLETED_WITH_FAILURES", + FAILED = "FAILED", + PENDING = "PENDING", + RUNNING = "RUNNING", +} + +export interface ListBulkImportJobsRequest { + /** + *The token to be used for the next set of paginated results.
+ */ + nextToken?: string; + + /** + *The maximum number of results to return for each paginated request.
+ */ + maxResults?: number; + + /** + *You can use a filter to select the bulk import jobs that you want to retrieve.
+ */ + filter?: ListBulkImportJobsFilter | string; +} + +export namespace ListBulkImportJobsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListBulkImportJobsRequest): any => ({ + ...obj, + }); +} + +/** + *<p>Contains the job summary information.</p>
+ */ +export interface JobSummary { + /** + *The ID of the job.
+ */ + id: string | undefined; + + /** + *The unique name that helps identify the job request.
+ */ + name: string | undefined; + + /** + *<p>The status of the bulk import job can be one of the following values.</p>
+ *
+ * PENDING
– IoT SiteWise is waiting for the current bulk import job to finish.
+ * CANCELLED
– The bulk import job has been canceled.
+ * RUNNING
– IoT SiteWise is processing your request to import your data from Amazon S3.
+ * COMPLETED
– IoT SiteWise successfully completed your request to import data from Amazon S3.
+ * FAILED
– IoT SiteWise couldn't process your request to import data from Amazon S3.
+ * You can use logs saved in the specified error report location in Amazon S3 to troubleshoot issues.
+ * COMPLETED_WITH_FAILURES
– IoT SiteWise completed your request to import data from Amazon S3 with errors.
+ * You can use logs saved in the specified error report location in Amazon S3 to troubleshoot issues.
One or more job summaries to list.
+ */ + jobSummaries: JobSummary[] | undefined; + + /** + *The token for the next set of results, or null if there are no additional results.
+ */ + nextToken?: string; +} + +export namespace ListBulkImportJobsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListBulkImportJobsResponse): any => ({ + ...obj, + }); +} + export interface ListDashboardsRequest { /** *The ID of the project.
diff --git a/clients/client-iotsitewise/src/pagination/ListBulkImportJobsPaginator.ts b/clients/client-iotsitewise/src/pagination/ListBulkImportJobsPaginator.ts new file mode 100644 index 0000000000000..3196a6d3938c5 --- /dev/null +++ b/clients/client-iotsitewise/src/pagination/ListBulkImportJobsPaginator.ts @@ -0,0 +1,61 @@ +// smithy-typescript generated code +import { Paginator } from "@aws-sdk/types"; + +import { + ListBulkImportJobsCommand, + ListBulkImportJobsCommandInput, + ListBulkImportJobsCommandOutput, +} from "../commands/ListBulkImportJobsCommand"; +import { IoTSiteWise } from "../IoTSiteWise"; +import { IoTSiteWiseClient } from "../IoTSiteWiseClient"; +import { IoTSiteWisePaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: IoTSiteWiseClient, + input: ListBulkImportJobsCommandInput, + ...args: any +): PromiseYou can use this to re-configure your existing document level access - * control without indexing all of your documents again. For example, your - * index contains top-secret company documents that only certain employees - * or users should access. One of these users leaves the company or switches - * to a team that should be blocked from access to top-secret documents. - * Your documents in your index still give this user access to top-secret - * documents due to the user having access at the time your documents were - * indexed. You can create a specific access control configuration for this - * user with deny access. You can later update the access control - * configuration to allow access in the case the user returns to the company - * and re-joins the 'top-secret' team. You can re-configure access control - * for your documents circumstances change.
+ *You can use this to re-configure your existing document level access control without + * indexing all of your documents again. For example, your index contains top-secret + * company documents that only certain employees or users should access. One of these users + * leaves the company or switches to a team that should be blocked from accessing + * top-secret documents. The user still has access to top-secret documents because the user + * had access when your documents were previously indexed. You + * can create a specific access control configuration for the user with deny + * access. You can later update the access control configuration to allow access if the + * user returns to the company and re-joins the 'top-secret' team. You can re-configure + * access control for your documents as circumstances change.
*To apply your access control configuration to certain documents, you call
* the BatchPutDocument
* API with the AccessControlConfigurationId
included in the
@@ -2176,12 +2174,12 @@ export class Kendra extends KendraClient {
* suddenly returns to their previous team and should no longer have access to top secret
* documents. You can update the access control configuration to re-configure access
* control for your documents as circumstances change.
You call the BatchPutDocument
- * API to apply the updated access control configuration, with the
- * AccessControlConfigurationId
included in the
+ *
You call the BatchPutDocument API to
+ * apply the updated access control configuration, with the
+ * AccessControlConfigurationId
included in the
* Document
* object. If you use an S3 bucket as a data source, you synchronize your data source to
- * apply the the AccessControlConfigurationId
in the .metadata.json
file.
+ * apply the AccessControlConfigurationId
in the .metadata.json
file.
* Amazon Kendra currently only supports access control configuration for S3 data
* sources and documents indexed using the BatchPutDocument
API.
You can use this to re-configure your existing document level access - * control without indexing all of your documents again. For example, your - * index contains top-secret company documents that only certain employees - * or users should access. One of these users leaves the company or switches - * to a team that should be blocked from access to top-secret documents. - * Your documents in your index still give this user access to top-secret - * documents due to the user having access at the time your documents were - * indexed. You can create a specific access control configuration for this - * user with deny access. You can later update the access control - * configuration to allow access in the case the user returns to the company - * and re-joins the 'top-secret' team. You can re-configure access control - * for your documents circumstances change.
+ *You can use this to re-configure your existing document level access control without + * indexing all of your documents again. For example, your index contains top-secret + * company documents that only certain employees or users should access. One of these users + * leaves the company or switches to a team that should be blocked from accessing + * top-secret documents. The user still has access to top-secret documents because the user + * had access when your documents were previously indexed. You + * can create a specific access control configuration for the user with deny + * access. You can later update the access control configuration to allow access if the + * user returns to the company and re-joins the 'top-secret' team. You can re-configure + * access control for your documents as circumstances change.
*To apply your access control configuration to certain documents, you call
* the BatchPutDocument
* API with the AccessControlConfigurationId
included in the
diff --git a/clients/client-kendra/src/commands/QueryCommand.ts b/clients/client-kendra/src/commands/QueryCommand.ts
index a43409f1e7746..7211c37bc04b5 100644
--- a/clients/client-kendra/src/commands/QueryCommand.ts
+++ b/clients/client-kendra/src/commands/QueryCommand.ts
@@ -13,8 +13,7 @@ import {
} from "@aws-sdk/types";
import { KendraClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../KendraClient";
-import { QueryResult } from "../models/models_0";
-import { QueryRequest } from "../models/models_1";
+import { QueryRequest, QueryResult } from "../models/models_1";
import { deserializeAws_json1_1QueryCommand, serializeAws_json1_1QueryCommand } from "../protocols/Aws_json1_1";
export interface QueryCommandInput extends QueryRequest {}
diff --git a/clients/client-kendra/src/commands/UpdateAccessControlConfigurationCommand.ts b/clients/client-kendra/src/commands/UpdateAccessControlConfigurationCommand.ts
index 2248110a4e519..825a9425d5ed9 100644
--- a/clients/client-kendra/src/commands/UpdateAccessControlConfigurationCommand.ts
+++ b/clients/client-kendra/src/commands/UpdateAccessControlConfigurationCommand.ts
@@ -37,12 +37,12 @@ export interface UpdateAccessControlConfigurationCommandOutput
* suddenly returns to their previous team and should no longer have access to top secret
* documents. You can update the access control configuration to re-configure access
* control for your documents as circumstances change.
You call the BatchPutDocument
- * API to apply the updated access control configuration, with the
- * AccessControlConfigurationId
included in the
+ *
You call the BatchPutDocument API to
+ * apply the updated access control configuration, with the
+ * AccessControlConfigurationId
included in the
* Document
* object. If you use an S3 bucket as a data source, you synchronize your data source to
- * apply the the AccessControlConfigurationId
in the .metadata.json
file.
+ * apply the AccessControlConfigurationId
in the .metadata.json
file.
* Amazon Kendra currently only supports access control configuration for S3 data
* sources and documents indexed using the BatchPutDocument
API.
You can also provide OAuth authentication credentials of user name, + * password, client ID, and client secret. For more information, see + * Authentication + * for a SharePoint data source.
*/ SecretArn: string | undefined; @@ -4207,6 +4216,14 @@ export interface SharePointConfiguration { * this to connect to SharePoint. */ SslCertificateS3Path?: S3Path; + + /** + *Whether you want to connect to SharePoint using basic authentication of + * user name and password, or OAuth authentication of user name, password, + * client ID, and client secret. You can use OAuth authentication for + * SharePoint Online.
+ */ + AuthenticationType?: SharePointOnlineAuthenticationType | string; } export namespace SharePointConfiguration { @@ -5906,7 +5923,8 @@ export interface DescribeAccessControlConfigurationRequest { IndexId: string | undefined; /** - *The identifier of the access control configuration you want to get information on.
+ *The identifier of the access control configuration + * you want to get information on.
*/ Id: string | undefined; } @@ -5945,8 +5963,8 @@ export interface DescribeAccessControlConfigurationResponse { AccessControlList?: Principal[]; /** - *The list of principal lists that - * define the hierarchy for which documents users should have access to.
+ *The list of principal + * lists that define the hierarchy for which documents users should have access to.
*/ HierarchicalAccessControlList?: HierarchicalPrincipal[]; } @@ -7654,10 +7672,10 @@ export interface ListAccessControlConfigurationsRequest { IndexId: string | undefined; /** - *If the previous response was incomplete (because there is more data - * to retrieve), Amazon Kendra returns a pagination token in the response. - * You can use this pagination token to retrieve the next set of access - * control configurations.
+ *If the previous response was incomplete (because + * there's more data to retrieve), Amazon Kendra returns a pagination token + * in the response. You can use this pagination token to retrieve the next set + * of access control configurations.
*/ NextToken?: string; @@ -7678,9 +7696,9 @@ export namespace ListAccessControlConfigurationsRequest { export interface ListAccessControlConfigurationsResponse { /** - *If the response is truncated, Amazon Kendra returns this token - * that you can use in the subsequent request to retrieve the next set of - * access control configurations.
+ *If the response is truncated, Amazon Kendra returns this + * token, which you can use in the subsequent request to retrieve the + * next set of access control configurations.
*/ NextToken?: string; @@ -10328,53 +10346,3 @@ export namespace AttributeFilter { ...obj, }); } - -export interface QueryResult { - /** - *The unique identifier for the search. You use QueryId
- * to identify the search when using the feedback API.
The results of the search.
- */ - ResultItems?: QueryResultItem[]; - - /** - *Contains the facet results. A FacetResult
contains the
- * counts for each attribute key that was specified in the
- * Facets
input parameter.
The total number of items found by the search; however, you can only - * retrieve up to 100 items. For example, if the search found 192 items, - * you can only retrieve the first 100 of the items.
- */ - TotalNumberOfResults?: number; - - /** - *A list of warning codes and their messages on problems with your query.
- *Amazon Kendra currently only supports one type of warning, which is a warning - * on invalid syntax used in the query. For examples of invalid query syntax, - * see Searching - * with advanced query syntax.
- */ - Warnings?: Warning[]; - - /** - *A list of information related to suggested spell corrections for a query.
- */ - SpellCorrectedQueries?: SpellCorrectedQuery[]; -} - -export namespace QueryResult { - /** - * @internal - */ - export const filterSensitiveLog = (obj: QueryResult): any => ({ - ...obj, - }); -} diff --git a/clients/client-kendra/src/models/models_1.ts b/clients/client-kendra/src/models/models_1.ts index e6379c5c9fc40..9dfdd7ad900b7 100644 --- a/clients/client-kendra/src/models/models_1.ts +++ b/clients/client-kendra/src/models/models_1.ts @@ -3,12 +3,66 @@ import { AttributeFilter, DocumentRelevanceConfiguration, Facet, + FacetResult, + QueryResultItem, QueryResultType, SortingConfiguration, + SpellCorrectedQuery, SpellCorrectionConfiguration, UserContext, + Warning, } from "./models_0"; +export interface QueryResult { + /** + *The unique identifier for the search. You use QueryId
+ * to identify the search when using the feedback API.
The results of the search.
+ */ + ResultItems?: QueryResultItem[]; + + /** + *Contains the facet results. A FacetResult
contains the
+ * counts for each attribute key that was specified in the
+ * Facets
input parameter.
The total number of items found by the search; however, you can only + * retrieve up to 100 items. For example, if the search found 192 items, + * you can only retrieve the first 100 of the items.
+ */ + TotalNumberOfResults?: number; + + /** + *A list of warning codes and their messages on problems with your query.
+ *Amazon Kendra currently only supports one type of warning, which is a warning + * on invalid syntax used in the query. For examples of invalid query syntax, + * see Searching + * with advanced query syntax.
+ */ + Warnings?: Warning[]; + + /** + *A list of information related to suggested spell corrections for a query.
+ */ + SpellCorrectedQueries?: SpellCorrectedQuery[]; +} + +export namespace QueryResult { + /** + * @internal + */ + export const filterSensitiveLog = (obj: QueryResult): any => ({ + ...obj, + }); +} + export interface QueryRequest { /** *The unique identifier of the index to search. The identifier is diff --git a/clients/client-kendra/src/protocols/Aws_json1_1.ts b/clients/client-kendra/src/protocols/Aws_json1_1.ts index 2e07cef676f0b..53736bc9acc89 100644 --- a/clients/client-kendra/src/protocols/Aws_json1_1.ts +++ b/clients/client-kendra/src/protocols/Aws_json1_1.ts @@ -345,7 +345,6 @@ import { Principal, ProxyConfiguration, PutPrincipalMappingRequest, - QueryResult, QueryResultItem, QuerySuggestionsBlockListSummary, QuipConfiguration, @@ -420,7 +419,7 @@ import { WebCrawlerConfiguration, WorkDocsConfiguration, } from "../models/models_0"; -import { QueryRequest } from "../models/models_1"; +import { QueryRequest, QueryResult } from "../models/models_1"; export const serializeAws_json1_1AssociateEntitiesToExperienceCommand = async ( input: AssociateEntitiesToExperienceCommandInput, @@ -7290,6 +7289,7 @@ const serializeAws_json1_1ServiceNowServiceCatalogConfiguration = ( const serializeAws_json1_1SharePointConfiguration = (input: SharePointConfiguration, context: __SerdeContext): any => { return { + ...(input.AuthenticationType != null && { AuthenticationType: input.AuthenticationType }), ...(input.CrawlAttachments != null && { CrawlAttachments: input.CrawlAttachments }), ...(input.DisableLocalGroups != null && { DisableLocalGroups: input.DisableLocalGroups }), ...(input.DocumentTitleFieldName != null && { DocumentTitleFieldName: input.DocumentTitleFieldName }), @@ -10761,6 +10761,7 @@ const deserializeAws_json1_1SharePointConfiguration = ( context: __SerdeContext ): SharePointConfiguration => { return { + AuthenticationType: __expectString(output.AuthenticationType), CrawlAttachments: __expectBoolean(output.CrawlAttachments), DisableLocalGroups: 
__expectBoolean(output.DisableLocalGroups), DocumentTitleFieldName: __expectString(output.DocumentTitleFieldName), diff --git a/clients/client-kms/CHANGELOG.md b/clients/client-kms/CHANGELOG.md index 962421ed8c8ef..e1b01fe4d205a 100644 --- a/clients/client-kms/CHANGELOG.md +++ b/clients/client-kms/CHANGELOG.md @@ -3,6 +3,17 @@ All notable changes to this project will be documented in this file. See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. +# [3.132.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.131.0...v3.132.0) (2022-07-18) + + +### Features + +* **client-kms:** Added support for the SM2 KeySpec in China Partition Regions ([250fd19](https://github.com/aws/aws-sdk-js-v3/commit/250fd19d65a83b3d183ced9863bf6aba8f1fb698)) + + + + + # [3.131.0](https://github.com/aws/aws-sdk-js-v3/compare/v3.130.0...v3.131.0) (2022-07-15) **Note:** Version bump only for package @aws-sdk/client-kms diff --git a/clients/client-kms/package.json b/clients/client-kms/package.json index 45a1c873a5cb3..338b29a723f06 100644 --- a/clients/client-kms/package.json +++ b/clients/client-kms/package.json @@ -1,7 +1,7 @@ { "name": "@aws-sdk/client-kms", "description": "AWS SDK for JavaScript Kms Client for Node.js, Browser and React Native", - "version": "3.131.0", + "version": "3.132.0", "scripts": { "build": "concurrently 'yarn:build:cjs' 'yarn:build:es' 'yarn:build:types'", "build:cjs": "tsc -p tsconfig.cjs.json", diff --git a/clients/client-kms/src/KMS.ts b/clients/client-kms/src/KMS.ts index 55ab111cf1d49..a59246363df49 100644 --- a/clients/client-kms/src/KMS.ts +++ b/clients/client-kms/src/KMS.ts @@ -490,7 +490,7 @@ export class KMS extends KMSClient { /** *
Creates a custom key store that is associated with an CloudHSM cluster that you own and * manage.
- *This operation is part of the Custom Key Store feature feature in KMS, which + *
This operation is part of the custom key store feature in KMS, which * combines the convenience and extensive integration of KMS with the isolation and control of a * single-tenant key store.</p>
*Before you create the custom key store, you must assemble @@ -671,7 +671,9 @@ export class KMS extends KMSClient { *
To create a symmetric encryption KMS key, you aren't required to specify any parameters. The default value for
* KeySpec
, SYMMETRIC_DEFAULT
, and the default value for
- * KeyUsage
, ENCRYPT_DECRYPT
, create a symmetric encryption KMS key.
KeyUsage
, ENCRYPT_DECRYPT
, create a symmetric encryption KMS key. For technical details, see
+ *
+ * SYMMETRIC_DEFAULT key spec in the Key Management Service Developer Guide.
* If you need a key for basic encryption and decryption or you * are creating a KMS key to protect your resources in an Amazon Web Services service, create a symmetric encryption KMS key. The key material in a symmetric encryption key never leaves KMS unencrypted. You can use a symmetric encryption KMS key to encrypt and decrypt data up to 4,096 bytes, but they are typically used to generate data keys and data keys pairs. For details, see GenerateDataKey and GenerateDataKeyPair.
*@@ -682,9 +684,9 @@ export class KMS extends KMSClient { * the type of key material in the KMS key. Then, use the
KeyUsage
parameter
* to determine whether the KMS key will be used to encrypt and decrypt or sign and verify.
* You can't change these properties after the KMS key is created.
- * Asymmetric KMS keys contain an RSA key pair or an Elliptic Curve (ECC) key pair. The private key in an asymmetric + *
Asymmetric KMS keys contain an RSA key pair, Elliptic Curve (ECC) key pair, or an SM2 key pair (China Regions only). The private key in an asymmetric * KMS key never leaves KMS unencrypted. However, you can use the GetPublicKey operation to download the public key - * so it can be used outside of KMS. KMS keys with RSA key pairs can be used to encrypt or decrypt data or sign and verify messages (but not both). + * so it can be used outside of KMS. KMS keys with RSA or SM2 key pairs can be used to encrypt or decrypt data or sign and verify messages (but not both). * KMS keys with ECC key pairs can be used only to sign and verify messages. * For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.
*@@ -999,7 +1001,7 @@ export class KMS extends KMSClient { /** *
Deletes a custom key store. This operation does not delete the CloudHSM cluster that is * associated with the custom key store, or affect any users or keys in the cluster.
- *The custom key store that you delete cannot contain any KMS KMS keys. Before deleting the key store, + *
The custom key store that you delete cannot contain any KMS keys. Before deleting the key store,
* verify that you will never need to use any of the KMS keys in the key store for any
* cryptographic operations. Then, use ScheduleKeyDeletion to delete the KMS keys from the
* key store. When the scheduled waiting period expires, the ScheduleKeyDeletion
@@ -1013,7 +1015,7 @@ export class KMS extends KMSClient {
* delete KMS keys and you can reconnect a disconnected custom key store at any time.
If the operation succeeds, it returns a JSON object with no * properties.
- *This operation is part of the Custom Key Store feature feature in KMS, which + *
This operation is part of the custom key store feature in KMS, which * combines the convenience and extensive integration of KMS with the isolation and control of a * single-tenant key store.</p>
*@@ -1145,7 +1147,7 @@ export class KMS extends KMSClient { /** *
Gets information about custom key stores in the account and Region.
- *This operation is part of the Custom Key Store feature feature in KMS, which + *
This operation is part of the custom key store feature in KMS, which * combines the convenience and extensive integration of KMS with the isolation and control of a * single-tenant key store.</p>
*By default, this operation returns information about all custom key @@ -1247,7 +1249,7 @@ export class KMS extends KMSClient { *
Whether automatic key rotation is enabled on the KMS key. To get this information, use * GetKeyRotationStatus. Also, some key states prevent a KMS key from * being automatically rotated. For details, see How Automatic Key Rotation - * Works in Key Management Service Developer Guide.
+ * Works in the Key Management Service Developer Guide. *Tags on the KMS key. To get this information, use ListResourceTags.
@@ -1452,7 +1454,7 @@ export class KMS extends KMSClient { * ConnectCustomKeyStore operation. *If the operation succeeds, it returns a JSON object with no * properties.
- *This operation is part of the Custom Key Store feature feature in KMS, which + *
This operation is part of the custom key store feature in KMS, which * combines the convenience and extensive integration of KMS with the isolation and control of a * single-tenant key store.</p>
* @@ -1649,7 +1651,7 @@ export class KMS extends KMSClient { *InvalidCiphertextException
. For more information, see Encryption
* Context in the Key Management Service Developer Guide.
* If you specify an asymmetric KMS key, you must also specify the encryption algorithm. The - * algorithm must be compatible with the KMS key type.
+ * algorithm must be compatible with the KMS key spec. *When you use an asymmetric KMS key to encrypt or reencrypt data, be sure to record the KMS key and encryption algorithm that you choose. You will be required to provide the same KMS key and encryption algorithm when you decrypt the data. If the KMS key and algorithm do not match the values used to encrypt the data, the decrypt operation fails.
*You are not required to supply the key ID and encryption algorithm when you decrypt with symmetric encryption KMS keys because KMS stores this information in the ciphertext blob. KMS cannot store metadata in ciphertext generated with asymmetric keys. The standard format for asymmetric key ciphertext does not include configurable fields.
@@ -1713,6 +1715,10 @@ export class KMS extends KMSClient { *
+ * SM2PKE
: 1024 bytes (China Regions only)
The KMS key that you use for this operation must be in a compatible key state. For * details, see Key states of KMS keys in the Key Management Service Developer Guide.
@@ -1770,15 +1776,21 @@ export class KMS extends KMSClient { /** *Returns a unique symmetric data key for use outside of KMS. This operation returns a * plaintext copy of the data key and a copy that is encrypted under a symmetric encryption KMS - * key that you specify. The bytes in the plaintext key are random; they are not related to the caller or the KMS - * key. You can use the plaintext key to encrypt your data outside of KMS and store the encrypted - * data key with the encrypted data.
+ * key that you specify. The bytes in the plaintext key are random; they are not related + * to the caller or the KMS key. You can use the plaintext key to encrypt your data outside of KMS + * and store the encrypted data key with the encrypted data. * *To generate a data key, specify the symmetric encryption KMS key that will be used to
* encrypt the data key. You cannot use an asymmetric KMS key to encrypt data keys. To get the
- * type of your KMS key, use the DescribeKey operation. You must also specify the length of
- * the data key. Use either the KeySpec
or NumberOfBytes
parameters
- * (but not both). For 128-bit and 256-bit data keys, use the KeySpec
parameter.
You must also specify the length of the data key. Use either the KeySpec
or
+ * NumberOfBytes
parameters (but not both). For 128-bit and 256-bit data keys, use
+ * the KeySpec
parameter.
To generate an SM4 data key (China Regions only), specify a KeySpec
value of
+ * AES_128
or NumberOfBytes
value of 128
. The symmetric
+ * encryption key used in China Regions to encrypt your data key is an SM4 encryption key.
To get only an encrypted copy of the data key, use GenerateDataKeyWithoutPlaintext. To generate an asymmetric data key pair, use * the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext operation. To get a cryptographically secure @@ -1910,9 +1922,9 @@ export class KMS extends KMSClient { * custom key store. To get the type and origin of your KMS key, use the DescribeKey * operation.
*Use the KeyPairSpec
parameter to choose an RSA or Elliptic Curve (ECC) data
- * key pair. KMS recommends that your use ECC key pairs for signing, and use RSA key pairs for
- * either encryption or signing, but not both. However, KMS cannot enforce any restrictions on
- * the use of data key pairs outside of KMS.
If you are using the data key pair to encrypt data, or for any operation where you don't * immediately need a private key, consider using the GenerateDataKeyPairWithoutPlaintext operation. @@ -2017,9 +2029,9 @@ export class KMS extends KMSClient { * custom key store. To get the type and origin of your KMS key, use the DescribeKey * operation.
*Use the KeyPairSpec
parameter to choose an RSA or Elliptic Curve (ECC) data
- * key pair. KMS recommends that your use ECC key pairs for signing, and use RSA key pairs for
- * either encryption or signing, but not both. However, KMS cannot enforce any restrictions on
- * the use of data key pairs outside of KMS.
* GenerateDataKeyPairWithoutPlaintext
returns a unique data key pair for each
* request. The bytes in the key are not related to the caller or KMS key that is used to encrypt
@@ -2258,13 +2270,16 @@ export class KMS extends KMSClient {
/**
*
Returns a random byte string that is cryptographically secure.
+ *You must use the NumberOfBytes
parameter to specify the length of the random
+ * byte string. There is no default value for string length.
By default, the random byte string is generated in KMS. To generate the byte string in * the CloudHSM cluster that is associated with a custom key store, specify the custom key store * ID.
*Applications in Amazon Web Services Nitro Enclaves can call this operation by using the Amazon Web Services Nitro Enclaves Development Kit. For information about the supporting parameters, see How Amazon Web Services Nitro Enclaves use KMS in the Key Management Service Developer Guide.
*For more information about entropy and random number generation, see * Key Management Service Cryptographic Details.
- * + *
+ * Cross-account use: Not applicable. GenerateRandom
does not use any account-specific resources, such as KMS keys.
* Required permissions: kms:GenerateRandom (IAM policy)
*/ @@ -2495,8 +2510,11 @@ export class KMS extends KMSClient { * KMS by calling the Encrypt, ReEncrypt, or Verify operations with the identifier of an asymmetric KMS key. When you use the * public key within KMS, you benefit from the authentication, authorization, and logging that * are part of every KMS operation. You also reduce of risk of encrypting data that cannot be - * decrypted. These features are not effective outside of KMS. For details, see Special - * Considerations for Downloading Public Keys. + * decrypted. These features are not effective outside of KMS. + *To verify a signature outside of KMS with an SM2 public key (China Regions only), you must
+ * specify the distinguishing ID. By default, KMS uses 1234567812345678
as the
+ * distinguishing ID. For more information, see Offline verification
+ * with SM2 key pairs.
To help you use the public key safely outside of KMS, GetPublicKey
returns
* important information about the public key in the response, including:
If the operation succeeds, it returns a JSON object with no * properties.
- *This operation is part of the Custom Key Store feature feature in KMS, which + *
This operation is part of the custom key store feature in KMS, which * combines the convenience and extensive integration of KMS with the isolation and control of a * single-tenant key store.</p>
*@@ -4130,7 +4148,11 @@ export class KMS extends KMSClient { * signature.
*You can also verify the digital signature by using the public key of the KMS key outside
* of KMS. Use the GetPublicKey operation to download the public key in the
- * asymmetric KMS key and then use the public key to verify the signature outside of KMS. The
+ * asymmetric KMS key and then use the public key to verify the signature outside of KMS. To
+ * verify a signature outside of KMS with an SM2 public key, you must specify the distinguishing
+ * ID. By default, KMS uses 1234567812345678
as the distinguishing ID. For more
+ * information, see Offline
+ * verification with SM2 key pairs in Key Management Service Developer Guide. The
* advantage of using the Verify
operation is that it is performed within KMS. As
* a result, it's easy to call, the operation is performed within the FIPS boundary, it is logged
* in CloudTrail, and you can use key policy and IAM policy to determine who is authorized to use
diff --git a/clients/client-kms/src/commands/CreateCustomKeyStoreCommand.ts b/clients/client-kms/src/commands/CreateCustomKeyStoreCommand.ts
index ca08cd4de3467..7d6c110b69db4 100644
--- a/clients/client-kms/src/commands/CreateCustomKeyStoreCommand.ts
+++ b/clients/client-kms/src/commands/CreateCustomKeyStoreCommand.ts
@@ -25,7 +25,7 @@ export interface CreateCustomKeyStoreCommandOutput extends CreateCustomKeyStoreR
/**
*
Creates a custom key store that is associated with an CloudHSM cluster that you own and * manage.
- *This operation is part of the Custom Key Store feature feature in KMS, which + *
This operation is part of the custom key store feature in KMS, which * combines the convenience and extensive integration of KMS with the isolation and control of a * single-tenant key store.</p>
*Before you create the custom key store, you must assemble diff --git a/clients/client-kms/src/commands/CreateKeyCommand.ts b/clients/client-kms/src/commands/CreateKeyCommand.ts index e788a85ff14a6..5f739f6930f85 100644 --- a/clients/client-kms/src/commands/CreateKeyCommand.ts +++ b/clients/client-kms/src/commands/CreateKeyCommand.ts @@ -34,7 +34,9 @@ export interface CreateKeyCommandOutput extends CreateKeyResponse, __MetadataBea *
To create a symmetric encryption KMS key, you aren't required to specify any parameters. The default value for
* KeySpec
, SYMMETRIC_DEFAULT
, and the default value for
- * KeyUsage
, ENCRYPT_DECRYPT
, create a symmetric encryption KMS key.
KeyUsage
, ENCRYPT_DECRYPT
, create a symmetric encryption KMS key. For technical details, see
+ *
+ * SYMMETRIC_DEFAULT key spec in the Key Management Service Developer Guide.
* If you need a key for basic encryption and decryption or you * are creating a KMS key to protect your resources in an Amazon Web Services service, create a symmetric encryption KMS key. The key material in a symmetric encryption key never leaves KMS unencrypted. You can use a symmetric encryption KMS key to encrypt and decrypt data up to 4,096 bytes, but they are typically used to generate data keys and data keys pairs. For details, see GenerateDataKey and GenerateDataKeyPair.
*@@ -45,9 +47,9 @@ export interface CreateKeyCommandOutput extends CreateKeyResponse, __MetadataBea * the type of key material in the KMS key. Then, use the
KeyUsage
parameter
* to determine whether the KMS key will be used to encrypt and decrypt or sign and verify.
* You can't change these properties after the KMS key is created.
- * Asymmetric KMS keys contain an RSA key pair or an Elliptic Curve (ECC) key pair. The private key in an asymmetric + *
Asymmetric KMS keys contain an RSA key pair, Elliptic Curve (ECC) key pair, or an SM2 key pair (China Regions only). The private key in an asymmetric * KMS key never leaves KMS unencrypted. However, you can use the GetPublicKey operation to download the public key - * so it can be used outside of KMS. KMS keys with RSA key pairs can be used to encrypt or decrypt data or sign and verify messages (but not both). + * so it can be used outside of KMS. KMS keys with RSA or SM2 key pairs can be used to encrypt or decrypt data or sign and verify messages (but not both). * KMS keys with ECC key pairs can be used only to sign and verify messages. * For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.
*diff --git a/clients/client-kms/src/commands/DeleteCustomKeyStoreCommand.ts b/clients/client-kms/src/commands/DeleteCustomKeyStoreCommand.ts index daca2d97e23ae..3bc3d125ed990 100644 --- a/clients/client-kms/src/commands/DeleteCustomKeyStoreCommand.ts +++ b/clients/client-kms/src/commands/DeleteCustomKeyStoreCommand.ts @@ -25,7 +25,7 @@ export interface DeleteCustomKeyStoreCommandOutput extends DeleteCustomKeyStoreR /** *
Deletes a custom key store. This operation does not delete the CloudHSM cluster that is * associated with the custom key store, or affect any users or keys in the cluster.
- *The custom key store that you delete cannot contain any KMS KMS keys. Before deleting the key store, + *
The custom key store that you delete cannot contain any KMS keys. Before deleting the key store,
* verify that you will never need to use any of the KMS keys in the key store for any
* cryptographic operations. Then, use ScheduleKeyDeletion to delete the KMS keys from the
* key store. When the scheduled waiting period expires, the ScheduleKeyDeletion
@@ -39,7 +39,7 @@ export interface DeleteCustomKeyStoreCommandOutput extends DeleteCustomKeyStoreR
* delete KMS keys and you can reconnect a disconnected custom key store at any time.
If the operation succeeds, it returns a JSON object with no * properties.
- *This operation is part of the Custom Key Store feature feature in KMS, which + *
This operation is part of the custom key store feature in KMS, which * combines the convenience and extensive integration of KMS with the isolation and control of a * single-tenant key store.</p>
*diff --git a/clients/client-kms/src/commands/DescribeCustomKeyStoresCommand.ts b/clients/client-kms/src/commands/DescribeCustomKeyStoresCommand.ts index ca7902bc23843..c717d44127ca7 100644 --- a/clients/client-kms/src/commands/DescribeCustomKeyStoresCommand.ts +++ b/clients/client-kms/src/commands/DescribeCustomKeyStoresCommand.ts @@ -24,7 +24,7 @@ export interface DescribeCustomKeyStoresCommandOutput extends DescribeCustomKeyS /** *
Gets information about custom key stores in the account and Region.
- *This operation is part of the Custom Key Store feature feature in KMS, which + *
This operation is part of the custom key store feature in KMS, which * combines the convenience and extensive integration of KMS with the isolation and control of a * single-tenant key store.</p>
*By default, this operation returns information about all custom key diff --git a/clients/client-kms/src/commands/DescribeKeyCommand.ts b/clients/client-kms/src/commands/DescribeKeyCommand.ts index ad303d0551df6..df77f6726f620 100644 --- a/clients/client-kms/src/commands/DescribeKeyCommand.ts +++ b/clients/client-kms/src/commands/DescribeKeyCommand.ts @@ -41,7 +41,7 @@ export interface DescribeKeyCommandOutput extends DescribeKeyResponse, __Metadat *
Whether automatic key rotation is enabled on the KMS key. To get this information, use * GetKeyRotationStatus. Also, some key states prevent a KMS key from * being automatically rotated. For details, see How Automatic Key Rotation - * Works in Key Management Service Developer Guide.
+ * Works in the Key Management Service Developer Guide. *Tags on the KMS key. To get this information, use ListResourceTags.
diff --git a/clients/client-kms/src/commands/DisconnectCustomKeyStoreCommand.ts b/clients/client-kms/src/commands/DisconnectCustomKeyStoreCommand.ts index 2e63ccbea51c3..9c0d6ec1e803d 100644 --- a/clients/client-kms/src/commands/DisconnectCustomKeyStoreCommand.ts +++ b/clients/client-kms/src/commands/DisconnectCustomKeyStoreCommand.ts @@ -36,7 +36,7 @@ export interface DisconnectCustomKeyStoreCommandOutput extends DisconnectCustomK * ConnectCustomKeyStore operation. *If the operation succeeds, it returns a JSON object with no * properties.
- *This operation is part of the Custom Key Store feature feature in KMS, which + *
This operation is part of the custom key store feature in KMS, which * combines the convenience and extensive integration of KMS with the isolation and control of a * single-tenant key store.</p>
* diff --git a/clients/client-kms/src/commands/EncryptCommand.ts b/clients/client-kms/src/commands/EncryptCommand.ts index f79ffe8cc6331..6e7f374e1aa32 100644 --- a/clients/client-kms/src/commands/EncryptCommand.ts +++ b/clients/client-kms/src/commands/EncryptCommand.ts @@ -33,7 +33,7 @@ export interface EncryptCommandOutput extends EncryptResponse, __MetadataBearer *InvalidCiphertextException
. For more information, see Encryption
* Context in the Key Management Service Developer Guide.
* If you specify an asymmetric KMS key, you must also specify the encryption algorithm. The - * algorithm must be compatible with the KMS key type.
+ * algorithm must be compatible with the KMS key spec. *When you use an asymmetric KMS key to encrypt or reencrypt data, be sure to record the KMS key and encryption algorithm that you choose. You will be required to provide the same KMS key and encryption algorithm when you decrypt the data. If the KMS key and algorithm do not match the values used to encrypt the data, the decrypt operation fails.
*You are not required to supply the key ID and encryption algorithm when you decrypt with symmetric encryption KMS keys because KMS stores this information in the ciphertext blob. KMS cannot store metadata in ciphertext generated with asymmetric keys. The standard format for asymmetric key ciphertext does not include configurable fields.
@@ -97,6 +97,10 @@ export interface EncryptCommandOutput extends EncryptResponse, __MetadataBearer *
+ * SM2PKE
: 1024 bytes (China Regions only)
The KMS key that you use for this operation must be in a compatible key state. For * details, see Key states of KMS keys in the Key Management Service Developer Guide.
diff --git a/clients/client-kms/src/commands/GenerateDataKeyCommand.ts b/clients/client-kms/src/commands/GenerateDataKeyCommand.ts index 9f674a315610c..e0f2e9e56aeb7 100644 --- a/clients/client-kms/src/commands/GenerateDataKeyCommand.ts +++ b/clients/client-kms/src/commands/GenerateDataKeyCommand.ts @@ -25,15 +25,21 @@ export interface GenerateDataKeyCommandOutput extends GenerateDataKeyResponse, _ /** *Returns a unique symmetric data key for use outside of KMS. This operation returns a * plaintext copy of the data key and a copy that is encrypted under a symmetric encryption KMS - * key that you specify. The bytes in the plaintext key are random; they are not related to the caller or the KMS - * key. You can use the plaintext key to encrypt your data outside of KMS and store the encrypted - * data key with the encrypted data.
+ * key that you specify. The bytes in the plaintext key are random; they are not related + * to the caller or the KMS key. You can use the plaintext key to encrypt your data outside of KMS + * and store the encrypted data key with the encrypted data. * *To generate a data key, specify the symmetric encryption KMS key that will be used to
* encrypt the data key. You cannot use an asymmetric KMS key to encrypt data keys. To get the
- * type of your KMS key, use the DescribeKey operation. You must also specify the length of
- * the data key. Use either the KeySpec
or NumberOfBytes
parameters
- * (but not both). For 128-bit and 256-bit data keys, use the KeySpec
parameter.
You must also specify the length of the data key. Use either the KeySpec
or
+ * NumberOfBytes
parameters (but not both). For 128-bit and 256-bit data keys, use
+ * the KeySpec
parameter.
To generate an SM4 data key (China Regions only), specify a KeySpec
value of
+ * AES_128
or NumberOfBytes
value of 128
. The symmetric
+ * encryption key used in China Regions to encrypt your data key is an SM4 encryption key.
To get only an encrypted copy of the data key, use GenerateDataKeyWithoutPlaintext. To generate an asymmetric data key pair, use * the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext operation. To get a cryptographically secure diff --git a/clients/client-kms/src/commands/GenerateDataKeyPairCommand.ts b/clients/client-kms/src/commands/GenerateDataKeyPairCommand.ts index c2b16f35cdb2b..a671d1cb361eb 100644 --- a/clients/client-kms/src/commands/GenerateDataKeyPairCommand.ts +++ b/clients/client-kms/src/commands/GenerateDataKeyPairCommand.ts @@ -39,9 +39,9 @@ export interface GenerateDataKeyPairCommandOutput extends GenerateDataKeyPairRes * custom key store. To get the type and origin of your KMS key, use the DescribeKey * operation.
*Use the KeyPairSpec
parameter to choose an RSA or Elliptic Curve (ECC) data
- * key pair. KMS recommends that your use ECC key pairs for signing, and use RSA key pairs for
- * either encryption or signing, but not both. However, KMS cannot enforce any restrictions on
- * the use of data key pairs outside of KMS.
If you are using the data key pair to encrypt data, or for any operation where you don't * immediately need a private key, consider using the GenerateDataKeyPairWithoutPlaintext operation. diff --git a/clients/client-kms/src/commands/GenerateDataKeyPairWithoutPlaintextCommand.ts b/clients/client-kms/src/commands/GenerateDataKeyPairWithoutPlaintextCommand.ts index f8df32c2c444b..17dcf673805f7 100644 --- a/clients/client-kms/src/commands/GenerateDataKeyPairWithoutPlaintextCommand.ts +++ b/clients/client-kms/src/commands/GenerateDataKeyPairWithoutPlaintextCommand.ts @@ -41,9 +41,9 @@ export interface GenerateDataKeyPairWithoutPlaintextCommandOutput * custom key store. To get the type and origin of your KMS key, use the DescribeKey * operation.
*Use the KeyPairSpec
parameter to choose an RSA or Elliptic Curve (ECC) data
- * key pair. KMS recommends that your use ECC key pairs for signing, and use RSA key pairs for
- * either encryption or signing, but not both. However, KMS cannot enforce any restrictions on
- * the use of data key pairs outside of KMS.
* GenerateDataKeyPairWithoutPlaintext
returns a unique data key pair for each
* request. The bytes in the key are not related to the caller or KMS key that is used to encrypt
diff --git a/clients/client-kms/src/commands/GenerateRandomCommand.ts b/clients/client-kms/src/commands/GenerateRandomCommand.ts
index 5c1453a1682b4..6ddb12452e15e 100644
--- a/clients/client-kms/src/commands/GenerateRandomCommand.ts
+++ b/clients/client-kms/src/commands/GenerateRandomCommand.ts
@@ -24,13 +24,16 @@ export interface GenerateRandomCommandOutput extends GenerateRandomResponse, __M
/**
*
Returns a random byte string that is cryptographically secure.
+ *You must use the NumberOfBytes
parameter to specify the length of the random
+ * byte string. There is no default value for string length.
By default, the random byte string is generated in KMS. To generate the byte string in * the CloudHSM cluster that is associated with a custom key store, specify the custom key store * ID.
*Applications in Amazon Web Services Nitro Enclaves can call this operation by using the Amazon Web Services Nitro Enclaves Development Kit. For information about the supporting parameters, see How Amazon Web Services Nitro Enclaves use KMS in the Key Management Service Developer Guide.
*For more information about entropy and random number generation, see * Key Management Service Cryptographic Details.
- * + *
+ * Cross-account use: Not applicable. GenerateRandom
does not use any account-specific resources, such as KMS keys.
* Required permissions: kms:GenerateRandom (IAM policy)
* @example diff --git a/clients/client-kms/src/commands/GetPublicKeyCommand.ts b/clients/client-kms/src/commands/GetPublicKeyCommand.ts index eb588316d7d2f..6b8e236969a4b 100644 --- a/clients/client-kms/src/commands/GetPublicKeyCommand.ts +++ b/clients/client-kms/src/commands/GetPublicKeyCommand.ts @@ -32,8 +32,11 @@ export interface GetPublicKeyCommandOutput extends GetPublicKeyResponse, __Metad * KMS by calling the Encrypt, ReEncrypt, or Verify operations with the identifier of an asymmetric KMS key. When you use the * public key within KMS, you benefit from the authentication, authorization, and logging that * are part of every KMS operation. You also reduce of risk of encrypting data that cannot be - * decrypted. These features are not effective outside of KMS. For details, see Special - * Considerations for Downloading Public Keys. + * decrypted. These features are not effective outside of KMS. + *To verify a signature outside of KMS with an SM2 public key (China Regions only), you must
+ * specify the distinguishing ID. By default, KMS uses 1234567812345678
as the
+ * distinguishing ID. For more information, see Offline verification
+ * with SM2 key pairs.
To help you use the public key safely outside of KMS, GetPublicKey
returns
* important information about the public key in the response, including:
If the operation succeeds, it returns a JSON object with no * properties.
- *This operation is part of the Custom Key Store feature feature in KMS, which + *
This operation is part of the custom key store feature in KMS, which * combines the convenience and extensive integration of KMS with the isolation and control of a * single-tenant key store.
*diff --git a/clients/client-kms/src/commands/VerifyCommand.ts b/clients/client-kms/src/commands/VerifyCommand.ts index dff46a267f423..52fccfba53d6e 100644 --- a/clients/client-kms/src/commands/VerifyCommand.ts +++ b/clients/client-kms/src/commands/VerifyCommand.ts @@ -35,7 +35,11 @@ export interface VerifyCommandOutput extends VerifyResponse, __MetadataBearer {} * signature.
*You can also verify the digital signature by using the public key of the KMS key outside
* of KMS. Use the GetPublicKey operation to download the public key in the
- * asymmetric KMS key and then use the public key to verify the signature outside of KMS. The
+ * asymmetric KMS key and then use the public key to verify the signature outside of KMS. To
+ * verify a signature outside of KMS with an SM2 public key, you must specify the distinguishing
+ * ID. By default, KMS uses 1234567812345678
as the distinguishing ID. For more
+ * information, see Offline
+ * verification with SM2 key pairs in Key Management Service Developer Guide. The
* advantage of using the Verify
operation is that it is performed within KMS. As
* a result, it's easy to call, the operation is performed within the FIPS boundary, it is logged
* in CloudTrail, and you can use key policy and IAM policy to determine who is authorized to use
diff --git a/clients/client-kms/src/models/models_0.ts b/clients/client-kms/src/models/models_0.ts
index b00f17b244947..565a884c467b8 100644
--- a/clients/client-kms/src/models/models_0.ts
+++ b/clients/client-kms/src/models/models_0.ts
@@ -450,6 +450,7 @@ export class CustomKeyStoreNotFoundException extends __BaseException {
export enum ConnectionErrorCodeType {
CLUSTER_NOT_FOUND = "CLUSTER_NOT_FOUND",
INSUFFICIENT_CLOUDHSM_HSMS = "INSUFFICIENT_CLOUDHSM_HSMS",
+ INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET = "INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET",
INTERNAL_ERROR = "INTERNAL_ERROR",
INVALID_CREDENTIALS = "INVALID_CREDENTIALS",
NETWORK_ERRORS = "NETWORK_ERRORS",
@@ -565,13 +566,13 @@ export interface CreateCustomKeyStoreRequest {
* CloudHSM cluster that is not already associated with a custom key store. To find the cluster ID,
* use the DescribeClusters operation.
Enter the content of the trust anchor certificate for the cluster. This is the content of
* the customerCA.crt
file that you created when you initialized the cluster.
A key policy document must conform to the following rules.
+ *A key policy document can include only the following characters:
*Up to 32 kilobytes (32768 bytes)
+ *Printable ASCII characters from the space character (\u0020
) through the end of the ASCII character range.
Must be UTF-8 encoded
+ *Printable characters in the Basic Latin and Latin-1 Supplement character set (through \u00FF
).
The only Unicode characters that are permitted in a key policy document are the horizontal tab (U+0009), linefeed (U+000A), carriage return (U+000D), and characters in the range U+0020 to U+00FF.
- *The Sid
element in a key policy statement can include spaces. (Spaces are
- * prohibited in the Sid
element of an IAM policy document.)
The tab (\u0009
), line feed (\u000A
), and carriage return (\u000D
) special characters
For help writing and formatting a JSON policy document, see the IAM JSON Policy Reference in the + *
For information about key policies, see Key policies in KMS in the + * Key Management Service Developer Guide. For help writing and formatting a JSON policy document, see the IAM JSON Policy Reference in the * Identity and Access Management User Guide * .
*/ @@ -1045,6 +1045,10 @@ export interface CreateKeyRequest { *For asymmetric KMS keys with ECC key material, specify
* SIGN_VERIFY
.
For asymmetric KMS keys with SM2 key material (China Regions only), specify ENCRYPT_DECRYPT
or
+ * SIGN_VERIFY
.
Specifies the type of KMS key to create. The default value,
- * SYMMETRIC_DEFAULT
, creates a KMS key with a 256-bit symmetric key for encryption
- * and decryption. For help choosing a key spec for your KMS key, see Choosing a KMS key type in the
+ * SYMMETRIC_DEFAULT
, creates a KMS key with a 256-bit AES-GCM key that is used for encryption and decryption, except in China Regions,
+ * where it creates a 128-bit symmetric key that uses SM4 encryption. For help choosing a key spec for your KMS key, see Choosing a KMS key type in the
* Key Management Service Developer Guide
* .
The KeySpec
determines whether the KMS key contains a symmetric key or an
@@ -1085,7 +1089,8 @@ export interface CreateKeyRequest {
*
- * SYMMETRIC_DEFAULT
(AES-256-GCM)
SYMMETRIC_DEFAULT
+ *
* SM2 key pairs (China Regions only)
+ *
+ * SM2
+ *
To find the ID of a custom key store, use the DescribeCustomKeyStores operation.
*The response includes the custom key store ID and the ID of the CloudHSM cluster.
- *This operation is part of the Custom Key Store feature feature in KMS, which + *
This operation is part of the custom key store feature in KMS, which * combines the convenience and extensive integration of KMS with the isolation and control of a * single-tenant key store.
*/ @@ -1259,6 +1274,7 @@ export namespace CreateKeyRequest { export enum EncryptionAlgorithmSpec { RSAES_OAEP_SHA_1 = "RSAES_OAEP_SHA_1", RSAES_OAEP_SHA_256 = "RSAES_OAEP_SHA_256", + SM2PKE = "SM2PKE", SYMMETRIC_DEFAULT = "SYMMETRIC_DEFAULT", } @@ -1364,6 +1380,7 @@ export enum SigningAlgorithmSpec { RSASSA_PSS_SHA_256 = "RSASSA_PSS_SHA_256", RSASSA_PSS_SHA_384 = "RSASSA_PSS_SHA_384", RSASSA_PSS_SHA_512 = "RSASSA_PSS_SHA_512", + SM2DSA = "SM2DSA", } /** @@ -1801,6 +1818,7 @@ export enum DataKeyPairSpec { RSA_2048 = "RSA_2048", RSA_3072 = "RSA_3072", RSA_4096 = "RSA_4096", + SM2 = "SM2", } export enum DataKeySpec { @@ -2659,7 +2677,7 @@ export interface GenerateDataKeyPairRequest { /** *Determines the type of data key pair that is generated.
- *The KMS rule that restricts the use of asymmetric RSA KMS keys to encrypt and decrypt or to sign and verify (but not both), and the rule that permits you to use ECC KMS keys only to sign and verify, are not effective on data key pairs, which are used outside of KMS.
+ *The KMS rule that restricts the use of asymmetric RSA and SM2 KMS keys to encrypt and decrypt or to sign and verify (but not both), and the rule that permits you to use ECC KMS keys only to sign and verify, are not effective on data key pairs, which are used outside of KMS. The SM2 key spec is only available in China Regions. RSA and ECC asymmetric key pairs are also available in China Regions.
*/ KeyPairSpec: DataKeyPairSpec | string | undefined; @@ -2760,7 +2778,7 @@ export interface GenerateDataKeyPairWithoutPlaintextRequest { /** *Determines the type of data key pair that is generated.
- *The KMS rule that restricts the use of asymmetric RSA KMS keys to encrypt and decrypt or to sign and verify (but not both), and the rule that permits you to use ECC KMS keys only to sign and verify, are not effective on data key pairs, which are used outside of KMS.
+ *The KMS rule that restricts the use of asymmetric RSA and SM2 KMS keys to encrypt and decrypt or to sign and verify (but not both), and the rule that permits you to use ECC KMS keys only to sign and verify, are not effective on data key pairs, which are used outside of KMS. The SM2 key spec is only available in China Regions. RSA and ECC asymmetric key pairs are also available in China Regions.
*/ KeyPairSpec: DataKeyPairSpec | string | undefined; @@ -2975,7 +2993,7 @@ export namespace GenerateMacResponse { export interface GenerateRandomRequest { /** - *The length of the byte string.
+ *The length of the random byte string. This parameter is required.
*/ NumberOfBytes?: number; @@ -4034,22 +4052,22 @@ export interface PutKeyPolicyRequest { *A key policy document must conform to the following rules.
+ *A key policy document can include only the following characters:
*Up to 32 kilobytes (32768 bytes)
- *Must be UTF-8 encoded
+ *Printable ASCII characters from the space character (\u0020
) through the end of the ASCII character range.
The only Unicode characters that are permitted in a key policy document are the horizontal tab (U+0009), linefeed (U+000A), carriage return (U+000D), and characters in the range U+0020 to U+00FF.
+ *Printable characters in the Basic Latin and Latin-1 Supplement character set (through \u00FF
).
The Sid
element in a key policy statement can include spaces. (Spaces are
- * prohibited in the Sid
element of an IAM policy document.)
The tab (\u0009
), line feed (\u000A
), and carriage return (\u000D
) special characters
For information about key policies, see Key policies in KMS in the + * Key Management Service Developer Guide. For help writing and formatting a JSON policy document, see the IAM JSON Policy Reference in the + * Identity and Access Management User Guide + * .
*/ Policy: string | undefined; @@ -4314,22 +4332,22 @@ export interface ReplicateKeyRequest { * * * - *A key policy document must conform to the following rules.
+ *A key policy document can include only the following characters:
*Up to 32 kilobytes (32768 bytes)
- *Must be UTF-8 encoded
+ *Printable ASCII characters from the space character (\u0020
) through the end of the ASCII character range.
The only Unicode characters that are permitted in a key policy document are the horizontal tab (U+0009), linefeed (U+000A), carriage return (U+000D), and characters in the range U+0020 to U+00FF.
+ *Printable characters in the Basic Latin and Latin-1 Supplement character set (through \u00FF
).
The Sid
element in a key policy statement can include spaces. (Spaces are
- * prohibited in the Sid
element of an IAM policy document.)
The tab (\u0009
), line feed (\u000A
), and carriage return (\u000D
) special characters
For information about key policies, see Key policies in KMS in the + * Key Management Service Developer Guide. For help writing and formatting a JSON policy document, see the IAM JSON Policy Reference in the + * Identity and Access Management User Guide + * .
*/ Policy?: string; @@ -4760,7 +4778,7 @@ export interface UpdateAliasRequest { /** *Identifies the alias that is changing its KMS key. This value must begin with
* alias/
followed by the alias name, such as alias/ExampleAlias
. You
- * cannot use UpdateAlias to change the alias name.
UpdateAlias
to change the alias name.
*/
AliasName: string | undefined;
diff --git a/clients/client-kms/src/pagination/DescribeCustomKeyStoresPaginator.ts b/clients/client-kms/src/pagination/DescribeCustomKeyStoresPaginator.ts
new file mode 100644
index 0000000000000..95d9ac5d6054f
--- /dev/null
+++ b/clients/client-kms/src/pagination/DescribeCustomKeyStoresPaginator.ts
@@ -0,0 +1,61 @@
+// smithy-typescript generated code
+import { Paginator } from "@aws-sdk/types";
+
+import {
+ DescribeCustomKeyStoresCommand,
+ DescribeCustomKeyStoresCommandInput,
+ DescribeCustomKeyStoresCommandOutput,
+} from "../commands/DescribeCustomKeyStoresCommand";
+import { KMS } from "../KMS";
+import { KMSClient } from "../KMSClient";
+import { KMSPaginationConfiguration } from "./Interfaces";
+
+/**
+ * @private
+ */
+const makePagedClientRequest = async (
+ client: KMSClient,
+ input: DescribeCustomKeyStoresCommandInput,
+ ...args: any
+): PromiseGeneral information about the IP set.
+ */ +export interface IPSetMetadata { + /** + *Describes the total number of CIDR blocks currently in use by the IP set references in a firewall. To determine how many CIDR blocks are available for you to use in a firewall, you can call AvailableCIDRCount
.
Summarizes the CIDR blocks used by the IP set references in a firewall. Network Firewall calculates the number of CIDRs by taking an aggregated count of all CIDRs used by the IP sets you are referencing.
+ */ +export interface CIDRSummary { + /** + *The number of CIDR blocks available for use by the IP set references in a firewall.
+ */ + AvailableCIDRCount?: number; + + /** + *The number of CIDR blocks used by the IP set references in a firewall.
+ */ + UtilizedCIDRCount?: number; + + /** + *The list of the IP set references used by a firewall.
+ */ + IPSetReferences?: RecordThe capacity usage summary of the resources used by the ReferenceSets in a firewall.
+ */ +export interface CapacityUsageSummary { + /** + *Describes the capacity usage of the CIDR blocks used by the IP set references in a firewall.
+ */ + CIDRs?: CIDRSummary; +} + +export namespace CapacityUsageSummary { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CapacityUsageSummary): any => ({ + ...obj, + }); +} + export enum ConfigurationSyncState { + CAPACITY_CONSTRAINED = "CAPACITY_CONSTRAINED", IN_SYNC = "IN_SYNC", PENDING = "PENDING", } @@ -699,6 +767,7 @@ export enum FirewallStatusValue { } export enum PerObjectSyncStatus { + CAPACITY_CONSTRAINED = "CAPACITY_CONSTRAINED", IN_SYNC = "IN_SYNC", PENDING = "PENDING", } @@ -810,6 +879,11 @@ export interface FirewallStatus { * and configuration object. */ SyncStates?: RecordDescribes the capacity usage of the resources contained in a firewall's reference sets. Network Firewall calclulates the capacity usage by taking an aggregated count of all of the resources used by all of the reference sets in a firewall.
+ */ + CapacityUsageSummary?: CapacityUsageSummary; } export namespace FirewallStatus { @@ -1268,6 +1342,47 @@ export namespace CreateFirewallPolicyResponse { }); } +/** + *Configures one or more IP set references for a Suricata-compatible rule group. This is used in CreateRuleGroup or UpdateRuleGroup. An IP set reference is a rule variable that references a resource that you create and manage in another Amazon Web Services service, such as an Amazon VPC prefix list. Network Firewall IP set references enable you to dynamically update the contents of your rules. When you create, update, or delete the IP set you are referencing in your rule, Network Firewall automatically updates the rule's content with the changes. For more information about IP set references in Network Firewall, see Using IP set references in the Network Firewall Developer Guide.
+ *+ * Network Firewall currently supports only Amazon VPC prefix lists as IP set references. + *
+ */ +export interface IPSetReference { + /** + *The Amazon Resource Name (ARN) of the resource that you are referencing in your rule group.
+ */ + ReferenceArn?: string; +} + +export namespace IPSetReference { + /** + * @internal + */ + export const filterSensitiveLog = (obj: IPSetReference): any => ({ + ...obj, + }); +} + +/** + *Contains a set of IP set references.
+ */ +export interface ReferenceSets { + /** + *The list of IP set references.
+ */ + IPSetReferences?: RecordThe list of a rule group's reference sets.
+ */ + ReferenceSets?: ReferenceSets; + /** *The stateful rules or stateless rules for the rule group.
*/ diff --git a/clients/client-network-firewall/src/protocols/Aws_json1_0.ts b/clients/client-network-firewall/src/protocols/Aws_json1_0.ts index f3bb6ff4df143..60c83e340a4d1 100644 --- a/clients/client-network-firewall/src/protocols/Aws_json1_0.ts +++ b/clients/client-network-firewall/src/protocols/Aws_json1_0.ts @@ -109,6 +109,8 @@ import { AssociateSubnetsRequest, AssociateSubnetsResponse, Attachment, + CapacityUsageSummary, + CIDRSummary, CreateFirewallPolicyRequest, CreateFirewallPolicyResponse, CreateFirewallRequest, @@ -154,6 +156,8 @@ import { InvalidResourcePolicyException, InvalidTokenException, IPSet, + IPSetMetadata, + IPSetReference, LimitExceededException, ListFirewallPoliciesRequest, ListFirewallPoliciesResponse, @@ -173,6 +177,7 @@ import { PublishMetricAction, PutResourcePolicyRequest, PutResourcePolicyResponse, + ReferenceSets, ResourceNotFoundException, ResourceOwnerCheckException, RuleDefinition, @@ -2842,6 +2847,24 @@ const serializeAws_json1_0IPSet = (input: IPSet, context: __SerdeContext): any = }; }; +const serializeAws_json1_0IPSetReference = (input: IPSetReference, context: __SerdeContext): any => { + return { + ...(input.ReferenceArn != null && { ReferenceArn: input.ReferenceArn }), + }; +}; + +const serializeAws_json1_0IPSetReferenceMap = (input: RecordThis action only applies to Aurora MySQL DB clusters.
+ *This action applies only to Aurora MySQL DB clusters.
*Copies a snapshot of a DB cluster.
*To copy a DB cluster snapshot from a shared manual DB cluster snapshot, SourceDBClusterSnapshotIdentifier
* must be the Amazon Resource Name (ARN) of the shared DB cluster snapshot.
You can copy an encrypted DB cluster snapshot from another Amazon Web Services Region. In that case, the Amazon Web Services Region where you call the CopyDBClusterSnapshot
action
- * is the destination Amazon Web Services Region for the encrypted DB cluster snapshot to be copied to. To copy an encrypted DB cluster snapshot from another Amazon Web Services Region,
- * you must provide the following values:
You can copy an encrypted DB cluster snapshot from another Amazon Web Services Region. In that case,
+ * the Amazon Web Services Region where you call the CopyDBClusterSnapshot
operation is the
+ * destination Amazon Web Services Region for the encrypted DB cluster snapshot to be copied to. To copy
+ * an encrypted DB cluster snapshot from another Amazon Web Services Region, you must provide the
+ * following values:
@@ -1062,49 +1064,14 @@ export class RDS extends RDSClient { *
- * PreSignedUrl
- A URL that contains a Signature Version 4 signed request for the
- * CopyDBClusterSnapshot
action to be called in the source Amazon Web Services Region where the DB cluster snapshot is copied from.
- * The pre-signed URL must be a valid request for the CopyDBClusterSnapshot
API action that can be executed in the
- * source Amazon Web Services Region that contains the encrypted DB cluster snapshot to be copied.
The pre-signed URL request must contain the following parameter values:
- *
- * KmsKeyId
- The Amazon Web Services KMS key identifier for the KMS key to use to encrypt the copy of the DB
- * cluster snapshot in the destination Amazon Web Services Region. This is the same identifier for both the CopyDBClusterSnapshot
- * action that is called in the destination Amazon Web Services Region, and the action contained in the pre-signed URL.
- * DestinationRegion
- The name of the Amazon Web Services Region that the DB cluster snapshot is to be created in.
- * SourceDBClusterSnapshotIdentifier
- The DB cluster snapshot identifier for the encrypted DB cluster
- * snapshot to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source Amazon Web Services Region. For example,
- * if you are copying an encrypted DB cluster snapshot from the us-west-2 Amazon Web Services Region, then your SourceDBClusterSnapshotIdentifier
- * looks like the following example: arn:aws:rds:us-west-2:123456789012:cluster-snapshot:aurora-cluster1-snapshot-20161115
.
To learn how to generate a Signature Version 4 signed request, see - * - * Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and - * - * Signature Version 4 Signing Process.
- *If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion
(or --source-region
for the CLI)
- * instead of specifying PreSignedUrl
manually. Specifying SourceRegion
autogenerates a pre-signed URL that is a valid
- * request for the operation that can be executed in the source Amazon Web Services Region.
* TargetDBClusterSnapshotIdentifier
- The identifier for the new copy of the DB cluster snapshot in the destination Amazon Web Services Region.
- * SourceDBClusterSnapshotIdentifier
- The DB cluster snapshot identifier for the encrypted DB cluster snapshot to be copied.
- * This identifier must be in the ARN format for the source Amazon Web Services Region and is the same value as the SourceDBClusterSnapshotIdentifier
in the pre-signed URL.
SourceDBClusterSnapshotIdentifier
- The DB cluster snapshot
+ * identifier for the encrypted DB cluster snapshot to be copied. This identifier
+ * must be in the ARN format for the source Amazon Web Services Region and is the same value as
+ * the SourceDBClusterSnapshotIdentifier
in the presigned URL.
* To cancel the copy operation once it is in progress, delete the target DB cluster snapshot identified @@ -1182,9 +1149,9 @@ export class RDS extends RDSClient { /** *
Copies the specified DB snapshot. The source DB snapshot must be in the available
state.
You can copy a snapshot from one Amazon Web Services Region to another. In that case, the Amazon Web Services Region
- * where you call the CopyDBSnapshot
action is the destination Amazon Web Services Region for the
- * DB snapshot copy.
You can copy a snapshot from one Amazon Web Services Region to another. In that case, the
+ * Amazon Web Services Region where you call the CopyDBSnapshot
operation is the destination
+ * Amazon Web Services Region for the DB snapshot copy.
This command doesn't apply to RDS Custom.
*For more information about copying snapshots, see * Copying a DB Snapshot in the Amazon RDS User Guide.
@@ -1316,9 +1283,7 @@ export class RDS extends RDSClient { *Creates a new Amazon Aurora DB cluster or Multi-AZ DB cluster.
*You can use the ReplicationSourceIdentifier
parameter to create an Amazon
* Aurora DB cluster as a read replica of another DB cluster or Amazon RDS MySQL or
- * PostgreSQL DB instance. For cross-Region replication where the DB cluster identified by
- * ReplicationSourceIdentifier
is encrypted, also specify the
- * PreSignedUrl
parameter.
For more information on Amazon Aurora, see * * What is Amazon Aurora? in the Amazon Aurora User Guide.
@@ -1358,7 +1323,7 @@ export class RDS extends RDSClient { /** *Creates a new custom endpoint and associates it with an Amazon Aurora DB cluster.
*This action only applies to Aurora DB clusters.
+ *This action applies only to Aurora DB clusters.
*After you create a DB cluster parameter group, you should wait at least 5 minutes
- * before creating your first DB cluster
- * that uses that DB cluster parameter group as the default parameter
- * group. This allows Amazon RDS to fully complete the create action before the DB cluster parameter
- * group is used as the default for a new DB cluster. This is especially important for parameters
- * that are critical when creating the default database for a DB cluster, such as the character set
- * for the default database defined by the character_set_database
parameter. You can use the
- * Parameter Groups option of the Amazon RDS console or the
- * DescribeDBClusterParameters
action to verify
- * that your DB cluster parameter group has been created or modified.
character_set_database
parameter. You can use
+ * the Parameter Groups option of the Amazon RDS console or the
+ * DescribeDBClusterParameters
operation to verify that your DB
+ * cluster parameter group has been created or modified.
* For more information on Amazon Aurora, see
*
@@ -1490,6 +1455,14 @@ export class RDS extends RDSClient {
/**
* Creates a new DB instance. The new DB instance can be an RDS DB instance, or it can be a DB instance in an Aurora DB cluster.
+ * For an Aurora DB cluster, you can call this operation multiple times to add more than one DB instance
+ * to the cluster. For more information about creating an RDS DB instance, see
+ * Creating an Amazon RDS DB instance in the Amazon RDS User Guide. For more information about creating a DB instance in an Aurora DB cluster, see
+ *
+ * Creating an Amazon Aurora DB cluster in the Amazon Aurora User Guide.
Amazon Aurora doesn't support this action. Call the CreateDBInstance
- * action to create a DB instance for an Aurora DB cluster.
Amazon Aurora doesn't support this operation. Call the CreateDBInstance
+ * operation to create a DB instance for an Aurora DB cluster.
All read replica DB instances are created with backups disabled. All other DB * instance attributes (including DB security groups and DB parameter groups) are inherited * from the source DB instance, except as specified.
@@ -1781,7 +1754,7 @@ export class RDS extends RDSClient { } /** - *Creates an RDS event notification subscription. This action requires a topic Amazon + *
Creates an RDS event notification subscription. This operation requires a topic Amazon * Resource Name (ARN) created by either the RDS console, the SNS console, or the SNS API. * To obtain an ARN with SNS, you must create a topic in Amazon SNS and subscribe to the * topic. The ARN is displayed in the SNS console.
@@ -1845,7 +1818,7 @@ export class RDS extends RDSClient { * Or you can specify an existing Aurora cluster during the create operation, * and this cluster becomes the primary cluster of the global database. *This action only applies to Aurora DB clusters.
+ *This action applies only to Aurora DB clusters.
*Returns events related to DB instances, DB clusters, DB parameter groups, DB security groups, DB snapshots, DB cluster snapshots, and RDS Proxies for the past 14 days. * Events specific to a particular DB instance, DB cluster, DB parameter group, DB security group, DB snapshot, DB cluster snapshot group, or RDS Proxy can be * obtained by providing the name as a parameter.
+ *For more information on working with events, see Monitoring Amazon RDS events in the Amazon RDS User Guide and Monitoring Amazon Aurora + * events in the Amazon Aurora User Guide.
*By default, RDS returns events that were generated in the past hour.
*For a Multi-AZ DB cluster, failover for a DB cluster promotes one of the readable standby DB instances (read-only instances) * in the DB cluster to be the primary DB instance (the cluster writer).
*An Amazon Aurora DB cluster automatically fails over to an Aurora Replica, if one exists, - * when the primary DB instance fails. A Multi-AZ DB cluster automatically fails over to a readbable standby + * when the primary DB instance fails. A Multi-AZ DB cluster automatically fails over to a readable standby * DB instance when the primary DB instance fails.
*To simulate a failure of a primary instance for testing, you can force a failover.
* Because each instance in a DB cluster has its own endpoint address, make sure to clean up and re-establish any existing
@@ -4242,7 +4217,7 @@ export class RDS extends RDSClient {
* that are critical when creating the default database for a DB cluster, such as the character set
* for the default database defined by the character_set_database
parameter. You can use the
* Parameter Groups option of the Amazon RDS console or the
- * DescribeDBClusterParameters
action to verify
+ * DescribeDBClusterParameters
operation to verify
* that your DB cluster parameter group has been created or modified.
If the modified DB cluster parameter group is used by an Aurora Serverless v1 cluster, Aurora
* applies the update immediately. The cluster restart might interrupt your workload. In that case,
@@ -4304,7 +4279,7 @@ export class RDS extends RDSClient {
* parameter. You can't use all
as a value for that parameter in this
* case.
To view which Amazon Web Services accounts have access to copy or restore a manual DB cluster
- * snapshot, or whether a manual DB cluster snapshot is public or private, use the DescribeDBClusterSnapshotAttributes API action. The accounts are
+ * snapshot, or whether a manual DB cluster snapshot is public or private, use the DescribeDBClusterSnapshotAttributes API operation. The accounts are
* returned as values for the restore
attribute.
ValuesToAdd
parameter. You
* can't use all
as a value for that parameter in this case.
* To view which Amazon Web Services accounts have access to copy or restore a manual DB snapshot, or
- * whether a manual DB snapshot public or private, use the DescribeDBSnapshotAttributes API action. The accounts are returned as
+ * whether a manual DB snapshot public or private, use the DescribeDBSnapshotAttributes API operation. The accounts are returned as
* values for the restore
attribute.
For more information about rebooting, see Rebooting a DB Instance in the Amazon RDS User Guide. *
*This command doesn't apply to RDS Custom.
+ *If your DB instance is part of a Multi-AZ DB cluster, you can reboot the DB cluster with the RebootDBCluster
operation.
Stops automated backup replication for a DB instance.
- *This command doesn't apply to RDS Custom.
+ *This command doesn't apply to RDS Custom, Aurora MySQL, and Aurora PostgreSQL.
*For more information, see * Replicating Automated Backups to Another Amazon Web Services Region in the Amazon RDS User Guide. *
diff --git a/clients/client-rds/src/commands/BacktrackDBClusterCommand.ts b/clients/client-rds/src/commands/BacktrackDBClusterCommand.ts index 77d8876d1516b..e370d060d1985 100644 --- a/clients/client-rds/src/commands/BacktrackDBClusterCommand.ts +++ b/clients/client-rds/src/commands/BacktrackDBClusterCommand.ts @@ -29,7 +29,7 @@ export interface BacktrackDBClusterCommandOutput extends DBClusterBacktrack, __M * Backtracking an Aurora DB Cluster in the * Amazon Aurora User Guide. *This action only applies to Aurora MySQL DB clusters.
+ *This action applies only to Aurora MySQL DB clusters.
*Copies a snapshot of a DB cluster.
*To copy a DB cluster snapshot from a shared manual DB cluster snapshot, SourceDBClusterSnapshotIdentifier
* must be the Amazon Resource Name (ARN) of the shared DB cluster snapshot.
You can copy an encrypted DB cluster snapshot from another Amazon Web Services Region. In that case, the Amazon Web Services Region where you call the CopyDBClusterSnapshot
action
- * is the destination Amazon Web Services Region for the encrypted DB cluster snapshot to be copied to. To copy an encrypted DB cluster snapshot from another Amazon Web Services Region,
- * you must provide the following values:
You can copy an encrypted DB cluster snapshot from another Amazon Web Services Region. In that case,
+ * the Amazon Web Services Region where you call the CopyDBClusterSnapshot
operation is the
+ * destination Amazon Web Services Region for the encrypted DB cluster snapshot to be copied to. To copy
+ * an encrypted DB cluster snapshot from another Amazon Web Services Region, you must provide the
+ * following values:
@@ -38,49 +40,14 @@ export interface CopyDBClusterSnapshotCommandOutput extends CopyDBClusterSnapsho *
- * PreSignedUrl
- A URL that contains a Signature Version 4 signed request for the
- * CopyDBClusterSnapshot
action to be called in the source Amazon Web Services Region where the DB cluster snapshot is copied from.
- * The pre-signed URL must be a valid request for the CopyDBClusterSnapshot
API action that can be executed in the
- * source Amazon Web Services Region that contains the encrypted DB cluster snapshot to be copied.
The pre-signed URL request must contain the following parameter values:
- *
- * KmsKeyId
- The Amazon Web Services KMS key identifier for the KMS key to use to encrypt the copy of the DB
- * cluster snapshot in the destination Amazon Web Services Region. This is the same identifier for both the CopyDBClusterSnapshot
- * action that is called in the destination Amazon Web Services Region, and the action contained in the pre-signed URL.
- * DestinationRegion
- The name of the Amazon Web Services Region that the DB cluster snapshot is to be created in.
- * SourceDBClusterSnapshotIdentifier
- The DB cluster snapshot identifier for the encrypted DB cluster
- * snapshot to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source Amazon Web Services Region. For example,
- * if you are copying an encrypted DB cluster snapshot from the us-west-2 Amazon Web Services Region, then your SourceDBClusterSnapshotIdentifier
- * looks like the following example: arn:aws:rds:us-west-2:123456789012:cluster-snapshot:aurora-cluster1-snapshot-20161115
.
To learn how to generate a Signature Version 4 signed request, see - * - * Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and - * - * Signature Version 4 Signing Process.
- *If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion
(or --source-region
for the CLI)
- * instead of specifying PreSignedUrl
manually. Specifying SourceRegion
autogenerates a pre-signed URL that is a valid
- * request for the operation that can be executed in the source Amazon Web Services Region.
* TargetDBClusterSnapshotIdentifier
- The identifier for the new copy of the DB cluster snapshot in the destination Amazon Web Services Region.
- * SourceDBClusterSnapshotIdentifier
- The DB cluster snapshot identifier for the encrypted DB cluster snapshot to be copied.
- * This identifier must be in the ARN format for the source Amazon Web Services Region and is the same value as the SourceDBClusterSnapshotIdentifier
in the pre-signed URL.
SourceDBClusterSnapshotIdentifier
- The DB cluster snapshot
+ * identifier for the encrypted DB cluster snapshot to be copied. This identifier
+ * must be in the ARN format for the source Amazon Web Services Region and is the same value as
+ * the SourceDBClusterSnapshotIdentifier
in the presigned URL.
* To cancel the copy operation once it is in progress, delete the target DB cluster snapshot identified diff --git a/clients/client-rds/src/commands/CopyDBSnapshotCommand.ts b/clients/client-rds/src/commands/CopyDBSnapshotCommand.ts index 5b0b9989906ab..354da5bb7264a 100644 --- a/clients/client-rds/src/commands/CopyDBSnapshotCommand.ts +++ b/clients/client-rds/src/commands/CopyDBSnapshotCommand.ts @@ -25,9 +25,9 @@ export interface CopyDBSnapshotCommandOutput extends CopyDBSnapshotResult, __Met /** *
Copies the specified DB snapshot. The source DB snapshot must be in the available
state.
You can copy a snapshot from one Amazon Web Services Region to another. In that case, the Amazon Web Services Region
- * where you call the CopyDBSnapshot
action is the destination Amazon Web Services Region for the
- * DB snapshot copy.
You can copy a snapshot from one Amazon Web Services Region to another. In that case, the
+ * Amazon Web Services Region where you call the CopyDBSnapshot
operation is the destination
+ * Amazon Web Services Region for the DB snapshot copy.
This command doesn't apply to RDS Custom.
*For more information about copying snapshots, see * Copying a DB Snapshot in the Amazon RDS User Guide.
diff --git a/clients/client-rds/src/commands/CreateDBClusterCommand.ts b/clients/client-rds/src/commands/CreateDBClusterCommand.ts index a21220879534a..1e3004de5e061 100644 --- a/clients/client-rds/src/commands/CreateDBClusterCommand.ts +++ b/clients/client-rds/src/commands/CreateDBClusterCommand.ts @@ -27,9 +27,7 @@ export interface CreateDBClusterCommandOutput extends CreateDBClusterResult, __M *Creates a new Amazon Aurora DB cluster or Multi-AZ DB cluster.
*You can use the ReplicationSourceIdentifier
parameter to create an Amazon
* Aurora DB cluster as a read replica of another DB cluster or Amazon RDS MySQL or
- * PostgreSQL DB instance. For cross-Region replication where the DB cluster identified by
- * ReplicationSourceIdentifier
is encrypted, also specify the
- * PreSignedUrl
parameter.
For more information on Amazon Aurora, see * * What is Amazon Aurora? in the Amazon Aurora User Guide.
diff --git a/clients/client-rds/src/commands/CreateDBClusterEndpointCommand.ts b/clients/client-rds/src/commands/CreateDBClusterEndpointCommand.ts index 77e3008cb4de0..2404b4edf0f20 100644 --- a/clients/client-rds/src/commands/CreateDBClusterEndpointCommand.ts +++ b/clients/client-rds/src/commands/CreateDBClusterEndpointCommand.ts @@ -25,7 +25,7 @@ export interface CreateDBClusterEndpointCommandOutput extends DBClusterEndpoint, /** *Creates a new custom endpoint and associates it with an Amazon Aurora DB cluster.
*This action only applies to Aurora DB clusters.
+ *This action applies only to Aurora DB clusters.
*After you create a DB cluster parameter group, you should wait at least 5 minutes
- * before creating your first DB cluster
- * that uses that DB cluster parameter group as the default parameter
- * group. This allows Amazon RDS to fully complete the create action before the DB cluster parameter
- * group is used as the default for a new DB cluster. This is especially important for parameters
- * that are critical when creating the default database for a DB cluster, such as the character set
- * for the default database defined by the character_set_database
parameter. You can use the
- * Parameter Groups option of the Amazon RDS console or the
- * DescribeDBClusterParameters
action to verify
- * that your DB cluster parameter group has been created or modified.
character_set_database
parameter. You can use
+ * the Parameter Groups option of the Amazon RDS console or the
+ * DescribeDBClusterParameters
operation to verify that your DB
+ * cluster parameter group has been created or modified.
* For more information on Amazon Aurora, see
*
diff --git a/clients/client-rds/src/commands/CreateDBInstanceCommand.ts b/clients/client-rds/src/commands/CreateDBInstanceCommand.ts
index 447a1f1307277..2d232eec34b53 100644
--- a/clients/client-rds/src/commands/CreateDBInstanceCommand.ts
+++ b/clients/client-rds/src/commands/CreateDBInstanceCommand.ts
@@ -24,6 +24,14 @@ export interface CreateDBInstanceCommandOutput extends CreateDBInstanceResult, _
/**
* Creates a new DB instance. The new DB instance can be an RDS DB instance, or it can be a DB instance in an Aurora DB cluster.
+ * For an Aurora DB cluster, you can call this operation multiple times to add more than one DB instance
+ * to the cluster. For more information about creating an RDS DB instance, see
+ * Creating an Amazon RDS DB instance in the Amazon RDS User Guide. For more information about creating a DB instance in an Aurora DB cluster, see
+ *
+ * Creating an Amazon Aurora DB cluster in the Amazon Aurora User Guide.
Amazon Aurora doesn't support this action. Call the CreateDBInstance
- * action to create a DB instance for an Aurora DB cluster.
Amazon Aurora doesn't support this operation. Call the CreateDBInstance
+ * operation to create a DB instance for an Aurora DB cluster.
All read replica DB instances are created with backups disabled. All other DB * instance attributes (including DB security groups and DB parameter groups) are inherited * from the source DB instance, except as specified.
diff --git a/clients/client-rds/src/commands/CreateEventSubscriptionCommand.ts b/clients/client-rds/src/commands/CreateEventSubscriptionCommand.ts index 65e45dc947df6..c12ea4af2652a 100644 --- a/clients/client-rds/src/commands/CreateEventSubscriptionCommand.ts +++ b/clients/client-rds/src/commands/CreateEventSubscriptionCommand.ts @@ -23,7 +23,7 @@ export interface CreateEventSubscriptionCommandInput extends CreateEventSubscrip export interface CreateEventSubscriptionCommandOutput extends CreateEventSubscriptionResult, __MetadataBearer {} /** - *Creates an RDS event notification subscription. This action requires a topic Amazon + *
Creates an RDS event notification subscription. This operation requires a topic Amazon * Resource Name (ARN) created by either the RDS console, the SNS console, or the SNS API. * To obtain an ARN with SNS, you must create a topic in Amazon SNS and subscribe to the * topic. The ARN is displayed in the SNS console.
diff --git a/clients/client-rds/src/commands/CreateGlobalClusterCommand.ts b/clients/client-rds/src/commands/CreateGlobalClusterCommand.ts index d025ff7872542..a74eca4ec68f2 100644 --- a/clients/client-rds/src/commands/CreateGlobalClusterCommand.ts +++ b/clients/client-rds/src/commands/CreateGlobalClusterCommand.ts @@ -34,7 +34,7 @@ export interface CreateGlobalClusterCommandOutput extends CreateGlobalClusterRes * Or you can specify an existing Aurora cluster during the create operation, * and this cluster becomes the primary cluster of the global database. *This action only applies to Aurora DB clusters.
+ *This action applies only to Aurora DB clusters.
*Returns events related to DB instances, DB clusters, DB parameter groups, DB security groups, DB snapshots, DB cluster snapshots, and RDS Proxies for the past 14 days. * Events specific to a particular DB instance, DB cluster, DB parameter group, DB security group, DB snapshot, DB cluster snapshot group, or RDS Proxy can be * obtained by providing the name as a parameter.
+ *For more information on working with events, see Monitoring Amazon RDS events in the Amazon RDS User Guide and Monitoring Amazon Aurora + * events in the Amazon Aurora User Guide.
*By default, RDS returns events that were generated in the past hour.
*For a Multi-AZ DB cluster, failover for a DB cluster promotes one of the readable standby DB instances (read-only instances) * in the DB cluster to be the primary DB instance (the cluster writer).
*An Amazon Aurora DB cluster automatically fails over to an Aurora Replica, if one exists, - * when the primary DB instance fails. A Multi-AZ DB cluster automatically fails over to a readbable standby + * when the primary DB instance fails. A Multi-AZ DB cluster automatically fails over to a readable standby * DB instance when the primary DB instance fails.
*To simulate a failure of a primary instance for testing, you can force a failover.
* Because each instance in a DB cluster has its own endpoint address, make sure to clean up and re-establish any existing
diff --git a/clients/client-rds/src/commands/ModifyDBClusterParameterGroupCommand.ts b/clients/client-rds/src/commands/ModifyDBClusterParameterGroupCommand.ts
index 39aa859d1b31e..fd31289a20316 100644
--- a/clients/client-rds/src/commands/ModifyDBClusterParameterGroupCommand.ts
+++ b/clients/client-rds/src/commands/ModifyDBClusterParameterGroupCommand.ts
@@ -37,7 +37,7 @@ export interface ModifyDBClusterParameterGroupCommandOutput
* that are critical when creating the default database for a DB cluster, such as the character set
* for the default database defined by the character_set_database
parameter. You can use the
* Parameter Groups option of the Amazon RDS console or the
- * DescribeDBClusterParameters
action to verify
+ * DescribeDBClusterParameters
operation to verify
* that your DB cluster parameter group has been created or modified.
If the modified DB cluster parameter group is used by an Aurora Serverless v1 cluster, Aurora
* applies the update immediately. The cluster restart might interrupt your workload. In that case,
diff --git a/clients/client-rds/src/commands/ModifyDBClusterSnapshotAttributeCommand.ts b/clients/client-rds/src/commands/ModifyDBClusterSnapshotAttributeCommand.ts
index 6ad6af00a36f2..07291f8cf7ce0 100644
--- a/clients/client-rds/src/commands/ModifyDBClusterSnapshotAttributeCommand.ts
+++ b/clients/client-rds/src/commands/ModifyDBClusterSnapshotAttributeCommand.ts
@@ -42,7 +42,7 @@ export interface ModifyDBClusterSnapshotAttributeCommandOutput
* parameter. You can't use all
as a value for that parameter in this
* case.
To view which Amazon Web Services accounts have access to copy or restore a manual DB cluster
- * snapshot, or whether a manual DB cluster snapshot is public or private, use the DescribeDBClusterSnapshotAttributes API action. The accounts are
+ * snapshot, or whether a manual DB cluster snapshot is public or private, use the DescribeDBClusterSnapshotAttributes API operation. The accounts are
* returned as values for the restore
attribute.
ValuesToAdd
parameter. You
* can't use all
as a value for that parameter in this case.
* To view which Amazon Web Services accounts have access to copy or restore a manual DB snapshot, or
- * whether a manual DB snapshot public or private, use the DescribeDBSnapshotAttributes API action. The accounts are returned as
+ * whether a manual DB snapshot is public or private, use the DescribeDBSnapshotAttributes API operation. The accounts are returned as
* values for the restore
attribute.
For more information about rebooting, see Rebooting a DB Instance in the Amazon RDS User Guide. *
*This command doesn't apply to RDS Custom.
+ *If your DB instance is part of a Multi-AZ DB cluster, you can reboot the DB cluster with the RebootDBCluster
operation.
Stops automated backup replication for a DB instance.
- *This command doesn't apply to RDS Custom.
+ *This command doesn't apply to RDS Custom, Aurora MySQL, and Aurora PostgreSQL.
*For more information, see * Replicating Automated Backups to Another Amazon Web Services Region in the Amazon RDS User Guide. *
diff --git a/clients/client-rds/src/models/models_0.ts b/clients/client-rds/src/models/models_0.ts index d5d05c96e71dc..8d40e4ce37a49 100644 --- a/clients/client-rds/src/models/models_0.ts +++ b/clients/client-rds/src/models/models_0.ts @@ -1632,22 +1632,33 @@ export interface CopyDBClusterSnapshotMessage { KmsKeyId?: string; /** - *The URL that contains a Signature Version 4 signed request for the CopyDBClusterSnapshot
API action in the Amazon Web Services Region that contains the
- * source DB cluster snapshot to copy. The PreSignedUrl
parameter must be used when copying an encrypted DB cluster snapshot from another Amazon Web Services Region.
- * Don't specify PreSignedUrl
when you are copying an encrypted DB cluster snapshot in the same Amazon Web Services Region.
The pre-signed URL must be a valid request for the CopyDBClusterSnapshot
API action that can be
- * executed in the source Amazon Web Services Region that contains the encrypted DB cluster snapshot to be copied.
- * The pre-signed URL request must contain the following parameter values:
When you are copying a DB cluster snapshot from one Amazon Web Services GovCloud (US) Region
+ * to another, the URL that contains a Signature Version 4 signed request for the
+ * CopyDBClusterSnapshot
API operation in the Amazon Web Services Region that contains
+ * the source DB cluster snapshot to copy. Use the PreSignedUrl
parameter when
+ * copying an encrypted DB cluster snapshot from another Amazon Web Services Region. Don't specify
+ * PreSignedUrl
when copying an encrypted DB cluster snapshot in the same
+ * Amazon Web Services Region.
This setting applies only to Amazon Web Services GovCloud (US) Regions. It's ignored in other + * Amazon Web Services Regions.
+ *The presigned URL must be a valid request for the
+ * CopyDBClusterSnapshot
API operation that can run in the source
+ * Amazon Web Services Region that contains the encrypted DB cluster snapshot to copy. The presigned URL request
+ * must contain the following parameter values:
- * KmsKeyId
- The Amazon Web Services KMS key identifier for the KMS key to use to encrypt the copy of the DB
- * cluster snapshot in the destination Amazon Web Services Region. This is the same identifier for both the CopyDBClusterSnapshot
- * action that is called in the destination Amazon Web Services Region, and the action contained in the pre-signed URL.
KmsKeyId
- The KMS key identifier for the KMS key
+ * to use to encrypt the copy of the DB cluster snapshot in the destination
+ * Amazon Web Services Region. This is the same identifier for both the
+ * CopyDBClusterSnapshot
operation that is called in the
+ * destination Amazon Web Services Region, and the operation contained in the presigned
+ * URL.
*
- * DestinationRegion
- The name of the Amazon Web Services Region that the DB cluster snapshot is to be created in.
DestinationRegion
- The name of the Amazon Web Services Region
+ * that the DB cluster snapshot is to be created in.
* @@ -1658,15 +1669,16 @@ export interface CopyDBClusterSnapshotMessage { *
To learn how to generate a Signature Version 4 signed request, see - * * * Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and * * Signature Version 4 Signing Process.
*If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion
(or --source-region
for the CLI)
- * instead of specifying PreSignedUrl
manually. Specifying SourceRegion
autogenerates a pre-signed URL that is a valid
- * request for the operation that can be executed in the source Amazon Web Services Region.
If you are using an Amazon Web Services SDK tool or the CLI, you can specify
+ * SourceRegion
(or --source-region
for the CLI)
+ * instead of specifying PreSignedUrl
manually. Specifying
+ * SourceRegion
autogenerates a presigned URL that is a valid request
+ * for the operation that can run in the source Amazon Web Services Region.
arn:aws:rds:us-west-2:123456789012:snapshot:mysql-instance1-snapshot-20130805
.
* If you are copying from a shared manual DB snapshot, * this parameter must be the Amazon Resource Name (ARN) of the shared DB snapshot.
- *If you are copying an encrypted snapshot
- * this parameter must be in the ARN format for the source Amazon Web Services Region,
- * and must match the SourceDBSnapshotIdentifier
in the PreSignedUrl
parameter.
If you are copying an encrypted snapshot, this parameter must be in the ARN format for the source Amazon Web Services Region.
*Constraints:
*The URL that contains a Signature Version 4 signed request for the
- * CopyDBSnapshot
API action in the source Amazon Web Services Region that contains the
- * source DB snapshot to copy.
When you are copying a snapshot from one Amazon Web Services GovCloud (US) Region to another,
+ * the URL that contains a Signature Version 4 signed request for the CopyDBSnapshot
API
+ * operation in the source Amazon Web Services Region that contains the source DB snapshot to copy.
This setting applies only to Amazon Web Services GovCloud (US) Regions. It's ignored in other + * Amazon Web Services Regions.
*You must specify this parameter when you copy an encrypted DB snapshot from another
* Amazon Web Services Region by using the Amazon RDS API. Don't specify PreSignedUrl
when you are
* copying an encrypted DB snapshot in the same Amazon Web Services Region.
The presigned URL must be a valid request for the CopyDBSnapshot
API action
- * that can be executed in the source Amazon Web Services Region that contains the encrypted DB snapshot to be copied.
- * The presigned URL request must contain the following parameter values:
The presigned URL must be a valid request for the
+ * CopyDBSnapshot
API operation that can run in the source
+ * Amazon Web Services Region that contains the encrypted DB snapshot to copy. The presigned URL request
+ * must contain the following parameter values:
- * DestinationRegion
- The Amazon Web Services Region that the encrypted DB snapshot is copied to.
- * This Amazon Web Services Region is the same one where the CopyDBSnapshot
action is called that contains this presigned URL.
For example, if you copy an encrypted DB snapshot from the us-west-2 Amazon Web Services Region
- * to the us-east-1 Amazon Web Services Region, then you call the CopyDBSnapshot
action in
- * the us-east-1 Amazon Web Services Region and provide a presigned URL that contains a call to the
- * CopyDBSnapshot
action in the us-west-2 Amazon Web Services Region. For this
- * example, the DestinationRegion
in the presigned URL must be set to
- * the us-east-1 Amazon Web Services Region.
DestinationRegion
- The Amazon Web Services Region that the encrypted DB
+ * snapshot is copied to. This Amazon Web Services Region is the same one where the
+ * CopyDBSnapshot
operation is called that contains this presigned
+ * URL.
+ * For example, if you copy an encrypted DB snapshot from the us-west-2
+ * Amazon Web Services Region to the us-east-1 Amazon Web Services Region, then you call the
+ * CopyDBSnapshot
operation in the us-east-1 Amazon Web Services Region and
+ * provide a presigned URL that contains a call to the CopyDBSnapshot
+ * operation in the us-west-2 Amazon Web Services Region. For this example, the
+ * DestinationRegion
in the presigned URL must be set to the
+ * us-east-1 Amazon Web Services Region.
- * KmsKeyId
- The Amazon Web Services KMS key identifier for the KMS key to use to encrypt the copy of the DB snapshot in the destination Amazon Web Services Region.
- * This is the same identifier for both the CopyDBSnapshot
action that is called in the destination Amazon Web Services Region,
- * and the action contained in the presigned URL.
KmsKeyId
- The KMS key identifier for the KMS key to use to
+ * encrypt the copy of the DB snapshot in the destination Amazon Web Services Region. This is the
+ * same identifier for both the CopyDBSnapshot
operation that is
+ * called in the destination Amazon Web Services Region, and the operation contained in the
+ * presigned URL.
* @@ -2193,9 +2211,11 @@ export interface CopyDBSnapshotMessage { * Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and * Signature Version 4 Signing Process.
*If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion
(or --source-region
for the CLI)
- * instead of specifying PreSignedUrl
manually. Specifying SourceRegion
autogenerates a pre-signed URL that is a valid
- * request for the operation that can be executed in the source Amazon Web Services Region.
If you are using an Amazon Web Services SDK tool or the CLI, you can specify
+ * SourceRegion
(or --source-region
for the CLI)
+ * instead of specifying PreSignedUrl
manually. Specifying
+ * SourceRegion
autogenerates a presigned URL that is a valid request
+ * for the operation that can run in the source Amazon Web Services Region.
The status of the VPC security group.
+ *The membership status of the VPC security group.
+ *Currently, the only valid status is active
.
A URL that contains a Signature Version 4 signed request for
- * the CreateDBCluster
action to be called in the source Amazon Web Services Region where the DB cluster is replicated from.
- * Specify PreSignedUrl
only when you are performing cross-Region replication from an encrypted DB cluster.
The pre-signed URL must be a valid request for the CreateDBCluster
API action
- * that can be executed in the source Amazon Web Services Region that contains the encrypted DB cluster to be copied.
The pre-signed URL request must contain the following parameter values:
+ *When you are replicating a DB cluster from one Amazon Web Services GovCloud (US) Region to another,
+ * a URL that contains a Signature Version 4 signed request for the
+ * CreateDBCluster
operation to be called in the source Amazon Web Services Region where
+ * the DB cluster is replicated from. Specify PreSignedUrl
only when you are
+ * performing cross-Region replication from an encrypted DB cluster.
The presigned URL must be a valid request for the CreateDBCluster
API
+ * operation that can run in the source Amazon Web Services Region that contains the encrypted DB
+ * cluster to copy.
The presigned URL request must contain the following parameter values:
*
- * KmsKeyId
- The Amazon Web Services KMS key identifier for the KMS key to use to encrypt the copy of
- * the DB cluster in the destination Amazon Web Services Region. This should refer to the same KMS key for both the CreateDBCluster
- * action that is called in the destination Amazon Web Services Region, and the action contained in the pre-signed URL.
KmsKeyId
- The KMS key identifier for the KMS key to use to
+ * encrypt the copy of the DB cluster in the destination Amazon Web Services Region. This should
+ * refer to the same KMS key for both the CreateDBCluster
operation
+ * that is called in the destination Amazon Web Services Region, and the operation contained in
+ * the presigned URL.
* @@ -3805,9 +3832,11 @@ export interface CreateDBClusterMessage { * * Signature Version 4 Signing Process.
*If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion
(or --source-region
for the CLI)
- * instead of specifying PreSignedUrl
manually. Specifying SourceRegion
autogenerates a pre-signed URL that is a valid
- * request for the operation that can be executed in the source Amazon Web Services Region.
If you are using an Amazon Web Services SDK tool or the CLI, you can specify
+ * SourceRegion
(or --source-region
for the CLI)
+ * instead of specifying PreSignedUrl
manually. Specifying
+ * SourceRegion
autogenerates a presigned URL that is a valid request
+ * for the operation that can run in the source Amazon Web Services Region.
Valid for: Aurora DB clusters only
*/ @@ -3819,7 +3848,7 @@ export interface CreateDBClusterMessage { * enabled. *For more information, see * - * IAM Database Authentication in the Amazon Aurora User Guide..
+ * IAM Database Authentication in the Amazon Aurora User Guide. *Valid for: Aurora DB clusters only
*/ EnableIAMDatabaseAuthentication?: boolean; @@ -3871,6 +3900,7 @@ export interface CreateDBClusterMessage { *The global
engine mode isn't required for Aurora MySQL version 1.22 and higher 1.x versions,
* and global
engine mode isn't required for any 2.x versions.
The multimaster
engine mode only applies for DB clusters created with Aurora MySQL version 5.6.10a.
The serverless
engine mode only applies for Aurora Serverless v1 DB clusters.
For Aurora PostgreSQL, the global
engine mode isn't required, and both the parallelquery
* and the multimaster
engine modes currently aren't supported.
Limitations and requirements apply to some DB engine modes. For more information, see the @@ -3878,26 +3908,31 @@ export interface CreateDBClusterMessage { *
- * - * Limitations of Aurora Serverless v1 + * Limitations of Aurora + * Serverless v1 + *
+ *- * - * Limitations of Parallel Query + * Limitations of Parallel Query *
*- * - * Limitations of Aurora Global Databases + * Limitations of + * Aurora Global Databases *
*- * - * Limitations of Multi-Master Clusters + * Limitations of + * Multi-Master Clusters *
*The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).
+ *The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:
+ *7
+ *+ * month * 31, where month is a number of months from 1-23
+ *731
+ *For example, the following values are valid:
+ *93 (3 months * 31)
+ *341 (11 months * 31)
+ *589 (19 months * 31)
+ *731
+ *If you specify a retention period such as 94, which isn't a valid value, RDS issues an error.
*Valid for: Multi-AZ DB clusters only
*/ PerformanceInsightsRetentionPeriod?: number; @@ -4332,8 +4395,7 @@ export namespace ClusterPendingModifiedValues { */ export interface ScalingConfigurationInfo { /** - *The maximum capacity for the Aurora DB cluster in serverless
DB engine
- * mode.
The minimum capacity for an Aurora DB cluster in serverless
DB engine mode.
The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).
+ *The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:
+ *7
+ *+ * month * 31, where month is a number of months from 1-23
+ *731
+ *For example, the following values are valid:
+ *93 (3 months * 31)
+ *341 (11 months * 31)
+ *589 (19 months * 31)
+ *731
+ *This setting is only for non-Aurora Multi-AZ DB clusters.
*/ PerformanceInsightsRetentionPeriod?: number; @@ -5819,11 +5908,13 @@ export interface CreateDBInstanceMessage { AllocatedStorage?: number; /** - *The compute and memory capacity of the DB instance, for example db.m4.large. + *
The compute and memory capacity of the DB instance, for example db.m5.large. * Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. - * For the full list of DB instance classes, - * and availability for your engine, see - * DB Instance Class in the Amazon RDS User Guide.
+ * For the full list of DB instance classes, and availability for your engine, see + * DB instance + * classes in the Amazon RDS User Guide or + * Aurora + * DB instance classes in the Amazon Aurora User Guide. */ DBInstanceClass: string | undefined; @@ -5983,7 +6074,8 @@ export interface CreateDBInstanceMessage { /** *A list of DB security groups to associate with this DB instance.
- *Default: The default DB security group for the database engine.
+ *This setting applies to the legacy EC2-Classic platform, which is no longer used to create
+ * new DB instances. Use the VpcSecurityGroupIds
setting instead.
Can't be set to 0 if the DB instance is a source to read replicas
*Can't be set to 0 or 35 for an RDS Custom for Oracle DB instance
+ *Can't be set to 0 for an RDS Custom for Oracle DB instance
*A value that indicates whether the DB instance is a Multi-AZ deployment. You can't set
* the AvailabilityZone
parameter if the DB instance is a Multi-AZ deployment.
This setting doesn't apply to RDS Custom.
+ *+ * Amazon Aurora + *
+ *Not applicable. DB instance Availability Zones (AZs) are managed by the DB cluster.
*/ MultiAZ?: boolean; /** *The version number of the database engine to use.
- *For a list of valid engine versions, use the DescribeDBEngineVersions
action.
For a list of valid engine versions, use the DescribeDBEngineVersions
+ * operation.
The following are the database engines and links to information about the major and minor versions that are available with * Amazon RDS. Not every database engine is available for every Amazon Web Services Region.
*@@ -6232,6 +6329,10 @@ export interface CreateDBInstanceMessage { *
Valid values: license-included
| bring-your-own-license
| general-public-license
*
This setting doesn't apply to RDS Custom.
+ *+ * Amazon Aurora + *
+ *Not applicable.
*/ LicenseModel?: string; @@ -6241,6 +6342,10 @@ export interface CreateDBInstanceMessage { *Constraints: For MariaDB, MySQL, Oracle, and PostgreSQL DB instances, must be a multiple between .5 and 50 * of the storage amount for the DB instance. For SQL Server DB instances, must be a multiple between 1 and 50 * of the storage amount for the DB instance.
+ *+ * Amazon Aurora + *
+ *Not applicable. Storage is managed by the DB cluster.
*/ Iops?: number; @@ -6250,6 +6355,10 @@ export interface CreateDBInstanceMessage { * from an option group. Also, that option group can't be removed from a DB instance after it is * associated with a DB instance. *This setting doesn't apply to RDS Custom.
+ *+ * Amazon Aurora + *
+ *Not applicable.
*/ OptionGroupName?: string; @@ -6321,12 +6430,20 @@ export interface CreateDBInstanceMessage { *Default: io1
if the Iops
parameter
* is specified, otherwise gp2
*
+ * Amazon Aurora + *
+ *Not applicable. Storage is managed by the DB cluster.
*/ StorageType?: string; /** *The ARN from the key store with which to associate the instance for TDE encryption.
*This setting doesn't apply to RDS Custom.
+ *+ * Amazon Aurora + *
+ *Not applicable.
*/ TdeCredentialArn?: string; @@ -6376,6 +6493,10 @@ export interface CreateDBInstanceMessage { *For more information, see * Kerberos Authentication in the Amazon RDS User Guide.
*This setting doesn't apply to RDS Custom.
+ *+ * Amazon Aurora + *
+ *Not applicable. The domain is managed by the DB cluster.
*/ Domain?: string; @@ -6413,6 +6534,10 @@ export interface CreateDBInstanceMessage { /** *Specify the name of the IAM role to be used when making API calls to the Directory Service.
*This setting doesn't apply to RDS Custom.
+ *+ * Amazon Aurora + *
+ *Not applicable. The domain is managed by the DB cluster.
*/ DomainIAMRoleName?: string; @@ -6437,11 +6562,14 @@ export interface CreateDBInstanceMessage { /** *A value that indicates whether to enable mapping of Amazon Web Services Identity and Access Management * (IAM) accounts to database accounts. By default, mapping isn't enabled.
- *This setting doesn't apply to RDS Custom or Amazon Aurora. In Aurora, mapping Amazon Web Services IAM accounts - * to database accounts is managed by the DB cluster.
*For more information, see - * - * IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.
+ * + * IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide. + *This setting doesn't apply to RDS Custom.
+ *+ * Amazon Aurora + *
+ *Not applicable. Mapping Amazon Web Services IAM accounts to database accounts is managed by the DB cluster.
*/ EnableIAMDatabaseAuthentication?: boolean; @@ -6463,7 +6591,35 @@ export interface CreateDBInstanceMessage { PerformanceInsightsKMSKeyId?: string; /** - *The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).
+ *The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:
+ *7
+ *+ * month * 31, where month is a number of months from 1-23
+ *731
+ *For example, the following values are valid:
+ *93 (3 months * 31)
+ *341 (11 months * 31)
+ *589 (19 months * 31)
+ *731
+ *If you specify a retention period such as 94, which isn't a valid value, RDS issues an error.
*This setting doesn't apply to RDS Custom.
*/ PerformanceInsightsRetentionPeriod?: number; @@ -6508,6 +6664,10 @@ export interface CreateDBInstanceMessage { /** *The number of CPU cores and the number of threads per core for the DB instance class of the DB instance.
*This setting doesn't apply to RDS Custom.
+ *+ * Amazon Aurora + *
+ *Not applicable.
*/ ProcessorFeatures?: ProcessorFeature[]; @@ -6533,6 +6693,10 @@ export interface CreateDBInstanceMessage { * Managing capacity automatically with Amazon RDS storage autoscaling * in the Amazon RDS User Guide. *This setting doesn't apply to RDS Custom.
+ *+ * Amazon Aurora + *
+ *Not applicable. Storage is managed by the DB cluster.
*/ MaxAllocatedStorage?: number; @@ -7482,7 +7646,34 @@ export interface DBInstance { PerformanceInsightsKMSKeyId?: string; /** - *The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).
+ *The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:
+ *7
+ *+ * month * 31, where month is a number of months from 1-23
+ *731
+ *For example, the following values are valid:
+ *93 (3 months * 31)
+ *341 (11 months * 31)
+ *589 (19 months * 31)
+ *731
+ *DBParameterGroup
of source DB instance for a same Region read
* replica, or the default DBParameterGroup
for the specified DB engine for a
* cross-Region read replica.
- * Specifying a parameter group for this operation is only supported for Oracle DB instances. It - * isn't supported for RDS Custom.
+ *Specifying a parameter group for this operation is only supported for MySQL and Oracle DB instances. + * It isn't supported for RDS Custom.
*Constraints:
*The URL that contains a Signature Version 4 signed request for the CreateDBInstanceReadReplica
API action
- * in the source Amazon Web Services Region that contains the source DB instance.
When you are creating a read replica from one Amazon Web Services GovCloud (US) Region to another or
+ * from one China Amazon Web Services Region to another, the URL that contains a Signature Version 4
+ * signed request for the CreateDBInstanceReadReplica
API operation in the
+ * source Amazon Web Services Region that contains the source DB instance.
This setting applies only to Amazon Web Services GovCloud (US) Regions and + * China Amazon Web Services Regions. It's ignored in other Amazon Web Services Regions.
*You must specify this parameter when you create an encrypted read replica from
* another Amazon Web Services Region by using the Amazon RDS API. Don't specify
* PreSignedUrl
when you are creating an encrypted read replica in the
* same Amazon Web Services Region.
The presigned URL must be a valid request for the CreateDBInstanceReadReplica
API action
- * that can be executed in the source Amazon Web Services Region that contains the encrypted source DB instance.
- * The presigned URL request must contain the following parameter values:
The presigned URL must be a valid request for the
+ * CreateDBInstanceReadReplica
API operation that can run in the
+ * source Amazon Web Services Region that contains the encrypted source DB instance. The presigned URL
+ * request must contain the following parameter values:
* DestinationRegion
- The Amazon Web Services Region that the encrypted read
* replica is created in. This Amazon Web Services Region is the same one where the
- * CreateDBInstanceReadReplica
action is called that contains this presigned URL.
For example, if you create an encrypted DB instance in the us-west-1 Amazon Web Services Region,
- * from a source DB instance in the us-east-2 Amazon Web Services Region,
- * then you call the CreateDBInstanceReadReplica
action in
- * the us-east-1 Amazon Web Services Region and provide a presigned URL that contains a call to the
- * CreateDBInstanceReadReplica
action in the us-west-2 Amazon Web Services Region. For this
- * example, the DestinationRegion
in the presigned URL must be set to
- * the us-east-1 Amazon Web Services Region.
CreateDBInstanceReadReplica
operation is called that contains
+ * this presigned URL.
+ * For example, if you create an encrypted DB instance in the us-west-1
+ * Amazon Web Services Region, from a source DB instance in the us-east-2 Amazon Web Services Region, then you
+ * call the CreateDBInstanceReadReplica
operation in the us-east-1
+ * Amazon Web Services Region and provide a presigned URL that contains a call to the
+ * CreateDBInstanceReadReplica
operation in the us-west-2
+ * Amazon Web Services Region. For this example, the DestinationRegion
in the
+ * presigned URL must be set to the us-east-1 Amazon Web Services Region.
- * KmsKeyId
- The Amazon Web Services KMS key identifier for the key to use to
+ * KmsKeyId
- The KMS key identifier for the key to use to
* encrypt the read replica in the destination Amazon Web Services Region. This is the same
- * identifier for both the CreateDBInstanceReadReplica
action that is
- * called in the destination Amazon Web Services Region, and the action contained in the presigned
- * URL.
CreateDBInstanceReadReplica
operation that
+ * is called in the destination Amazon Web Services Region, and the operation contained in the
+ * presigned URL.
*
@@ -8095,9 +8292,9 @@ export interface CreateDBInstanceReadReplicaMessage {
* SourceRegion
(or --source-region
for the CLI)
* instead of specifying PreSignedUrl
manually. Specifying
* SourceRegion
autogenerates a presigned URL that is a valid request
- * for the operation that can be executed in the source Amazon Web Services Region.
- * SourceRegion
isn't supported for SQL Server, because SQL Server on Amazon RDS
+ * SourceRegion
isn't supported for SQL Server, because Amazon RDS for SQL Server
* doesn't support cross-Region read replicas.
This setting doesn't apply to RDS Custom.
@@ -8133,7 +8330,36 @@ export interface CreateDBInstanceReadReplicaMessage { PerformanceInsightsKMSKeyId?: string; /** - *The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).
+ *The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:
+ *7
+ *+ * month * 31, where month is a number of months from 1-23
+ *731
+ *For example, the following values are valid:
+ *93 (3 months * 31)
+ *341 (11 months * 31)
+ *589 (19 months * 31)
+ *731
+ *If you specify a retention period such as 94, which isn't a valid value, RDS issues an error.
+ * *This setting doesn't apply to RDS Custom.
*/ PerformanceInsightsRetentionPeriod?: number; @@ -8538,8 +8764,8 @@ export interface CreateDBProxyRequest { /** *The kinds of databases that the proxy can connect to. * This value determines which database network protocol the proxy recognizes when it interprets - * network traffic to and from the database. - * The engine family applies to MySQL and PostgreSQL for both RDS and Aurora.
+ * network traffic to and from the database. For Aurora MySQL, RDS for MariaDB, and RDS for MySQL databases, specifyMYSQL
.
+ * For Aurora PostgreSQL and RDS for PostgreSQL databases, specify POSTGRESQL
.
*/
EngineFamily: EngineFamily | string | undefined;
@@ -8675,7 +8901,9 @@ export interface DBProxy {
Status?: DBProxyStatus | string;
/**
- * The engine family applies to MySQL and PostgreSQL for both RDS and Aurora.
+ *The kinds of databases that the proxy can connect to. This value determines which database network protocol
+ * the proxy recognizes when it interprets network traffic to and from the database. MYSQL
supports Aurora MySQL,
+ * RDS for MariaDB, and RDS for MySQL databases. POSTGRESQL
supports Aurora PostgreSQL and RDS for PostgreSQL databases.
The name for your database of up to 64 alpha-numeric characters. If you do not provide a name, Amazon - * Aurora will not create a database in the global database cluster you are creating.
+ *The name for your database of up to 64 alphanumeric characters. If you do not provide + * a name, Amazon Aurora will not create a database in the global database cluster you are + * creating.
*/ DatabaseName?: string; diff --git a/clients/client-rds/src/models/models_1.ts b/clients/client-rds/src/models/models_1.ts index 3738acca2f311..02e3a869c0c33 100644 --- a/clients/client-rds/src/models/models_1.ts +++ b/clients/client-rds/src/models/models_1.ts @@ -117,7 +117,6 @@ export namespace DBSnapshotMessage { export interface DescribeDBSnapshotsMessage { /** *The ID of the DB instance to retrieve the list of DB snapshots for.
- * This parameter can't be used in conjunction with DBSnapshotIdentifier
.
* This parameter isn't case-sensitive.
Constraints:
*A specific DB snapshot identifier to describe. This parameter can't be used in conjunction with DBInstanceIdentifier
.
+ *
A specific DB snapshot identifier to describe. * This value is stored as a lowercase string.
*Constraints:
*Contains the results of a successful invocation of the DescribeEventCategories
operation.
Contains the results of a successful invocation of the DescribeEventCategories + * operation.
*/ export interface EventCategoriesMap { /** @@ -870,7 +870,7 @@ export namespace DescribeEventsMessage { } /** - *This data type is used as a response element in the DescribeEvents
action.
This data type is used as a response element in the DescribeEvents action.
*/ export interface Event { /** @@ -1789,7 +1789,7 @@ export interface DescribeOrderableDBInstanceOptionsMessage { * a pagination token called a marker is included in the response so that * you can retrieve the remaining results. *Default: 100
- *Constraints: Minimum 20, maximum 100.
+ *Constraints: Minimum 20, maximum 10000.
*/ MaxRecords?: number; @@ -3766,7 +3766,35 @@ export interface ModifyDBClusterMessage { PerformanceInsightsKMSKeyId?: string; /** - *The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).
+ *The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:
+ *7
+ *+ * month * 31, where month is a number of months from 1-23
+ *731
+ *For example, the following values are valid:
+ *93 (3 months * 31)
+ *341 (11 months * 31)
+ *589 (19 months * 31)
+ *731
+ *If you specify a retention period such as 94, which isn't a valid value, RDS issues an error.
*Valid for: Multi-AZ DB clusters only
*/ PerformanceInsightsRetentionPeriod?: number; @@ -3937,7 +3965,7 @@ export interface ModifyDBClusterSnapshotAttributeMessage { * set this value torestore
.
* To view the list of attributes available to modify, use the - * DescribeDBClusterSnapshotAttributes API action.
+ * DescribeDBClusterSnapshotAttributes API operation. *The new compute and memory capacity of the DB instance, for example db.m4.large. + *
The new compute and memory capacity of the DB instance, for example db.m5.large. * Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. - * For the full list of DB instance classes, - * and availability for your engine, see - * DB Instance Class in the Amazon RDS User Guide.
+ * For the full list of DB instance classes, and availability for your engine, see + * DB instance + * classes in the Amazon RDS User Guide or + * Aurora + * DB instance classes in the Amazon Aurora User Guide. *If you modify the DB instance class, an outage occurs during the change.
* The change is applied during the next maintenance window,
* unless ApplyImmediately
is enabled for this request.
Constraints: Must contain from 8 to 128 characters.
*Amazon RDS API actions never return the password, + *
Amazon RDS API operations never return the password, * so this action provides a way to regain access to a primary instance user if the password is lost. * This includes restoring privileges that might have been accidentally revoked.
*It must be a value from 0 to 35. It can't be set to 0 if the DB instance is a source to - * read replicas. It can't be set to 0 or 35 for an RDS Custom for Oracle DB instance.
+ * read replicas. It can't be set to 0 for an RDS Custom for Oracle DB instance. *It can be specified for a MySQL read replica only if the source is running MySQL 5.6 or @@ -4585,7 +4615,7 @@ export interface ModifyDBInstanceMessage { /** *
A value that indicates whether to enable Performance Insights for the DB instance.
*For more information, see - * Using Amazon Performance Insights in the Amazon RDS User Guide..
+ * Using Amazon Performance Insights in the Amazon RDS User Guide. *This setting doesn't apply to RDS Custom.
*/ EnablePerformanceInsights?: boolean; @@ -4601,7 +4631,35 @@ export interface ModifyDBInstanceMessage { PerformanceInsightsKMSKeyId?: string; /** - *The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).
+ *The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:
+ *7
+ *+ * month * 31, where month is a number of months from 1-23
+ *731
+ *For example, the following values are valid:
+ *93 (3 months * 31)
+ *341 (11 months * 31)
+ *589 (19 months * 31)
+ *731
+ *If you specify a retention period such as 94, which isn't a valid value, RDS issues an error.
*This setting doesn't apply to RDS Custom.
*/ PerformanceInsightsRetentionPeriod?: number; @@ -5023,12 +5081,12 @@ export namespace ConnectionPoolConfiguration { export interface ModifyDBProxyTargetGroupRequest { /** - *The name of the new target group to assign to the proxy.
+ *The name of the target group to modify.
*/ TargetGroupName: string | undefined; /** - *The name of the new proxy to which to assign the target group.
+ *The name of the proxy.
*/ DBProxyName: string | undefined; @@ -5165,7 +5223,7 @@ export interface ModifyDBSnapshotAttributeMessage { * set this value torestore
.
* To view the list of attributes available to modify, use the - * DescribeDBSnapshotAttributes API action.
+ * DescribeDBSnapshotAttributes API operation. *The name of the database engine to be used for this DB cluster.
- *Valid Values: aurora
(for MySQL 5.6-compatible Aurora), aurora-mysql
(for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora), and aurora-postgresql
- *
Valid Values: aurora
(for MySQL 5.6-compatible Aurora) and aurora-mysql
+ * (for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora)
* aws rds describe-db-engine-versions --engine aurora-mysql --query "DBEngineVersions[].EngineVersion"
*
To list all of the available engine versions for aurora-postgresql
, use the following command:
- * aws rds describe-db-engine-versions --engine aurora-postgresql --query "DBEngineVersions[].EngineVersion"
- *
* Aurora MySQL *
- *Example: 5.6.10a
, 5.6.mysql_aurora.1.19.2
, 5.7.12
, 5.7.mysql_aurora.2.04.5
, 8.0.mysql_aurora.3.01.0
- *
- * Aurora PostgreSQL - *
- *Example: 9.6.3
, 10.7
+ *
Example: 5.6.10a
, 5.6.mysql_aurora.1.19.2
, 5.7.mysql_aurora.2.07.1
,
+ * 8.0.mysql_aurora.3.02.0
*
Possible value is postgresql
.
For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide..
+ *For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.
*For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.
*Valid for: Aurora DB clusters and Multi-AZ DB clusters
*/ @@ -7283,7 +7333,7 @@ export interface RestoreDBClusterToPointInTimeMessage { * Aurora PostgreSQL * *Possible value is postgresql
.
For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide..
+ *For more information about exporting CloudWatch Logs for Amazon RDS, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.
*For more information about exporting CloudWatch Logs for Amazon Aurora, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.
*Valid for: Aurora DB clusters and Multi-AZ DB clusters
*/ @@ -8249,7 +8299,7 @@ export interface RestoreDBInstanceFromS3Message { /** *A value that indicates whether to enable Performance Insights for the DB instance.
*For more information, see - * Using Amazon Performance Insights in the Amazon RDS User Guide..
+ * Using Amazon Performance Insights in the Amazon RDS User Guide. */ EnablePerformanceInsights?: boolean; @@ -8263,7 +8313,35 @@ export interface RestoreDBInstanceFromS3Message { PerformanceInsightsKMSKeyId?: string; /** - *The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).
+ *The number of days to retain Performance Insights data. The default is 7 days. The following values are valid:
+ *7
+ *+ * month * 31, where month is a number of months from 1-23
+ *731
+ *For example, the following values are valid:
+ *93 (3 months * 31)
+ *341 (11 months * 31)
+ *589 (19 months * 31)
+ *731
+ *If you specify a retention period such as 94, which isn't a valid value, RDS issues an error.
*/ PerformanceInsightsRetentionPeriod?: number; @@ -9104,10 +9182,25 @@ export interface StartDBInstanceAutomatedBackupsReplicationMessage { KmsKeyId?: string; /** - *A URL that contains a Signature Version 4 signed request for the StartDBInstanceAutomatedBackupsReplication action to be - * called in the Amazon Web Services Region of the source DB instance. The presigned URL must be a valid request for the - * StartDBInstanceAutomatedBackupsReplication API action that can be executed in the Amazon Web Services Region that contains - * the source DB instance.
+ *In an Amazon Web Services GovCloud (US) Region, an URL that contains a Signature Version 4 signed request
+ * for the StartDBInstanceAutomatedBackupsReplication
operation to call
+ * in the Amazon Web Services Region of the source DB instance. The presigned URL must be a valid request for the
+ * StartDBInstanceAutomatedBackupsReplication
API operation that can run in
+ * the Amazon Web Services Region that contains the source DB instance.
This setting applies only to Amazon Web Services GovCloud (US) Regions. It's ignored in other + * Amazon Web Services Regions.
+ *To learn how to generate a Signature Version 4 signed request, see + * + * Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and + * + * Signature Version 4 Signing Process.
+ *If you are using an Amazon Web Services SDK tool or the CLI, you can specify
+ * SourceRegion
(or --source-region
for the CLI)
+ * instead of specifying PreSignedUrl
manually. Specifying
+ * SourceRegion
autogenerates a presigned URL that is a valid request
+ * for the operation that can run in the source Amazon Web Services Region.
SageMaker Edge Manager dataplane service for communicating with active agents.
*/ export class SagemakerEdge extends SagemakerEdgeClient { + /** + *Use to get the active deployments from a device.
+ */ + public getDeployments( + args: GetDeploymentsCommandInput, + options?: __HttpHandlerOptions + ): PromiseUse to check if a device is registered with SageMaker Edge Manager.
*/ diff --git a/clients/client-sagemaker-edge/src/SagemakerEdgeClient.ts b/clients/client-sagemaker-edge/src/SagemakerEdgeClient.ts index cb2bd0a0a254e..74830c6e85e6f 100644 --- a/clients/client-sagemaker-edge/src/SagemakerEdgeClient.ts +++ b/clients/client-sagemaker-edge/src/SagemakerEdgeClient.ts @@ -53,6 +53,7 @@ import { UserAgent as __UserAgent, } from "@aws-sdk/types"; +import { GetDeploymentsCommandInput, GetDeploymentsCommandOutput } from "./commands/GetDeploymentsCommand"; import { GetDeviceRegistrationCommandInput, GetDeviceRegistrationCommandOutput, @@ -60,9 +61,15 @@ import { import { SendHeartbeatCommandInput, SendHeartbeatCommandOutput } from "./commands/SendHeartbeatCommand"; import { getRuntimeConfig as __getRuntimeConfig } from "./runtimeConfig"; -export type ServiceInputTypes = GetDeviceRegistrationCommandInput | SendHeartbeatCommandInput; +export type ServiceInputTypes = + | GetDeploymentsCommandInput + | GetDeviceRegistrationCommandInput + | SendHeartbeatCommandInput; -export type ServiceOutputTypes = GetDeviceRegistrationCommandOutput | SendHeartbeatCommandOutput; +export type ServiceOutputTypes = + | GetDeploymentsCommandOutput + | GetDeviceRegistrationCommandOutput + | SendHeartbeatCommandOutput; export interface ClientDefaults extends Partial<__SmithyResolvedConfiguration<__HttpHandlerOptions>> { /** diff --git a/clients/client-sagemaker-edge/src/commands/GetDeploymentsCommand.ts b/clients/client-sagemaker-edge/src/commands/GetDeploymentsCommand.ts new file mode 100644 index 0000000000000..ee63ab1e58a95 --- /dev/null +++ b/clients/client-sagemaker-edge/src/commands/GetDeploymentsCommand.ts @@ -0,0 +1,96 @@ +// smithy-typescript generated code +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, 
+ HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { GetDeploymentsRequest, GetDeploymentsResult } from "../models/models_0"; +import { + deserializeAws_restJson1GetDeploymentsCommand, + serializeAws_restJson1GetDeploymentsCommand, +} from "../protocols/Aws_restJson1"; +import { SagemakerEdgeClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../SagemakerEdgeClient"; + +export interface GetDeploymentsCommandInput extends GetDeploymentsRequest {} +export interface GetDeploymentsCommandOutput extends GetDeploymentsResult, __MetadataBearer {} + +/** + *Use to get the active deployments from a device.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { SagemakerEdgeClient, GetDeploymentsCommand } from "@aws-sdk/client-sagemaker-edge"; // ES Modules import + * // const { SagemakerEdgeClient, GetDeploymentsCommand } = require("@aws-sdk/client-sagemaker-edge"); // CommonJS import + * const client = new SagemakerEdgeClient(config); + * const command = new GetDeploymentsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetDeploymentsCommandInput} for command's `input` shape. + * @see {@link GetDeploymentsCommandOutput} for command's `response` shape. + * @see {@link SagemakerEdgeClientResolvedConfig | config} for SagemakerEdgeClient's `config` shape. + * + */ +export class GetDeploymentsCommand extends $Command< + GetDeploymentsCommandInput, + GetDeploymentsCommandOutput, + SagemakerEdgeClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetDeploymentsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackThe unique name of the device you want to get the registration status from.
+ *The unique name of the device you want to get the configuration of active deployments from.
*/ DeviceName: string | undefined; @@ -15,39 +15,144 @@ export interface GetDeviceRegistrationRequest { DeviceFleetName: string | undefined; } -export namespace GetDeviceRegistrationRequest { +export namespace GetDeploymentsRequest { /** * @internal */ - export const filterSensitiveLog = (obj: GetDeviceRegistrationRequest): any => ({ + export const filterSensitiveLog = (obj: GetDeploymentsRequest): any => ({ ...obj, }); } -export interface GetDeviceRegistrationResult { +export enum ChecksumType { + Sha1 = "SHA1", +} + +/** + *Information about the checksum of a model deployed on a device.
+ */ +export interface Checksum { /** - *Describes if the device is currently registered with SageMaker Edge Manager.
+ *The type of the checksum.
*/ - DeviceRegistration?: string; + Type?: ChecksumType | string; /** - *The amount of time, in seconds, that the registration status is stored on the device’s cache before it is refreshed.
+ *The checksum of the model.
*/ - CacheTTL?: string; + Sum?: string; } -export namespace GetDeviceRegistrationResult { +export namespace Checksum { /** * @internal */ - export const filterSensitiveLog = (obj: GetDeviceRegistrationResult): any => ({ + export const filterSensitiveLog = (obj: Checksum): any => ({ + ...obj, + }); +} + +export enum ModelState { + Deploy = "DEPLOY", + Undeploy = "UNDEPLOY", +} + +/** + * + */ +export interface Definition { + /** + *The unique model handle.
+ */ + ModelHandle?: string; + + /** + *The absolute S3 location of the model.
+ */ + S3Url?: string; + + /** + *The checksum information of the model.
+ */ + Checksum?: Checksum; + + /** + *The desired state of the model.
+ */ + State?: ModelState | string; +} + +export namespace Definition { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Definition): any => ({ + ...obj, + }); +} + +export enum FailureHandlingPolicy { + DoNothing = "DO_NOTHING", + RollbackOnFailure = "ROLLBACK_ON_FAILURE", +} + +export enum DeploymentType { + Model = "Model", +} + +/** + *Information about a deployment on an edge device that is registered with SageMaker Edge Manager.
+ */ +export interface EdgeDeployment { + /** + *The name and unique ID of the deployment.
+ */ + DeploymentName?: string; + + /** + *The type of the deployment.
+ */ + Type?: DeploymentType | string; + + /** + *Determines whether to rollback to previous configuration if deployment fails.
+ */ + FailureHandlingPolicy?: FailureHandlingPolicy | string; + + /** + *Returns a list of Definition objects.
+ */ + Definitions?: Definition[]; +} + +export namespace EdgeDeployment { + /** + * @internal + */ + export const filterSensitiveLog = (obj: EdgeDeployment): any => ({ + ...obj, + }); +} + +export interface GetDeploymentsResult { + /** + *Returns a list of the configurations of the active deployments on the device.
+ */ + Deployments?: EdgeDeployment[]; +} + +export namespace GetDeploymentsResult { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetDeploymentsResult): any => ({ ...obj, }); } /** *An internal failure occurred. Try your request again. If the problem - * persists, contact AWS customer support.
+ * persists, contact Amazon Web Services customer support. */ export class InternalServiceException extends __BaseException { readonly name: "InternalServiceException" = "InternalServiceException"; @@ -67,6 +172,48 @@ export class InternalServiceException extends __BaseException { } } +export interface GetDeviceRegistrationRequest { + /** + *The unique name of the device you want to get the registration status from.
+ */ + DeviceName: string | undefined; + + /** + *The name of the fleet that the device belongs to.
+ */ + DeviceFleetName: string | undefined; +} + +export namespace GetDeviceRegistrationRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetDeviceRegistrationRequest): any => ({ + ...obj, + }); +} + +export interface GetDeviceRegistrationResult { + /** + *Describes if the device is currently registered with SageMaker Edge Manager.
+ */ + DeviceRegistration?: string; + + /** + *The amount of time, in seconds, that the registration status is stored on the device’s cache before it is refreshed.
+ */ + CacheTTL?: string; +} + +export namespace GetDeviceRegistrationResult { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetDeviceRegistrationResult): any => ({ + ...obj, + }); +} + /** *Information required for edge device metrics.
*/ @@ -101,6 +248,109 @@ export namespace EdgeMetric { }); } +export enum DeploymentStatus { + Fail = "FAIL", + Success = "SUCCESS", +} + +/** + * + */ +export interface DeploymentModel { + /** + *The unique handle of the model.
+ */ + ModelHandle?: string; + + /** + *The name of the model.
+ */ + ModelName?: string; + + /** + *The version of the model.
+ */ + ModelVersion?: string; + + /** + *The desired state of the model.
+ */ + DesiredState?: ModelState | string; + + /** + *Returns the current state of the model.
+ */ + State?: ModelState | string; + + /** + *Returns the deployment status of the model.
+ */ + Status?: DeploymentStatus | string; + + /** + *Returns the error message for the deployment status result.
+ */ + StatusReason?: string; + + /** + *Returns the error message if there is a rollback.
+ */ + RollbackFailureReason?: string; +} + +export namespace DeploymentModel { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeploymentModel): any => ({ + ...obj, + }); +} + +/** + *Information about the result of a deployment on an edge device that is registered with SageMaker Edge Manager.
+ */ +export interface DeploymentResult { + /** + *The name and unique ID of the deployment.
+ */ + DeploymentName?: string; + + /** + *Returns the bucket error code.
+ */ + DeploymentStatus?: string; + + /** + *Returns the detailed error message.
+ */ + DeploymentStatusMessage?: string; + + /** + *The timestamp of when the deployment was started on the agent.
+ */ + DeploymentStartTime?: Date; + + /** + *The timestamp of when the deployment was ended, and the agent got the deployment results.
+ */ + DeploymentEndTime?: Date; + + /** + *Returns a list of models deployed on the agent.
+ */ + DeploymentModels?: DeploymentModel[]; +} + +export namespace DeploymentResult { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeploymentResult): any => ({ + ...obj, + }); +} + /** *Information about a model deployed on an edge device that is registered with SageMaker Edge Manager.
*/ @@ -165,6 +415,11 @@ export interface SendHeartbeatRequest { *The name of the fleet that the device belongs to.
*/ DeviceFleetName: string | undefined; + + /** + *Returns the result of a deployment on the device.
+ */ + DeploymentResult?: DeploymentResult; } export namespace SendHeartbeatRequest { diff --git a/clients/client-sagemaker-edge/src/protocols/Aws_restJson1.ts b/clients/client-sagemaker-edge/src/protocols/Aws_restJson1.ts index 9708ee997581d..2b3e569406c96 100644 --- a/clients/client-sagemaker-edge/src/protocols/Aws_restJson1.ts +++ b/clients/client-sagemaker-edge/src/protocols/Aws_restJson1.ts @@ -13,14 +13,49 @@ import { SerdeContext as __SerdeContext, } from "@aws-sdk/types"; +import { GetDeploymentsCommandInput, GetDeploymentsCommandOutput } from "../commands/GetDeploymentsCommand"; import { GetDeviceRegistrationCommandInput, GetDeviceRegistrationCommandOutput, } from "../commands/GetDeviceRegistrationCommand"; import { SendHeartbeatCommandInput, SendHeartbeatCommandOutput } from "../commands/SendHeartbeatCommand"; -import { EdgeMetric, InternalServiceException, Model } from "../models/models_0"; +import { + Checksum, + Definition, + DeploymentModel, + DeploymentResult, + EdgeDeployment, + EdgeMetric, + InternalServiceException, + Model, +} from "../models/models_0"; import { SagemakerEdgeServiceException as __BaseException } from "../models/SagemakerEdgeServiceException"; +export const serializeAws_restJson1GetDeploymentsCommand = async ( + input: GetDeploymentsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/GetDeployments"; + let body: any; + body = JSON.stringify({ + ...(input.DeviceFleetName != null && { DeviceFleetName: input.DeviceFleetName }), + ...(input.DeviceName != null && { DeviceName: input.DeviceName }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + export const serializeAws_restJson1GetDeviceRegistrationCommand = async ( input: GetDeviceRegistrationCommandInput, context: __SerdeContext @@ -59,6 +94,9 @@ export const serializeAws_restJson1SendHeartbeatCommand = async ( body = JSON.stringify({ ...(input.AgentMetrics != null && { AgentMetrics: serializeAws_restJson1EdgeMetrics(input.AgentMetrics, context) }), ...(input.AgentVersion != null && { AgentVersion: input.AgentVersion }), + ...(input.DeploymentResult != null && { + DeploymentResult: serializeAws_restJson1DeploymentResult(input.DeploymentResult, context), + }), ...(input.DeviceFleetName != null && { DeviceFleetName: input.DeviceFleetName }), ...(input.DeviceName != null && { DeviceName: input.DeviceName }), ...(input.Models != null && { Models: serializeAws_restJson1Models(input.Models, context) }), @@ -74,6 +112,51 @@ export const serializeAws_restJson1SendHeartbeatCommand = async ( }); }; +export const deserializeAws_restJson1GetDeploymentsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): PromiseCreates an edge deployment plan, consisting of multiple stages. Each stage may have a different deployment configuration and devices.
+ */ + public createEdgeDeploymentPlan( + args: CreateEdgeDeploymentPlanCommandInput, + options?: __HttpHandlerOptions + ): PromiseCreates a new stage in an existing edge deployment plan.
+ */ + public createEdgeDeploymentStage( + args: CreateEdgeDeploymentStageCommandInput, + options?: __HttpHandlerOptions + ): PromiseStarts a SageMaker Edge Manager model packaging job. Edge Manager will use the model artifacts from the Amazon Simple Storage Service bucket that you specify. After the model has been packaged, Amazon SageMaker saves the resulting artifacts to an S3 bucket that you specify.
*/ @@ -3708,6 +3817,70 @@ export class SageMaker extends SageMakerClient { } } + /** + *Deletes an edge deployment plan if (and only if) all the stages in the plan are inactive or there are no stages in the plan.
+ */ + public deleteEdgeDeploymentPlan( + args: DeleteEdgeDeploymentPlanCommandInput, + options?: __HttpHandlerOptions + ): PromiseDelete a stage in an edge deployment plan if (and only if) the stage is inactive.
+ */ + public deleteEdgeDeploymentStage( + args: DeleteEdgeDeploymentStageCommandInput, + options?: __HttpHandlerOptions + ): PromiseDeletes an endpoint. SageMaker frees up all of the resources that were deployed when the * endpoint was created.
@@ -5070,6 +5243,38 @@ export class SageMaker extends SageMakerClient { } } + /** + *Describes an edge deployment plan with deployment status per stage.
+ */ + public describeEdgeDeploymentPlan( + args: DescribeEdgeDeploymentPlanCommandInput, + options?: __HttpHandlerOptions + ): PromiseA description of edge packaging jobs.
*/ @@ -7020,6 +7225,38 @@ export class SageMaker extends SageMakerClient { } } + /** + *Lists all edge deployment plans.
+ */ + public listEdgeDeploymentPlans( + args: ListEdgeDeploymentPlansCommandInput, + options?: __HttpHandlerOptions + ): PromiseReturns a list of edge packaging jobs.
*/ @@ -8009,6 +8246,38 @@ export class SageMaker extends SageMakerClient { } } + /** + *Lists devices allocated to the stage, containing detailed device information and deployment status.
+ */ + public listStageDevices( + args: ListStageDevicesCommandInput, + options?: __HttpHandlerOptions + ): PromiseLists the Studio Lifecycle Configurations in your Amazon Web Services Account.
*/ @@ -8659,6 +8928,38 @@ export class SageMaker extends SageMakerClient { } } + /** + *Starts a stage in an edge deployment plan.
+ */ + public startEdgeDeploymentStage( + args: StartEdgeDeploymentStageCommandInput, + options?: __HttpHandlerOptions + ): PromiseStarts a previously stopped monitoring schedule.
*Stops a stage in an edge deployment plan.
+ */ + public stopEdgeDeploymentStage( + args: StopEdgeDeploymentStageCommandInput, + options?: __HttpHandlerOptions + ): PromiseRequest to stop an edge packaging job.
*/ diff --git a/clients/client-sagemaker/src/SageMakerClient.ts b/clients/client-sagemaker/src/SageMakerClient.ts index 2f2e8ce358262..fde6bb98a9507 100644 --- a/clients/client-sagemaker/src/SageMakerClient.ts +++ b/clients/client-sagemaker/src/SageMakerClient.ts @@ -87,6 +87,14 @@ import { } from "./commands/CreateDataQualityJobDefinitionCommand"; import { CreateDeviceFleetCommandInput, CreateDeviceFleetCommandOutput } from "./commands/CreateDeviceFleetCommand"; import { CreateDomainCommandInput, CreateDomainCommandOutput } from "./commands/CreateDomainCommand"; +import { + CreateEdgeDeploymentPlanCommandInput, + CreateEdgeDeploymentPlanCommandOutput, +} from "./commands/CreateEdgeDeploymentPlanCommand"; +import { + CreateEdgeDeploymentStageCommandInput, + CreateEdgeDeploymentStageCommandOutput, +} from "./commands/CreateEdgeDeploymentStageCommand"; import { CreateEdgePackagingJobCommandInput, CreateEdgePackagingJobCommandOutput, @@ -192,6 +200,14 @@ import { } from "./commands/DeleteDataQualityJobDefinitionCommand"; import { DeleteDeviceFleetCommandInput, DeleteDeviceFleetCommandOutput } from "./commands/DeleteDeviceFleetCommand"; import { DeleteDomainCommandInput, DeleteDomainCommandOutput } from "./commands/DeleteDomainCommand"; +import { + DeleteEdgeDeploymentPlanCommandInput, + DeleteEdgeDeploymentPlanCommandOutput, +} from "./commands/DeleteEdgeDeploymentPlanCommand"; +import { + DeleteEdgeDeploymentStageCommandInput, + DeleteEdgeDeploymentStageCommandOutput, +} from "./commands/DeleteEdgeDeploymentStageCommand"; import { DeleteEndpointCommandInput, DeleteEndpointCommandOutput } from "./commands/DeleteEndpointCommand"; import { DeleteEndpointConfigCommandInput, @@ -284,6 +300,10 @@ import { DescribeDeviceFleetCommandOutput, } from "./commands/DescribeDeviceFleetCommand"; import { DescribeDomainCommandInput, DescribeDomainCommandOutput } from "./commands/DescribeDomainCommand"; +import { + DescribeEdgeDeploymentPlanCommandInput, + 
DescribeEdgeDeploymentPlanCommandOutput, +} from "./commands/DescribeEdgeDeploymentPlanCommand"; import { DescribeEdgePackagingJobCommandInput, DescribeEdgePackagingJobCommandOutput, @@ -467,6 +487,10 @@ import { import { ListDeviceFleetsCommandInput, ListDeviceFleetsCommandOutput } from "./commands/ListDeviceFleetsCommand"; import { ListDevicesCommandInput, ListDevicesCommandOutput } from "./commands/ListDevicesCommand"; import { ListDomainsCommandInput, ListDomainsCommandOutput } from "./commands/ListDomainsCommand"; +import { + ListEdgeDeploymentPlansCommandInput, + ListEdgeDeploymentPlansCommandOutput, +} from "./commands/ListEdgeDeploymentPlansCommand"; import { ListEdgePackagingJobsCommandInput, ListEdgePackagingJobsCommandOutput, @@ -549,6 +573,7 @@ import { import { ListPipelinesCommandInput, ListPipelinesCommandOutput } from "./commands/ListPipelinesCommand"; import { ListProcessingJobsCommandInput, ListProcessingJobsCommandOutput } from "./commands/ListProcessingJobsCommand"; import { ListProjectsCommandInput, ListProjectsCommandOutput } from "./commands/ListProjectsCommand"; +import { ListStageDevicesCommandInput, ListStageDevicesCommandOutput } from "./commands/ListStageDevicesCommand"; import { ListStudioLifecycleConfigsCommandInput, ListStudioLifecycleConfigsCommandOutput, @@ -592,6 +617,10 @@ import { SendPipelineExecutionStepSuccessCommandInput, SendPipelineExecutionStepSuccessCommandOutput, } from "./commands/SendPipelineExecutionStepSuccessCommand"; +import { + StartEdgeDeploymentStageCommandInput, + StartEdgeDeploymentStageCommandOutput, +} from "./commands/StartEdgeDeploymentStageCommand"; import { StartMonitoringScheduleCommandInput, StartMonitoringScheduleCommandOutput, @@ -606,6 +635,10 @@ import { } from "./commands/StartPipelineExecutionCommand"; import { StopAutoMLJobCommandInput, StopAutoMLJobCommandOutput } from "./commands/StopAutoMLJobCommand"; import { StopCompilationJobCommandInput, StopCompilationJobCommandOutput } from 
"./commands/StopCompilationJobCommand"; +import { + StopEdgeDeploymentStageCommandInput, + StopEdgeDeploymentStageCommandOutput, +} from "./commands/StopEdgeDeploymentStageCommand"; import { StopEdgePackagingJobCommandInput, StopEdgePackagingJobCommandOutput, @@ -707,6 +740,8 @@ export type ServiceInputTypes = | CreateDataQualityJobDefinitionCommandInput | CreateDeviceFleetCommandInput | CreateDomainCommandInput + | CreateEdgeDeploymentPlanCommandInput + | CreateEdgeDeploymentStageCommandInput | CreateEdgePackagingJobCommandInput | CreateEndpointCommandInput | CreateEndpointConfigCommandInput @@ -752,6 +787,8 @@ export type ServiceInputTypes = | DeleteDataQualityJobDefinitionCommandInput | DeleteDeviceFleetCommandInput | DeleteDomainCommandInput + | DeleteEdgeDeploymentPlanCommandInput + | DeleteEdgeDeploymentStageCommandInput | DeleteEndpointCommandInput | DeleteEndpointConfigCommandInput | DeleteExperimentCommandInput @@ -793,6 +830,7 @@ export type ServiceInputTypes = | DescribeDeviceCommandInput | DescribeDeviceFleetCommandInput | DescribeDomainCommandInput + | DescribeEdgeDeploymentPlanCommandInput | DescribeEdgePackagingJobCommandInput | DescribeEndpointCommandInput | DescribeEndpointConfigCommandInput @@ -853,6 +891,7 @@ export type ServiceInputTypes = | ListDeviceFleetsCommandInput | ListDevicesCommandInput | ListDomainsCommandInput + | ListEdgeDeploymentPlansCommandInput | ListEdgePackagingJobsCommandInput | ListEndpointConfigsCommandInput | ListEndpointsCommandInput @@ -884,6 +923,7 @@ export type ServiceInputTypes = | ListPipelinesCommandInput | ListProcessingJobsCommandInput | ListProjectsCommandInput + | ListStageDevicesCommandInput | ListStudioLifecycleConfigsCommandInput | ListSubscribedWorkteamsCommandInput | ListTagsCommandInput @@ -903,11 +943,13 @@ export type ServiceInputTypes = | SearchCommandInput | SendPipelineExecutionStepFailureCommandInput | SendPipelineExecutionStepSuccessCommandInput + | StartEdgeDeploymentStageCommandInput | 
StartMonitoringScheduleCommandInput | StartNotebookInstanceCommandInput | StartPipelineExecutionCommandInput | StopAutoMLJobCommandInput | StopCompilationJobCommandInput + | StopEdgeDeploymentStageCommandInput | StopEdgePackagingJobCommandInput | StopHyperParameterTuningJobCommandInput | StopInferenceRecommendationsJobCommandInput @@ -963,6 +1005,8 @@ export type ServiceOutputTypes = | CreateDataQualityJobDefinitionCommandOutput | CreateDeviceFleetCommandOutput | CreateDomainCommandOutput + | CreateEdgeDeploymentPlanCommandOutput + | CreateEdgeDeploymentStageCommandOutput | CreateEdgePackagingJobCommandOutput | CreateEndpointCommandOutput | CreateEndpointConfigCommandOutput @@ -1008,6 +1052,8 @@ export type ServiceOutputTypes = | DeleteDataQualityJobDefinitionCommandOutput | DeleteDeviceFleetCommandOutput | DeleteDomainCommandOutput + | DeleteEdgeDeploymentPlanCommandOutput + | DeleteEdgeDeploymentStageCommandOutput | DeleteEndpointCommandOutput | DeleteEndpointConfigCommandOutput | DeleteExperimentCommandOutput @@ -1049,6 +1095,7 @@ export type ServiceOutputTypes = | DescribeDeviceCommandOutput | DescribeDeviceFleetCommandOutput | DescribeDomainCommandOutput + | DescribeEdgeDeploymentPlanCommandOutput | DescribeEdgePackagingJobCommandOutput | DescribeEndpointCommandOutput | DescribeEndpointConfigCommandOutput @@ -1109,6 +1156,7 @@ export type ServiceOutputTypes = | ListDeviceFleetsCommandOutput | ListDevicesCommandOutput | ListDomainsCommandOutput + | ListEdgeDeploymentPlansCommandOutput | ListEdgePackagingJobsCommandOutput | ListEndpointConfigsCommandOutput | ListEndpointsCommandOutput @@ -1140,6 +1188,7 @@ export type ServiceOutputTypes = | ListPipelinesCommandOutput | ListProcessingJobsCommandOutput | ListProjectsCommandOutput + | ListStageDevicesCommandOutput | ListStudioLifecycleConfigsCommandOutput | ListSubscribedWorkteamsCommandOutput | ListTagsCommandOutput @@ -1159,11 +1208,13 @@ export type ServiceOutputTypes = | SearchCommandOutput | 
SendPipelineExecutionStepFailureCommandOutput | SendPipelineExecutionStepSuccessCommandOutput + | StartEdgeDeploymentStageCommandOutput | StartMonitoringScheduleCommandOutput | StartNotebookInstanceCommandOutput | StartPipelineExecutionCommandOutput | StopAutoMLJobCommandOutput | StopCompilationJobCommandOutput + | StopEdgeDeploymentStageCommandOutput | StopEdgePackagingJobCommandOutput | StopHyperParameterTuningJobCommandOutput | StopInferenceRecommendationsJobCommandOutput diff --git a/clients/client-sagemaker/src/commands/CreateEdgeDeploymentPlanCommand.ts b/clients/client-sagemaker/src/commands/CreateEdgeDeploymentPlanCommand.ts new file mode 100644 index 0000000000000..d081f928f378f --- /dev/null +++ b/clients/client-sagemaker/src/commands/CreateEdgeDeploymentPlanCommand.ts @@ -0,0 +1,96 @@ +// smithy-typescript generated code +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { CreateEdgeDeploymentPlanRequest, CreateEdgeDeploymentPlanResponse } from "../models/models_0"; +import { + deserializeAws_json1_1CreateEdgeDeploymentPlanCommand, + serializeAws_json1_1CreateEdgeDeploymentPlanCommand, +} from "../protocols/Aws_json1_1"; +import { SageMakerClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../SageMakerClient"; + +export interface CreateEdgeDeploymentPlanCommandInput extends CreateEdgeDeploymentPlanRequest {} +export interface CreateEdgeDeploymentPlanCommandOutput extends CreateEdgeDeploymentPlanResponse, __MetadataBearer {} + +/** + *Creates an edge deployment plan, consisting of multiple stages. 
Each stage may have a different deployment configuration and devices.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { SageMakerClient, CreateEdgeDeploymentPlanCommand } from "@aws-sdk/client-sagemaker"; // ES Modules import + * // const { SageMakerClient, CreateEdgeDeploymentPlanCommand } = require("@aws-sdk/client-sagemaker"); // CommonJS import + * const client = new SageMakerClient(config); + * const command = new CreateEdgeDeploymentPlanCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CreateEdgeDeploymentPlanCommandInput} for command's `input` shape. + * @see {@link CreateEdgeDeploymentPlanCommandOutput} for command's `response` shape. + * @see {@link SageMakerClientResolvedConfig | config} for SageMakerClient's `config` shape. + * + */ +export class CreateEdgeDeploymentPlanCommand extends $Command< + CreateEdgeDeploymentPlanCommandInput, + CreateEdgeDeploymentPlanCommandOutput, + SageMakerClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreateEdgeDeploymentPlanCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackCreates a new stage in an existing edge deployment plan.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { SageMakerClient, CreateEdgeDeploymentStageCommand } from "@aws-sdk/client-sagemaker"; // ES Modules import + * // const { SageMakerClient, CreateEdgeDeploymentStageCommand } = require("@aws-sdk/client-sagemaker"); // CommonJS import + * const client = new SageMakerClient(config); + * const command = new CreateEdgeDeploymentStageCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CreateEdgeDeploymentStageCommandInput} for command's `input` shape. + * @see {@link CreateEdgeDeploymentStageCommandOutput} for command's `response` shape. + * @see {@link SageMakerClientResolvedConfig | config} for SageMakerClient's `config` shape. + * + */ +export class CreateEdgeDeploymentStageCommand extends $Command< + CreateEdgeDeploymentStageCommandInput, + CreateEdgeDeploymentStageCommandOutput, + SageMakerClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreateEdgeDeploymentStageCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackDeletes an edge deployment plan if (and only if) all the stages in the plan are inactive or there are no stages in the plan.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { SageMakerClient, DeleteEdgeDeploymentPlanCommand } from "@aws-sdk/client-sagemaker"; // ES Modules import + * // const { SageMakerClient, DeleteEdgeDeploymentPlanCommand } = require("@aws-sdk/client-sagemaker"); // CommonJS import + * const client = new SageMakerClient(config); + * const command = new DeleteEdgeDeploymentPlanCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DeleteEdgeDeploymentPlanCommandInput} for command's `input` shape. + * @see {@link DeleteEdgeDeploymentPlanCommandOutput} for command's `response` shape. + * @see {@link SageMakerClientResolvedConfig | config} for SageMakerClient's `config` shape. + * + */ +export class DeleteEdgeDeploymentPlanCommand extends $Command< + DeleteEdgeDeploymentPlanCommandInput, + DeleteEdgeDeploymentPlanCommandOutput, + SageMakerClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeleteEdgeDeploymentPlanCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackDelete a stage in an edge deployment plan if (and only if) the stage is inactive.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { SageMakerClient, DeleteEdgeDeploymentStageCommand } from "@aws-sdk/client-sagemaker"; // ES Modules import + * // const { SageMakerClient, DeleteEdgeDeploymentStageCommand } = require("@aws-sdk/client-sagemaker"); // CommonJS import + * const client = new SageMakerClient(config); + * const command = new DeleteEdgeDeploymentStageCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DeleteEdgeDeploymentStageCommandInput} for command's `input` shape. + * @see {@link DeleteEdgeDeploymentStageCommandOutput} for command's `response` shape. + * @see {@link SageMakerClientResolvedConfig | config} for SageMakerClient's `config` shape. + * + */ +export class DeleteEdgeDeploymentStageCommand extends $Command< + DeleteEdgeDeploymentStageCommandInput, + DeleteEdgeDeploymentStageCommandOutput, + SageMakerClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeleteEdgeDeploymentStageCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackDescribes an edge deployment plan with deployment status per stage.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { SageMakerClient, DescribeEdgeDeploymentPlanCommand } from "@aws-sdk/client-sagemaker"; // ES Modules import + * // const { SageMakerClient, DescribeEdgeDeploymentPlanCommand } = require("@aws-sdk/client-sagemaker"); // CommonJS import + * const client = new SageMakerClient(config); + * const command = new DescribeEdgeDeploymentPlanCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DescribeEdgeDeploymentPlanCommandInput} for command's `input` shape. + * @see {@link DescribeEdgeDeploymentPlanCommandOutput} for command's `response` shape. + * @see {@link SageMakerClientResolvedConfig | config} for SageMakerClient's `config` shape. + * + */ +export class DescribeEdgeDeploymentPlanCommand extends $Command< + DescribeEdgeDeploymentPlanCommandInput, + DescribeEdgeDeploymentPlanCommandOutput, + SageMakerClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DescribeEdgeDeploymentPlanCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackLists all edge deployment plans.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { SageMakerClient, ListEdgeDeploymentPlansCommand } from "@aws-sdk/client-sagemaker"; // ES Modules import + * // const { SageMakerClient, ListEdgeDeploymentPlansCommand } = require("@aws-sdk/client-sagemaker"); // CommonJS import + * const client = new SageMakerClient(config); + * const command = new ListEdgeDeploymentPlansCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListEdgeDeploymentPlansCommandInput} for command's `input` shape. + * @see {@link ListEdgeDeploymentPlansCommandOutput} for command's `response` shape. + * @see {@link SageMakerClientResolvedConfig | config} for SageMakerClient's `config` shape. + * + */ +export class ListEdgeDeploymentPlansCommand extends $Command< + ListEdgeDeploymentPlansCommandInput, + ListEdgeDeploymentPlansCommandOutput, + SageMakerClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListEdgeDeploymentPlansCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackLists devices allocated to the stage, containing detailed device information and deployment status.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { SageMakerClient, ListStageDevicesCommand } from "@aws-sdk/client-sagemaker"; // ES Modules import + * // const { SageMakerClient, ListStageDevicesCommand } = require("@aws-sdk/client-sagemaker"); // CommonJS import + * const client = new SageMakerClient(config); + * const command = new ListStageDevicesCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListStageDevicesCommandInput} for command's `input` shape. + * @see {@link ListStageDevicesCommandOutput} for command's `response` shape. + * @see {@link SageMakerClientResolvedConfig | config} for SageMakerClient's `config` shape. + * + */ +export class ListStageDevicesCommand extends $Command< + ListStageDevicesCommandInput, + ListStageDevicesCommandOutput, + SageMakerClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListStageDevicesCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackStarts a stage in an edge deployment plan.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { SageMakerClient, StartEdgeDeploymentStageCommand } from "@aws-sdk/client-sagemaker"; // ES Modules import + * // const { SageMakerClient, StartEdgeDeploymentStageCommand } = require("@aws-sdk/client-sagemaker"); // CommonJS import + * const client = new SageMakerClient(config); + * const command = new StartEdgeDeploymentStageCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link StartEdgeDeploymentStageCommandInput} for command's `input` shape. + * @see {@link StartEdgeDeploymentStageCommandOutput} for command's `response` shape. + * @see {@link SageMakerClientResolvedConfig | config} for SageMakerClient's `config` shape. + * + */ +export class StartEdgeDeploymentStageCommand extends $Command< + StartEdgeDeploymentStageCommandInput, + StartEdgeDeploymentStageCommandOutput, + SageMakerClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: StartEdgeDeploymentStageCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackStops a stage in an edge deployment plan.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { SageMakerClient, StopEdgeDeploymentStageCommand } from "@aws-sdk/client-sagemaker"; // ES Modules import + * // const { SageMakerClient, StopEdgeDeploymentStageCommand } = require("@aws-sdk/client-sagemaker"); // CommonJS import + * const client = new SageMakerClient(config); + * const command = new StopEdgeDeploymentStageCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link StopEdgeDeploymentStageCommandInput} for command's `input` shape. + * @see {@link StopEdgeDeploymentStageCommandOutput} for command's `response` shape. + * @see {@link SageMakerClientResolvedConfig | config} for SageMakerClient's `config` shape. + * + */ +export class StopEdgeDeploymentStageCommand extends $Command< + StopEdgeDeploymentStageCommandInput, + StopEdgeDeploymentStageCommandOutput, + SageMakerClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: StopEdgeDeploymentStageCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackContains information about the configuration of a model in a deployment.
+ */ +export interface EdgeDeploymentModelConfig { + /** + *The name the device application uses to reference this model.
+ */ + ModelHandle: string | undefined; + + /** + *The edge packaging job associated with this deployment.
+ */ + EdgePackagingJobName: string | undefined; +} + +export namespace EdgeDeploymentModelConfig { + /** + * @internal + */ + export const filterSensitiveLog = (obj: EdgeDeploymentModelConfig): any => ({ + ...obj, + }); +} + +export enum FailureHandlingPolicy { + DoNothing = "DO_NOTHING", + RollbackOnFailure = "ROLLBACK_ON_FAILURE", +} + +/** + *Contains information about the configuration of a deployment.
+ */ +export interface EdgeDeploymentConfig { + /** + *Toggle that determines whether to rollback to previous configuration if the current deployment fails. + * By default this is turned on. You may turn this off if you want to investigate the errors yourself.
+ */ + FailureHandlingPolicy: FailureHandlingPolicy | string | undefined; +} + +export namespace EdgeDeploymentConfig { + /** + * @internal + */ + export const filterSensitiveLog = (obj: EdgeDeploymentConfig): any => ({ + ...obj, + }); +} + +export enum DeviceSubsetType { + NameContains = "NAMECONTAINS", + Percentage = "PERCENTAGE", + Selection = "SELECTION", +} + +/** + *Contains information about the configurations of selected devices.
+ */ +export interface DeviceSelectionConfig { + /** + *Type of device subsets to deploy to the current stage.
+ */ + DeviceSubsetType: DeviceSubsetType | string | undefined; + + /** + *Percentage of devices in the fleet to deploy to the current stage.
+ */ + Percentage?: number; + + /** + *List of devices chosen to deploy.
+ */ + DeviceNames?: string[]; + + /** + *A filter to select devices with names containing this name.
+ */ + DeviceNameContains?: string; +} + +export namespace DeviceSelectionConfig { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeviceSelectionConfig): any => ({ + ...obj, + }); +} + +/** + *Contains information about a stage in an edge deployment plan.
+ */ +export interface DeploymentStage { + /** + *The name of the stage.
+ */ + StageName: string | undefined; + + /** + *Configuration of the devices in the stage.
+ */ + DeviceSelectionConfig: DeviceSelectionConfig | undefined; + + /** + *Configuration of the deployment details.
+ */ + DeploymentConfig?: EdgeDeploymentConfig; +} + +export namespace DeploymentStage { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeploymentStage): any => ({ + ...obj, + }); +} + +export interface CreateEdgeDeploymentPlanRequest { + /** + *The name of the edge deployment plan.
+ */ + EdgeDeploymentPlanName: string | undefined; + + /** + *List of models associated with the edge deployment plan.
+ */ + ModelConfigs: EdgeDeploymentModelConfig[] | undefined; + + /** + *The device fleet used for this edge deployment plan.
+ */ + DeviceFleetName: string | undefined; + + /** + *List of stages of the edge deployment plan. The number of stages is limited to 10 per deployment.
+ */ + Stages?: DeploymentStage[]; + + /** + *List of tags with which to tag the edge deployment plan.
+ */ + Tags?: Tag[]; +} + +export namespace CreateEdgeDeploymentPlanRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateEdgeDeploymentPlanRequest): any => ({ + ...obj, + }); +} + +export interface CreateEdgeDeploymentPlanResponse { + /** + *The ARN of the edge deployment plan.
+ */ + EdgeDeploymentPlanArn: string | undefined; +} + +export namespace CreateEdgeDeploymentPlanResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateEdgeDeploymentPlanResponse): any => ({ + ...obj, + }); +} + +export interface CreateEdgeDeploymentStageRequest { + /** + *The name of the edge deployment plan.
+ */ + EdgeDeploymentPlanName: string | undefined; + + /** + *List of stages to be added to the edge deployment plan.
+ */ + Stages: DeploymentStage[] | undefined; +} + +export namespace CreateEdgeDeploymentStageRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateEdgeDeploymentStageRequest): any => ({ + ...obj, + }); +} + export interface CreateEdgePackagingJobRequest { /** *The name of the edge packaging job.
@@ -11549,516 +11740,3 @@ export namespace TuningJobCompletionCriteria { ...obj, }); } - -/** - *Configures a hyperparameter tuning job.
- */ -export interface HyperParameterTuningJobConfig { - /** - *Specifies how hyperparameter tuning chooses the combinations of hyperparameter values
- * to use for the training job it launches. To use the Bayesian search strategy, set this
- * to Bayesian
. To randomly search, set it to Random
. For
- * information about search strategies, see How
- * Hyperparameter Tuning Works.
The HyperParameterTuningJobObjective object that specifies the - * objective - * metric for this tuning job.
- */ - HyperParameterTuningJobObjective?: HyperParameterTuningJobObjective; - - /** - *The ResourceLimits object that specifies the - * maximum - * number of training jobs and parallel training jobs for this tuning - * job.
- */ - ResourceLimits: ResourceLimits | undefined; - - /** - *The ParameterRanges object that specifies the ranges of - * hyperparameters - * that this tuning job searches.
- */ - ParameterRanges?: ParameterRanges; - - /** - *Specifies whether to use early stopping for training jobs launched by the
- * hyperparameter tuning job. This can be one of the following values (the default value is
- * OFF
):
Training jobs launched by the hyperparameter tuning job do not use early - * stopping.
- *SageMaker stops training jobs launched by the hyperparameter tuning job when - * they are unlikely to perform better than previously completed training jobs. - * For more information, see Stop Training Jobs Early.
- *The tuning job's completion criteria.
- */ - TuningJobCompletionCriteria?: TuningJobCompletionCriteria; -} - -export namespace HyperParameterTuningJobConfig { - /** - * @internal - */ - export const filterSensitiveLog = (obj: HyperParameterTuningJobConfig): any => ({ - ...obj, - }); -} - -/** - *Specifies - * which - * training algorithm to use for training jobs that a hyperparameter - * tuning job launches and the metrics to monitor.
- */ -export interface HyperParameterAlgorithmSpecification { - /** - * The registry path of the Docker image that contains the training algorithm. For
- * information about Docker registry paths for built-in algorithms, see Algorithms
- * Provided by Amazon SageMaker: Common Parameters. SageMaker supports both
- * registry/repository[:tag]
and registry/repository[@digest]
- * image path formats. For more information, see Using Your Own Algorithms with Amazon
- * SageMaker.
The training input mode that the algorithm supports. For more information about input modes, see - * Algorithms.
- * - *- * Pipe mode - *
- *If an algorithm supports Pipe
mode, Amazon SageMaker streams data directly
- * from Amazon S3 to the container.
- * File mode - *
- *If an algorithm supports File
mode, SageMaker
- * downloads the training data from S3 to the provisioned ML storage volume, and mounts the
- * directory to the Docker volume for the training container.
You must provision the ML storage volume with sufficient capacity - * to accommodate the data downloaded from S3. In addition to the training data, the ML - * storage volume also stores the output model. The algorithm container uses the ML storage - * volume to also store intermediate information, if any.
- *For distributed algorithms, training data is distributed uniformly. - * Your training duration is predictable if the input data objects sizes are - * approximately the same. SageMaker does not split the files any further for model training. - * If the object sizes are skewed, training won't be optimal as the data distribution is also - * skewed when one host in a training cluster is overloaded, thus becoming a bottleneck in - * training.
- * - *- * FastFile mode - *
- *If an algorithm supports FastFile
mode, SageMaker streams data directly
- * from S3 to the container with no code changes, and provides file system access to
- * the data. Users can author their training script to interact with these files as if
- * they were stored on disk.
- * FastFile
mode works best when the data is read sequentially.
- * Augmented manifest files aren't supported.
- * The startup time is lower when there are fewer files in the S3 bucket provided.
The name of the resource algorithm to use for the hyperparameter tuning job. If you
- * specify a value for this parameter, do not specify a value for
- * TrainingImage
.
An array of MetricDefinition objects that specify the - * metrics - * that the algorithm emits.
- */ - MetricDefinitions?: MetricDefinition[]; -} - -export namespace HyperParameterAlgorithmSpecification { - /** - * @internal - */ - export const filterSensitiveLog = (obj: HyperParameterAlgorithmSpecification): any => ({ - ...obj, - }); -} - -/** - *The retry strategy to use when a training job fails due to an
- * InternalServerError
. RetryStrategy
is specified as part of
- * the CreateTrainingJob
and CreateHyperParameterTuningJob
- * requests. You can add the StoppingCondition
parameter to the request to
- * limit the training time for the complete job.
The number of times to retry the job. When the job is retried, it's
- * SecondaryStatus
is changed to STARTING
.
Defines - * the training jobs launched by a hyperparameter tuning job.
- */ -export interface HyperParameterTrainingJobDefinition { - /** - *The job definition name.
- */ - DefinitionName?: string; - - /** - *Defines the objective metric for a hyperparameter tuning job.
- * Hyperparameter
- * tuning uses the value of this metric to evaluate the training jobs it launches, and
- * returns the training job that results in either the highest or lowest value for this
- * metric, depending on the value you specify for the Type
- * parameter.
Specifies ranges of integer, continuous, and categorical hyperparameters that a - * hyperparameter tuning job searches. The hyperparameter tuning job launches training jobs - * with hyperparameter values within these ranges to find the combination of values that - * result in the training job with the best performance as measured by the objective metric - * of the hyperparameter tuning job.
- *The maximum number of items specified for Array Members
refers to
- * the maximum number of hyperparameters for each range and also the maximum for the
- * hyperparameter tuning job itself. That is, the sum of the number of hyperparameters
- * for all the ranges can't exceed the maximum number specified.
Specifies the values of hyperparameters - * that - * do not change for the tuning job.
- */ - StaticHyperParameters?: RecordThe HyperParameterAlgorithmSpecification object that - * specifies - * the resource algorithm to use for the training jobs that the tuning - * job launches.
- */ - AlgorithmSpecification: HyperParameterAlgorithmSpecification | undefined; - - /** - *The Amazon Resource Name (ARN) of the - * IAM - * role associated with the training jobs that the tuning job - * launches.
- */ - RoleArn: string | undefined; - - /** - *An array of Channel objects that specify - * the - * input for the training jobs that the tuning job launches.
- */ - InputDataConfig?: Channel[]; - - /** - *The VpcConfig object that - * specifies - * the VPC that you want the training jobs that this hyperparameter - * tuning job launches to connect to. Control access to and from your - * training - * container by configuring the VPC. For more information, see Protect Training Jobs - * by Using an Amazon Virtual Private Cloud.
- */ - VpcConfig?: VpcConfig; - - /** - *Specifies the path to the Amazon S3 bucket where you - * store - * model artifacts from the training jobs that the tuning job - * launches.
- */ - OutputDataConfig: OutputDataConfig | undefined; - - /** - *The resources, - * including - * the compute instances and storage volumes, to use for the training - * jobs that the tuning job launches.
- *Storage volumes store model artifacts and
- * incremental
- * states. Training algorithms might also use storage volumes for
- * scratch
- * space. If you want SageMaker to use the storage volume to store the
- * training data, choose File
as the TrainingInputMode
in the
- * algorithm specification. For distributed training algorithms, specify an instance count
- * greater than 1.
Specifies a limit to how long a model hyperparameter training job can run. It also - * specifies how long a managed spot training job has to complete. When the job reaches the - * time limit, SageMaker ends the training job. Use this API to cap model training costs.
- */ - StoppingCondition: StoppingCondition | undefined; - - /** - *Isolates the training container. No inbound or outbound network calls can be made, - * except for calls between peers within a training cluster for distributed training. If - * network isolation is used for training jobs that are configured to use a VPC, SageMaker - * downloads and uploads customer data and model artifacts through the specified VPC, but - * the training container does not have network access.
- */ - EnableNetworkIsolation?: boolean; - - /** - *To encrypt all communications between ML compute instances in distributed training,
- * choose True
. Encryption provides greater security for distributed training,
- * but training might take longer. How long it takes depends on the amount of communication
- * between compute instances, especially if you use a deep learning algorithm in
- * distributed training.
A Boolean indicating whether managed spot training is enabled (True
) or
- * not (False
).
Contains information about the output location for managed spot training checkpoint - * data.
- */ - CheckpointConfig?: CheckpointConfig; - - /** - *The number of times to retry the job when the job fails due to an
- * InternalServerError
.
A previously completed or stopped hyperparameter tuning job to be used as a starting - * point for a new hyperparameter tuning job.
- */ -export interface ParentHyperParameterTuningJob { - /** - *The name of the hyperparameter tuning job to be used as a starting point for a new - * hyperparameter tuning job.
- */ - HyperParameterTuningJobName?: string; -} - -export namespace ParentHyperParameterTuningJob { - /** - * @internal - */ - export const filterSensitiveLog = (obj: ParentHyperParameterTuningJob): any => ({ - ...obj, - }); -} - -export enum HyperParameterTuningJobWarmStartType { - IDENTICAL_DATA_AND_ALGORITHM = "IdenticalDataAndAlgorithm", - TRANSFER_LEARNING = "TransferLearning", -} - -/** - *Specifies the configuration for a hyperparameter tuning job that uses one or more - * previous hyperparameter tuning jobs as a starting point. The results of previous tuning - * jobs are used to inform which combinations of hyperparameters to search over in the new - * tuning job.
- *All training jobs launched by the new hyperparameter tuning job are evaluated by using - * the objective metric, and the training job that performs the best is compared to the - * best training jobs from the parent tuning jobs. From these, the training job that - * performs the best as measured by the objective metric is returned as the overall best - * training job.
- *All training jobs launched by parent hyperparameter tuning jobs and the new - * hyperparameter tuning jobs count against the limit of training jobs for the tuning - * job.
- *An array of hyperparameter tuning jobs that are used as the starting point for the new - * hyperparameter tuning job. For more information about warm starting a hyperparameter - * tuning job, see Using a Previous - * Hyperparameter Tuning Job as a Starting Point.
- *Hyperparameter tuning jobs created before October 1, 2018 cannot be used as parent - * jobs for warm start tuning jobs.
- */ - ParentHyperParameterTuningJobs: ParentHyperParameterTuningJob[] | undefined; - - /** - *Specifies one of the following:
- *The new hyperparameter tuning job uses the same input data and training - * image as the parent tuning jobs. You can change the hyperparameter ranges to - * search and the maximum number of training jobs that the hyperparameter - * tuning job launches. You cannot use a new version of the training algorithm, - * unless the changes in the new version do not affect the algorithm itself. - * For example, changes that improve logging or adding support for a different - * data format are allowed. You can also change hyperparameters from tunable to - * static, and from static to tunable, but the total number of static plus - * tunable hyperparameters must remain the same as it is in all parent jobs. - * The objective metric for the new tuning job must be the same as for all - * parent jobs.
- *The new hyperparameter tuning job can include input data, hyperparameter - * ranges, maximum number of concurrent training jobs, and maximum number of - * training jobs that are different than those of its parent hyperparameter - * tuning jobs. The training image can also be a different version from the - * version used in the parent hyperparameter tuning job. You can also change - * hyperparameters from tunable to static, and from static to tunable, but the - * total number of static plus tunable hyperparameters must remain the same as - * it is in all parent jobs. The objective metric for the new tuning job must - * be the same as for all parent jobs.
- *The name of the tuning job. This name is the prefix for the names of all training jobs - * that this tuning job launches. The name must be unique within the same Amazon Web Services account and - * Amazon Web Services Region. The name must have 1 to 32 characters. Valid characters are a-z, A-Z, 0-9, - * and : + = @ _ % - (hyphen). The name is not case sensitive.
- */ - HyperParameterTuningJobName: string | undefined; - - /** - *The HyperParameterTuningJobConfig object that describes the tuning - * job, including the search strategy, the objective metric used to evaluate training jobs, - * ranges of parameters to search, and resource limits for the tuning job. For more - * information, see How - * Hyperparameter Tuning Works.
- */ - HyperParameterTuningJobConfig: HyperParameterTuningJobConfig | undefined; - - /** - *The HyperParameterTrainingJobDefinition object that describes the - * training jobs that this tuning job launches, including static hyperparameters, input - * data configuration, output data configuration, resource configuration, and stopping - * condition.
- */ - TrainingJobDefinition?: HyperParameterTrainingJobDefinition; - - /** - *A list of the HyperParameterTrainingJobDefinition objects launched - * for this tuning job.
- */ - TrainingJobDefinitions?: HyperParameterTrainingJobDefinition[]; - - /** - *Specifies the configuration for starting the hyperparameter tuning job using one or - * more previous tuning jobs as a starting point. The results of previous tuning jobs are - * used to inform which combinations of hyperparameters to search over in the new tuning - * job.
- *All training jobs launched by the new hyperparameter tuning job are evaluated by using
- * the objective metric. If you specify IDENTICAL_DATA_AND_ALGORITHM
as the
- * WarmStartType
value for the warm start configuration, the training job
- * that performs the best in the new tuning job is compared to the best training jobs from
- * the parent tuning jobs. From these, the training job that performs the best as measured
- * by the objective metric is returned as the overall best training job.
All training jobs launched by parent hyperparameter tuning jobs and the new - * hyperparameter tuning jobs count against the limit of training jobs for the tuning - * job.
- *An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in - * different ways, for example, by purpose, owner, or environment. For more information, - * see Tagging Amazon Web Services - * Resources.
- *Tags that you specify for the tuning job are also added to all training jobs that the - * tuning job launches.
- */ - Tags?: Tag[]; -} - -export namespace CreateHyperParameterTuningJobRequest { - /** - * @internal - */ - export const filterSensitiveLog = (obj: CreateHyperParameterTuningJobRequest): any => ({ - ...obj, - }); -} - -export interface CreateHyperParameterTuningJobResponse { - /** - *The Amazon Resource Name (ARN) of the tuning job. SageMaker assigns an ARN to a - * hyperparameter tuning job when you create it.
- */ - HyperParameterTuningJobArn: string | undefined; -} - -export namespace CreateHyperParameterTuningJobResponse { - /** - * @internal - */ - export const filterSensitiveLog = (obj: CreateHyperParameterTuningJobResponse): any => ({ - ...obj, - }); -} diff --git a/clients/client-sagemaker/src/models/models_1.ts b/clients/client-sagemaker/src/models/models_1.ts index 6cf88a38f605b..c32a839669425 100644 --- a/clients/client-sagemaker/src/models/models_1.ts +++ b/clients/client-sagemaker/src/models/models_1.ts @@ -47,7 +47,10 @@ import { DataQualityBaselineConfig, DataQualityJobInput, DeploymentConfig, + DeviceSelectionConfig, DomainSettings, + EdgeDeploymentConfig, + EdgeDeploymentModelConfig, EdgeOutputConfig, EdgePresetDeploymentType, EndpointInput, @@ -58,14 +61,13 @@ import { HumanLoopActivationConfig, HumanLoopConfig, HumanLoopRequestSource, - HyperParameterTrainingJobDefinition, - HyperParameterTuningJobConfig, - HyperParameterTuningJobObjectiveType, - HyperParameterTuningJobWarmStartConfig, + HyperParameterTuningJobObjective, + HyperParameterTuningJobStrategyType, InferenceSpecification, InputConfig, KernelGatewayImageConfig, MetadataProperties, + MetricDefinition, MetricsSource, ModelApprovalStatus, ModelDeployConfig, @@ -76,11 +78,11 @@ import { MonitoringStatisticsResource, MonitoringStoppingCondition, NeoVpcConfig, - ObjectiveStatus, OfflineStoreConfig, OnlineStoreConfig, OutputConfig, OutputDataConfig, + ParameterRanges, ProblemType, ProcessingInstanceType, ProcessingS3DataDistributionType, @@ -92,20 +94,536 @@ import { ProductionVariantServerlessConfig, PublicWorkforceTaskPrice, ResourceConfig, + ResourceLimits, ResourceSpec, - RetryStrategy, StoppingCondition, Tag, + TrainingInputMode, + TrainingJobEarlyStoppingType, TrainingSpecification, TransformInput, TransformJobDefinition, TransformOutput, TransformResources, + TuningJobCompletionCriteria, UserContext, UserSettings, VpcConfig, } from "./models_0"; +/** + *Configures a hyperparameter tuning job.
+ */ +export interface HyperParameterTuningJobConfig { + /** + *Specifies how hyperparameter tuning chooses the combinations of hyperparameter values
+ * to use for the training job it launches. To use the Bayesian search strategy, set this
+ * to Bayesian
. To randomly search, set it to Random
. For
+ * information about search strategies, see How
+ * Hyperparameter Tuning Works.
The HyperParameterTuningJobObjective object that specifies the + * objective + * metric for this tuning job.
+ */ + HyperParameterTuningJobObjective?: HyperParameterTuningJobObjective; + + /** + *The ResourceLimits object that specifies the + * maximum + * number of training jobs and parallel training jobs for this tuning + * job.
+ */ + ResourceLimits: ResourceLimits | undefined; + + /** + *The ParameterRanges object that specifies the ranges of + * hyperparameters + * that this tuning job searches.
+ */ + ParameterRanges?: ParameterRanges; + + /** + *Specifies whether to use early stopping for training jobs launched by the
+ * hyperparameter tuning job. This can be one of the following values (the default value is
+ * OFF
):
Training jobs launched by the hyperparameter tuning job do not use early + * stopping.
+ *SageMaker stops training jobs launched by the hyperparameter tuning job when + * they are unlikely to perform better than previously completed training jobs. + * For more information, see Stop Training Jobs Early.
+ *The tuning job's completion criteria.
+ */ + TuningJobCompletionCriteria?: TuningJobCompletionCriteria; +} + +export namespace HyperParameterTuningJobConfig { + /** + * @internal + */ + export const filterSensitiveLog = (obj: HyperParameterTuningJobConfig): any => ({ + ...obj, + }); +} + +/** + *Specifies + * which + * training algorithm to use for training jobs that a hyperparameter + * tuning job launches and the metrics to monitor.
+ */ +export interface HyperParameterAlgorithmSpecification { + /** + * The registry path of the Docker image that contains the training algorithm. For
+ * information about Docker registry paths for built-in algorithms, see Algorithms
+ * Provided by Amazon SageMaker: Common Parameters. SageMaker supports both
+ * registry/repository[:tag]
and registry/repository[@digest]
+ * image path formats. For more information, see Using Your Own Algorithms with Amazon
+ * SageMaker.
The training input mode that the algorithm supports. For more information about input modes, see + * Algorithms.
+ * + *+ * Pipe mode + *
+ *If an algorithm supports Pipe
mode, Amazon SageMaker streams data directly
+ * from Amazon S3 to the container.
+ * File mode + *
+ *If an algorithm supports File
mode, SageMaker
+ * downloads the training data from S3 to the provisioned ML storage volume, and mounts the
+ * directory to the Docker volume for the training container.
You must provision the ML storage volume with sufficient capacity + * to accommodate the data downloaded from S3. In addition to the training data, the ML + * storage volume also stores the output model. The algorithm container uses the ML storage + * volume to also store intermediate information, if any.
+ *For distributed algorithms, training data is distributed uniformly. + * Your training duration is predictable if the input data objects sizes are + * approximately the same. SageMaker does not split the files any further for model training. + * If the object sizes are skewed, training won't be optimal as the data distribution is also + * skewed when one host in a training cluster is overloaded, thus becoming a bottleneck in + * training.
+ * + *+ * FastFile mode + *
+ *If an algorithm supports FastFile
mode, SageMaker streams data directly
+ * from S3 to the container with no code changes, and provides file system access to
+ * the data. Users can author their training script to interact with these files as if
+ * they were stored on disk.
+ * FastFile
mode works best when the data is read sequentially.
+ * Augmented manifest files aren't supported.
+ * The startup time is lower when there are fewer files in the S3 bucket provided.
The name of the resource algorithm to use for the hyperparameter tuning job. If you
+ * specify a value for this parameter, do not specify a value for
+ * TrainingImage
.
An array of MetricDefinition objects that specify the + * metrics + * that the algorithm emits.
+ */ + MetricDefinitions?: MetricDefinition[]; +} + +export namespace HyperParameterAlgorithmSpecification { + /** + * @internal + */ + export const filterSensitiveLog = (obj: HyperParameterAlgorithmSpecification): any => ({ + ...obj, + }); +} + +/** + *The retry strategy to use when a training job fails due to an
+ * InternalServerError
. RetryStrategy
is specified as part of
+ * the CreateTrainingJob
and CreateHyperParameterTuningJob
+ * requests. You can add the StoppingCondition
parameter to the request to
+ * limit the training time for the complete job.
The number of times to retry the job. When the job is retried, it's
+ * SecondaryStatus
is changed to STARTING
.
Defines + * the training jobs launched by a hyperparameter tuning job.
+ */ +export interface HyperParameterTrainingJobDefinition { + /** + *The job definition name.
+ */ + DefinitionName?: string; + + /** + *Defines the objective metric for a hyperparameter tuning job.
+ * Hyperparameter
+ * tuning uses the value of this metric to evaluate the training jobs it launches, and
+ * returns the training job that results in either the highest or lowest value for this
+ * metric, depending on the value you specify for the Type
+ * parameter.
Specifies ranges of integer, continuous, and categorical hyperparameters that a + * hyperparameter tuning job searches. The hyperparameter tuning job launches training jobs + * with hyperparameter values within these ranges to find the combination of values that + * result in the training job with the best performance as measured by the objective metric + * of the hyperparameter tuning job.
+ *The maximum number of items specified for Array Members
refers to
+ * the maximum number of hyperparameters for each range and also the maximum for the
+ * hyperparameter tuning job itself. That is, the sum of the number of hyperparameters
+ * for all the ranges can't exceed the maximum number specified.
Specifies the values of hyperparameters + * that + * do not change for the tuning job.
+ */ + StaticHyperParameters?: RecordThe HyperParameterAlgorithmSpecification object that + * specifies + * the resource algorithm to use for the training jobs that the tuning + * job launches.
+ */ + AlgorithmSpecification: HyperParameterAlgorithmSpecification | undefined; + + /** + *The Amazon Resource Name (ARN) of the + * IAM + * role associated with the training jobs that the tuning job + * launches.
+ */ + RoleArn: string | undefined; + + /** + *An array of Channel objects that specify + * the + * input for the training jobs that the tuning job launches.
+ */ + InputDataConfig?: Channel[]; + + /** + *The VpcConfig object that + * specifies + * the VPC that you want the training jobs that this hyperparameter + * tuning job launches to connect to. Control access to and from your + * training + * container by configuring the VPC. For more information, see Protect Training Jobs + * by Using an Amazon Virtual Private Cloud.
+ */ + VpcConfig?: VpcConfig; + + /** + *Specifies the path to the Amazon S3 bucket where you + * store + * model artifacts from the training jobs that the tuning job + * launches.
+ */ + OutputDataConfig: OutputDataConfig | undefined; + + /** + *The resources, + * including + * the compute instances and storage volumes, to use for the training + * jobs that the tuning job launches.
+ *Storage volumes store model artifacts and
+ * incremental
+ * states. Training algorithms might also use storage volumes for
+ * scratch
+ * space. If you want SageMaker to use the storage volume to store the
+ * training data, choose File
as the TrainingInputMode
in the
+ * algorithm specification. For distributed training algorithms, specify an instance count
+ * greater than 1.
Specifies a limit to how long a model hyperparameter training job can run. It also + * specifies how long a managed spot training job has to complete. When the job reaches the + * time limit, SageMaker ends the training job. Use this API to cap model training costs.
+ */ + StoppingCondition: StoppingCondition | undefined; + + /** + *Isolates the training container. No inbound or outbound network calls can be made, + * except for calls between peers within a training cluster for distributed training. If + * network isolation is used for training jobs that are configured to use a VPC, SageMaker + * downloads and uploads customer data and model artifacts through the specified VPC, but + * the training container does not have network access.
+ */ + EnableNetworkIsolation?: boolean; + + /** + *To encrypt all communications between ML compute instances in distributed training,
+ * choose True
. Encryption provides greater security for distributed training,
+ * but training might take longer. How long it takes depends on the amount of communication
+ * between compute instances, especially if you use a deep learning algorithm in
+ * distributed training.
A Boolean indicating whether managed spot training is enabled (True
) or
+ * not (False
).
Contains information about the output location for managed spot training checkpoint + * data.
+ */ + CheckpointConfig?: CheckpointConfig; + + /** + *The number of times to retry the job when the job fails due to an
+ * InternalServerError
.
A previously completed or stopped hyperparameter tuning job to be used as a starting + * point for a new hyperparameter tuning job.
+ */ +export interface ParentHyperParameterTuningJob { + /** + *The name of the hyperparameter tuning job to be used as a starting point for a new + * hyperparameter tuning job.
+ */ + HyperParameterTuningJobName?: string; +} + +export namespace ParentHyperParameterTuningJob { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ParentHyperParameterTuningJob): any => ({ + ...obj, + }); +} + +export enum HyperParameterTuningJobWarmStartType { + IDENTICAL_DATA_AND_ALGORITHM = "IdenticalDataAndAlgorithm", + TRANSFER_LEARNING = "TransferLearning", +} + +/** + *Specifies the configuration for a hyperparameter tuning job that uses one or more + * previous hyperparameter tuning jobs as a starting point. The results of previous tuning + * jobs are used to inform which combinations of hyperparameters to search over in the new + * tuning job.
+ *All training jobs launched by the new hyperparameter tuning job are evaluated by using + * the objective metric, and the training job that performs the best is compared to the + * best training jobs from the parent tuning jobs. From these, the training job that + * performs the best as measured by the objective metric is returned as the overall best + * training job.
+ *All training jobs launched by parent hyperparameter tuning jobs and the new + * hyperparameter tuning jobs count against the limit of training jobs for the tuning + * job.
+ *An array of hyperparameter tuning jobs that are used as the starting point for the new + * hyperparameter tuning job. For more information about warm starting a hyperparameter + * tuning job, see Using a Previous + * Hyperparameter Tuning Job as a Starting Point.
+ *Hyperparameter tuning jobs created before October 1, 2018 cannot be used as parent + * jobs for warm start tuning jobs.
+ */ + ParentHyperParameterTuningJobs: ParentHyperParameterTuningJob[] | undefined; + + /** + *Specifies one of the following:
+ *The new hyperparameter tuning job uses the same input data and training + * image as the parent tuning jobs. You can change the hyperparameter ranges to + * search and the maximum number of training jobs that the hyperparameter + * tuning job launches. You cannot use a new version of the training algorithm, + * unless the changes in the new version do not affect the algorithm itself. + * For example, changes that improve logging or adding support for a different + * data format are allowed. You can also change hyperparameters from tunable to + * static, and from static to tunable, but the total number of static plus + * tunable hyperparameters must remain the same as it is in all parent jobs. + * The objective metric for the new tuning job must be the same as for all + * parent jobs.
+ *The new hyperparameter tuning job can include input data, hyperparameter + * ranges, maximum number of concurrent training jobs, and maximum number of + * training jobs that are different than those of its parent hyperparameter + * tuning jobs. The training image can also be a different version from the + * version used in the parent hyperparameter tuning job. You can also change + * hyperparameters from tunable to static, and from static to tunable, but the + * total number of static plus tunable hyperparameters must remain the same as + * it is in all parent jobs. The objective metric for the new tuning job must + * be the same as for all parent jobs.
+ *The name of the tuning job. This name is the prefix for the names of all training jobs + * that this tuning job launches. The name must be unique within the same Amazon Web Services account and + * Amazon Web Services Region. The name must have 1 to 32 characters. Valid characters are a-z, A-Z, 0-9, + * and : + = @ _ % - (hyphen). The name is not case sensitive.
+ */ + HyperParameterTuningJobName: string | undefined; + + /** + *The HyperParameterTuningJobConfig object that describes the tuning + * job, including the search strategy, the objective metric used to evaluate training jobs, + * ranges of parameters to search, and resource limits for the tuning job. For more + * information, see How + * Hyperparameter Tuning Works.
+ */ + HyperParameterTuningJobConfig: HyperParameterTuningJobConfig | undefined; + + /** + *The HyperParameterTrainingJobDefinition object that describes the + * training jobs that this tuning job launches, including static hyperparameters, input + * data configuration, output data configuration, resource configuration, and stopping + * condition.
+ */ + TrainingJobDefinition?: HyperParameterTrainingJobDefinition; + + /** + *A list of the HyperParameterTrainingJobDefinition objects launched + * for this tuning job.
+ */ + TrainingJobDefinitions?: HyperParameterTrainingJobDefinition[]; + + /** + *Specifies the configuration for starting the hyperparameter tuning job using one or + * more previous tuning jobs as a starting point. The results of previous tuning jobs are + * used to inform which combinations of hyperparameters to search over in the new tuning + * job.
+ *All training jobs launched by the new hyperparameter tuning job are evaluated by using
+ * the objective metric. If you specify IDENTICAL_DATA_AND_ALGORITHM
as the
+ * WarmStartType
value for the warm start configuration, the training job
+ * that performs the best in the new tuning job is compared to the best training jobs from
+ * the parent tuning jobs. From these, the training job that performs the best as measured
+ * by the objective metric is returned as the overall best training job.
All training jobs launched by parent hyperparameter tuning jobs and the new + * hyperparameter tuning jobs count against the limit of training jobs for the tuning + * job.
+ *An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in + * different ways, for example, by purpose, owner, or environment. For more information, + * see Tagging Amazon Web Services + * Resources.
+ *Tags that you specify for the tuning job are also added to all training jobs that the + * tuning job launches.
+ */ + Tags?: Tag[]; +} + +export namespace CreateHyperParameterTuningJobRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateHyperParameterTuningJobRequest): any => ({ + ...obj, + }); +} + +export interface CreateHyperParameterTuningJobResponse { + /** + *The Amazon Resource Name (ARN) of the tuning job. SageMaker assigns an ARN to a + * hyperparameter tuning job when you create it.
+ */ + HyperParameterTuningJobArn: string | undefined; +} + +export namespace CreateHyperParameterTuningJobResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateHyperParameterTuningJobResponse): any => ({ + ...obj, + }); +} + export interface CreateImageRequest { /** *The description of the image.
@@ -7561,30 +8079,67 @@ export namespace DeleteDomainRequest { }); } -export interface DeleteEndpointInput { +export interface DeleteEdgeDeploymentPlanRequest { /** - *The name of the endpoint that you want to delete.
+ *The name of the edge deployment plan to delete.
*/ - EndpointName: string | undefined; + EdgeDeploymentPlanName: string | undefined; } -export namespace DeleteEndpointInput { +export namespace DeleteEdgeDeploymentPlanRequest { /** * @internal */ - export const filterSensitiveLog = (obj: DeleteEndpointInput): any => ({ + export const filterSensitiveLog = (obj: DeleteEdgeDeploymentPlanRequest): any => ({ ...obj, }); } -export interface DeleteEndpointConfigInput { +export interface DeleteEdgeDeploymentStageRequest { /** - *The name of the endpoint configuration that you want to delete.
+ *The name of the edge deployment plan from which the stage will be deleted.
*/ - EndpointConfigName: string | undefined; -} + EdgeDeploymentPlanName: string | undefined; -export namespace DeleteEndpointConfigInput { + /** + *The name of the stage.
+ */ + StageName: string | undefined; +} + +export namespace DeleteEdgeDeploymentStageRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteEdgeDeploymentStageRequest): any => ({ + ...obj, + }); +} + +export interface DeleteEndpointInput { + /** + *The name of the endpoint that you want to delete.
+ */ + EndpointName: string | undefined; +} + +export namespace DeleteEndpointInput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteEndpointInput): any => ({ + ...obj, + }); +} + +export interface DeleteEndpointConfigInput { + /** + *The name of the endpoint configuration that you want to delete.
+ */ + EndpointConfigName: string | undefined; +} + +export namespace DeleteEndpointConfigInput { /** * @internal */ @@ -8202,6 +8757,95 @@ export namespace DeployedImage { }); } +export enum StageStatus { + Creating = "CREATING", + Deployed = "DEPLOYED", + Failed = "FAILED", + InProgress = "INPROGRESS", + ReadyToDeploy = "READYTODEPLOY", + Starting = "STARTING", + Stopped = "STOPPED", + Stopping = "STOPPING", +} + +/** + *Contains information summarizing the deployment stage results.
+ */ +export interface EdgeDeploymentStatus { + /** + *The general status of the current stage.
+ */ + StageStatus: StageStatus | string | undefined; + + /** + *The number of edge devices with the successful deployment in the current stage.
+ */ + EdgeDeploymentSuccessInStage: number | undefined; + + /** + *The number of edge devices yet to pick up the deployment in current stage, or in progress.
+ */ + EdgeDeploymentPendingInStage: number | undefined; + + /** + *The number of edge devices that failed the deployment in current stage.
+ */ + EdgeDeploymentFailedInStage: number | undefined; + + /** + *A detailed message about deployment status in current stage.
+ */ + EdgeDeploymentStatusMessage?: string; + + /** + *The time when the deployment API started.
+ */ + EdgeDeploymentStageStartTime?: Date; +} + +export namespace EdgeDeploymentStatus { + /** + * @internal + */ + export const filterSensitiveLog = (obj: EdgeDeploymentStatus): any => ({ + ...obj, + }); +} + +/** + *Contains information summarizing the deployment stage results.
+ */ +export interface DeploymentStageStatusSummary { + /** + *The name of the stage.
+ */ + StageName: string | undefined; + + /** + *Configuration of the devices in the stage.
+ */ + DeviceSelectionConfig: DeviceSelectionConfig | undefined; + + /** + *Configuration of the deployment details.
+ */ + DeploymentConfig: EdgeDeploymentConfig | undefined; + + /** + *General status of the current state.
+ */ + DeploymentStatus: EdgeDeploymentStatus | undefined; +} + +export namespace DeploymentStageStatusSummary { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeploymentStageStatusSummary): any => ({ + ...obj, + }); +} + export interface DeregisterDevicesRequest { /** *The name of the fleet the devices belong to.
@@ -9570,6 +10214,98 @@ export namespace DescribeDomainResponse { }); } +export interface DescribeEdgeDeploymentPlanRequest { + /** + *The name of the deployment plan to describe.
+ */ + EdgeDeploymentPlanName: string | undefined; + + /** + *If the edge deployment plan has enough stages to require tokening, then this is the response from the last list of stages returned.
+ */ + NextToken?: string; + + /** + *The maximum number of results to select (50 by default).
+ */ + MaxResults?: number; +} + +export namespace DescribeEdgeDeploymentPlanRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DescribeEdgeDeploymentPlanRequest): any => ({ + ...obj, + }); +} + +export interface DescribeEdgeDeploymentPlanResponse { + /** + *The ARN of edge deployment plan.
+ */ + EdgeDeploymentPlanArn: string | undefined; + + /** + *The name of the edge deployment plan.
+ */ + EdgeDeploymentPlanName: string | undefined; + + /** + *List of models associated with the edge deployment plan.
+ */ + ModelConfigs: EdgeDeploymentModelConfig[] | undefined; + + /** + *The device fleet used for this edge deployment plan.
+ */ + DeviceFleetName: string | undefined; + + /** + *The number of edge devices with the successful deployment.
+ */ + EdgeDeploymentSuccess?: number; + + /** + *The number of edge devices yet to pick up deployment, or in progress.
+ */ + EdgeDeploymentPending?: number; + + /** + *The number of edge devices that failed the deployment.
+ */ + EdgeDeploymentFailed?: number; + + /** + *List of stages in the edge deployment plan.
+ */ + Stages: DeploymentStageStatusSummary[] | undefined; + + /** + *Token to use when calling the next set of stages in the edge deployment plan.
+ */ + NextToken?: string; + + /** + *The time when the edge deployment plan was created.
+ */ + CreationTime?: Date; + + /** + *The time when the edge deployment plan was last updated.
+ */ + LastModifiedTime?: Date; +} + +export namespace DescribeEdgeDeploymentPlanResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DescribeEdgeDeploymentPlanResponse): any => ({ + ...obj, + }); +} + export interface DescribeEdgePackagingJobRequest { /** *The name of the edge packaging job.
@@ -10755,590 +11491,3 @@ export namespace DescribeHumanTaskUiResponse { ...obj, }); } - -export interface DescribeHyperParameterTuningJobRequest { - /** - *The name of the tuning job.
- */ - HyperParameterTuningJobName: string | undefined; -} - -export namespace DescribeHyperParameterTuningJobRequest { - /** - * @internal - */ - export const filterSensitiveLog = (obj: DescribeHyperParameterTuningJobRequest): any => ({ - ...obj, - }); -} - -/** - *Shows the final value for the
- * objective
- * metric for a training job that was launched by a hyperparameter
- * tuning job. You define the objective metric in the
- * HyperParameterTuningJobObjective
parameter of HyperParameterTuningJobConfig.
Whether to - * minimize - * or maximize the objective metric. Valid values are Minimize and - * Maximize.
- */ - Type?: HyperParameterTuningJobObjectiveType | string; - - /** - *The name of the - * objective - * metric.
- */ - MetricName: string | undefined; - - /** - *The value of the objective metric.
- */ - Value: number | undefined; -} - -export namespace FinalHyperParameterTuningJobObjectiveMetric { - /** - * @internal - */ - export const filterSensitiveLog = (obj: FinalHyperParameterTuningJobObjectiveMetric): any => ({ - ...obj, - }); -} - -export enum TrainingJobStatus { - COMPLETED = "Completed", - FAILED = "Failed", - IN_PROGRESS = "InProgress", - STOPPED = "Stopped", - STOPPING = "Stopping", -} - -/** - *The container for the summary information about a training job.
- */ -export interface HyperParameterTrainingJobSummary { - /** - *The training job definition name.
- */ - TrainingJobDefinitionName?: string; - - /** - *The name of the training job.
- */ - TrainingJobName: string | undefined; - - /** - *The Amazon Resource Name (ARN) of the training job.
- */ - TrainingJobArn: string | undefined; - - /** - *The HyperParameter tuning job that launched the training job.
- */ - TuningJobName?: string; - - /** - *The date and time that the training job was created.
- */ - CreationTime: Date | undefined; - - /** - *The date and time that the training job started.
- */ - TrainingStartTime?: Date; - - /** - *Specifies the time when the training job ends on training instances. You are billed
- * for the time interval between the value of TrainingStartTime
and this time.
- * For successful jobs and stopped jobs, this is the time after model artifacts are
- * uploaded. For failed jobs, this is the time when SageMaker detects a job failure.
The - * status - * of the training job.
- */ - TrainingJobStatus: TrainingJobStatus | string | undefined; - - /** - *A - * list of the hyperparameters for which you specified ranges to - * search.
- */ - TunedHyperParameters: RecordThe - * reason that the training job failed. - *
- */ - FailureReason?: string; - - /** - *The FinalHyperParameterTuningJobObjectiveMetric object that - * specifies the - * value - * of the - * objective - * metric of the tuning job that launched this training job.
- */ - FinalHyperParameterTuningJobObjectiveMetric?: FinalHyperParameterTuningJobObjectiveMetric; - - /** - *The status of the objective metric for the training job:
- *Succeeded: The - * final - * objective metric for the training job was evaluated by the - * hyperparameter tuning job and - * used - * in the hyperparameter tuning process.
- *Pending: The training job is in progress and evaluation of its final objective - * metric is pending.
- *Failed: - * The final objective metric for the training job was not evaluated, and was not - * used in the hyperparameter tuning process. This typically occurs when the - * training job failed or did not emit an objective - * metric.
- *Specifies the number of training jobs that this hyperparameter tuning job launched, - * categorized by the status of their objective metric. The objective metric status shows - * whether the - * final - * objective metric for the training job has been evaluated by the - * tuning job and used in the hyperparameter tuning process.
- */ -export interface ObjectiveStatusCounters { - /** - *The number of training jobs whose final objective metric was evaluated by the - * hyperparameter tuning job and used in the hyperparameter tuning process.
- */ - Succeeded?: number; - - /** - *The number of training jobs that are in progress and pending evaluation of their final - * objective metric.
- */ - Pending?: number; - - /** - *The number of training jobs whose final objective metric was not evaluated and used in - * the hyperparameter tuning process. This typically occurs when the training job failed or - * did not emit an objective metric.
- */ - Failed?: number; -} - -export namespace ObjectiveStatusCounters { - /** - * @internal - */ - export const filterSensitiveLog = (obj: ObjectiveStatusCounters): any => ({ - ...obj, - }); -} - -/** - *The numbers of training jobs launched by a hyperparameter tuning job, categorized by - * status.
- */ -export interface TrainingJobStatusCounters { - /** - *The number of completed training jobs launched by the hyperparameter tuning - * job.
- */ - Completed?: number; - - /** - *The number of in-progress training jobs launched by a hyperparameter tuning - * job.
- */ - InProgress?: number; - - /** - *The number of training jobs that failed, but can be retried. A failed training job can - * be retried only if it failed because an internal service error occurred.
- */ - RetryableError?: number; - - /** - *The number of training jobs that failed and can't be retried. A failed training job - * can't be retried if it failed because a client error occurred.
- */ - NonRetryableError?: number; - - /** - *The number of training jobs launched by a hyperparameter tuning job that were - * manually - * stopped.
- */ - Stopped?: number; -} - -export namespace TrainingJobStatusCounters { - /** - * @internal - */ - export const filterSensitiveLog = (obj: TrainingJobStatusCounters): any => ({ - ...obj, - }); -} - -export interface DescribeHyperParameterTuningJobResponse { - /** - *The name of the tuning job.
- */ - HyperParameterTuningJobName: string | undefined; - - /** - *The - * Amazon Resource Name (ARN) of the tuning job.
- */ - HyperParameterTuningJobArn: string | undefined; - - /** - *The HyperParameterTuningJobConfig object that specifies the - * configuration of the tuning job.
- */ - HyperParameterTuningJobConfig: HyperParameterTuningJobConfig | undefined; - - /** - *The HyperParameterTrainingJobDefinition object that specifies the - * definition of the training jobs that this tuning job launches.
- */ - TrainingJobDefinition?: HyperParameterTrainingJobDefinition; - - /** - *A list of the HyperParameterTrainingJobDefinition objects launched - * for this tuning job.
- */ - TrainingJobDefinitions?: HyperParameterTrainingJobDefinition[]; - - /** - *The status of the tuning job: InProgress, Completed, Failed, Stopping, or - * Stopped.
- */ - HyperParameterTuningJobStatus: HyperParameterTuningJobStatus | string | undefined; - - /** - *The date and time that the tuning job started.
- */ - CreationTime: Date | undefined; - - /** - *The date and time that the tuning job ended.
- */ - HyperParameterTuningEndTime?: Date; - - /** - *The date and time that the status of the tuning job was modified.
- */ - LastModifiedTime?: Date; - - /** - *The TrainingJobStatusCounters object that specifies the number of - * training jobs, categorized by status, that this tuning job launched.
- */ - TrainingJobStatusCounters: TrainingJobStatusCounters | undefined; - - /** - *The ObjectiveStatusCounters object that specifies the number of - * training jobs, categorized by the status of their final objective metric, that this - * tuning job launched.
- */ - ObjectiveStatusCounters: ObjectiveStatusCounters | undefined; - - /** - *A TrainingJobSummary object that describes the training job that - * completed with the best current HyperParameterTuningJobObjective.
- */ - BestTrainingJob?: HyperParameterTrainingJobSummary; - - /** - *If the hyperparameter tuning job is an warm start tuning job with a
- * WarmStartType
of IDENTICAL_DATA_AND_ALGORITHM
, this is the
- * TrainingJobSummary for the training job with the best objective
- * metric value of all training jobs launched by this tuning job and all parent jobs
- * specified for the warm start tuning job.
The configuration for starting the hyperparameter parameter tuning job using one or - * more previous tuning jobs as a starting point. The results of previous tuning jobs are - * used to inform which combinations of hyperparameters to search over in the new tuning - * job.
- */ - WarmStartConfig?: HyperParameterTuningJobWarmStartConfig; - - /** - *If the tuning job failed, the reason it failed.
- */ - FailureReason?: string; -} - -export namespace DescribeHyperParameterTuningJobResponse { - /** - * @internal - */ - export const filterSensitiveLog = (obj: DescribeHyperParameterTuningJobResponse): any => ({ - ...obj, - }); -} - -export interface DescribeImageRequest { - /** - *The name of the image to describe.
- */ - ImageName: string | undefined; -} - -export namespace DescribeImageRequest { - /** - * @internal - */ - export const filterSensitiveLog = (obj: DescribeImageRequest): any => ({ - ...obj, - }); -} - -export enum ImageStatus { - CREATED = "CREATED", - CREATE_FAILED = "CREATE_FAILED", - CREATING = "CREATING", - DELETE_FAILED = "DELETE_FAILED", - DELETING = "DELETING", - UPDATE_FAILED = "UPDATE_FAILED", - UPDATING = "UPDATING", -} - -export interface DescribeImageResponse { - /** - *When the image was created.
- */ - CreationTime?: Date; - - /** - *The description of the image.
- */ - Description?: string; - - /** - *The name of the image as displayed.
- */ - DisplayName?: string; - - /** - *When a create, update, or delete operation fails, the reason for the failure.
- */ - FailureReason?: string; - - /** - *The Amazon Resource Name (ARN) of the image.
- */ - ImageArn?: string; - - /** - *The name of the image.
- */ - ImageName?: string; - - /** - *The status of the image.
- */ - ImageStatus?: ImageStatus | string; - - /** - *When the image was last modified.
- */ - LastModifiedTime?: Date; - - /** - *The Amazon Resource Name (ARN) of the IAM role that enables Amazon SageMaker to perform tasks on your behalf.
- */ - RoleArn?: string; -} - -export namespace DescribeImageResponse { - /** - * @internal - */ - export const filterSensitiveLog = (obj: DescribeImageResponse): any => ({ - ...obj, - }); -} - -export interface DescribeImageVersionRequest { - /** - *The name of the image.
- */ - ImageName: string | undefined; - - /** - *The version of the image. If not specified, the latest version is described.
- */ - Version?: number; -} - -export namespace DescribeImageVersionRequest { - /** - * @internal - */ - export const filterSensitiveLog = (obj: DescribeImageVersionRequest): any => ({ - ...obj, - }); -} - -export enum ImageVersionStatus { - CREATED = "CREATED", - CREATE_FAILED = "CREATE_FAILED", - CREATING = "CREATING", - DELETE_FAILED = "DELETE_FAILED", - DELETING = "DELETING", -} - -export interface DescribeImageVersionResponse { - /** - *The registry path of the container image on which this image version is based.
- */ - BaseImage?: string; - - /** - *The registry path of the container image that contains this image version.
- */ - ContainerImage?: string; - - /** - *When the version was created.
- */ - CreationTime?: Date; - - /** - *When a create or delete operation fails, the reason for the failure.
- */ - FailureReason?: string; - - /** - *The Amazon Resource Name (ARN) of the image the version is based on.
- */ - ImageArn?: string; - - /** - *The ARN of the version.
- */ - ImageVersionArn?: string; - - /** - *The status of the version.
- */ - ImageVersionStatus?: ImageVersionStatus | string; - - /** - *When the version was last modified.
- */ - LastModifiedTime?: Date; - - /** - *The version number.
- */ - Version?: number; -} - -export namespace DescribeImageVersionResponse { - /** - * @internal - */ - export const filterSensitiveLog = (obj: DescribeImageVersionResponse): any => ({ - ...obj, - }); -} - -export interface DescribeInferenceRecommendationsJobRequest { - /** - *The name of the job. The name must be unique within an - * Amazon Web Services Region in the Amazon Web Services account.
- */ - JobName: string | undefined; -} - -export namespace DescribeInferenceRecommendationsJobRequest { - /** - * @internal - */ - export const filterSensitiveLog = (obj: DescribeInferenceRecommendationsJobRequest): any => ({ - ...obj, - }); -} - -/** - *The endpoint configuration made by Inference Recommender during a recommendation job.
- */ -export interface EndpointOutputConfiguration { - /** - *The name of the endpoint made during a recommendation job.
- */ - EndpointName: string | undefined; - - /** - *The name of the production variant (deployed model) made during a recommendation job.
- */ - VariantName: string | undefined; - - /** - *The instance type recommended by Amazon SageMaker Inference Recommender.
- */ - InstanceType: ProductionVariantInstanceType | string | undefined; - - /** - *The number of instances recommended to launch initially.
- */ - InitialInstanceCount: number | undefined; -} - -export namespace EndpointOutputConfiguration { - /** - * @internal - */ - export const filterSensitiveLog = (obj: EndpointOutputConfiguration): any => ({ - ...obj, - }); -} diff --git a/clients/client-sagemaker/src/models/models_2.ts b/clients/client-sagemaker/src/models/models_2.ts index 38495b0644841..8b24b76153230 100644 --- a/clients/client-sagemaker/src/models/models_2.ts +++ b/clients/client-sagemaker/src/models/models_2.ts @@ -20,25 +20,22 @@ import { AutoMLSortBy, AutoMLSortOrder, BatchStrategy, - CacheHitResult, - CallbackStepMetadata, CandidateSortBy, CandidateStatus, Channel, CheckpointConfig, - ClarifyCheckStepMetadata, CodeRepositorySortBy, CodeRepositorySortOrder, CodeRepositorySummary, CognitoConfig, CompilationJobStatus, CompilationJobSummary, - ConditionStepMetadata, ContainerDefinition, ContextSummary, EdgeOutputConfig, FeatureDefinition, FeatureType, + HyperParameterTuningJobObjectiveType, HyperParameterTuningJobStrategyType, InferenceSpecification, MetadataProperties, @@ -48,14 +45,15 @@ import { MonitoringOutputConfig, MonitoringResources, MonitoringStoppingCondition, + ObjectiveStatus, OfflineStoreConfig, OnlineStoreConfig, OutputDataConfig, OutputParameter, + ProductionVariantInstanceType, ResourceConfig, ResourceLimits, ResourceSpec, - RetryStrategy, StoppingCondition, Tag, TransformInput, @@ -76,7 +74,6 @@ import { DomainStatus, DriftCheckBaselines, EdgePackagingJobStatus, - EndpointOutputConfiguration, EndpointStatus, ExperimentConfig, ExperimentSource, @@ -84,9 +81,9 @@ import { FeatureParameter, FlowDefinitionStatus, HumanTaskConfig, - HyperParameterTuningJobStatus, - ImageStatus, - ImageVersionStatus, + HyperParameterTrainingJobDefinition, + HyperParameterTuningJobConfig, + HyperParameterTuningJobWarmStartConfig, InferenceExecutionConfig, InstanceMetadataServiceConfiguration, LabelingJobAlgorithmsConfig, @@ -114,7 +111,6 @@ import { NotebookInstanceAcceleratorType, 
NotebookInstanceLifecycleHook, NotificationConfiguration, - ObjectiveStatusCounters, OfflineStoreStatus, OfflineStoreStatusValue, ParallelismConfiguration, @@ -128,6 +124,7 @@ import { RecommendationJobInputConfig, RecommendationJobStoppingConditions, RecommendationJobType, + RetryStrategy, RootAccess, RuleEvaluationStatus, ServiceCatalogProvisioningDetails, @@ -135,1140 +132,1259 @@ import { SourceIpConfig, StudioLifecycleConfigAppType, TensorBoardOutputConfig, - TrainingJobStatus, - TrainingJobStatusCounters, TrialComponentArtifact, TrialComponentParameterValue, TrialComponentStatus, } from "./models_1"; -/** - *The metrics of recommendations.
- */ -export interface RecommendationMetrics { +export interface DescribeHyperParameterTuningJobRequest { /** - *Defines the cost per hour for the instance.
+ *The name of the tuning job.
*/ - CostPerHour: number | undefined; + HyperParameterTuningJobName: string | undefined; +} +export namespace DescribeHyperParameterTuningJobRequest { /** - *Defines the cost per inference for the instance .
+ * @internal */ - CostPerInference: number | undefined; + export const filterSensitiveLog = (obj: DescribeHyperParameterTuningJobRequest): any => ({ + ...obj, + }); +} +/** + *Shows the final value for the
+ * objective
+ * metric for a training job that was launched by a hyperparameter
+ * tuning job. You define the objective metric in the
+ * HyperParameterTuningJobObjective
parameter of HyperParameterTuningJobConfig.
The expected maximum number of requests per minute for the instance.
+ *Whether to + * minimize + * or maximize the objective metric. Valid values are Minimize and + * Maximize.
*/ - MaxInvocations: number | undefined; + Type?: HyperParameterTuningJobObjectiveType | string; /** - *The expected model latency at maximum invocation per minute for the instance.
+ *The name of the + * objective + * metric.
*/ - ModelLatency: number | undefined; + MetricName: string | undefined; + + /** + *The value of the objective metric.
+ */ + Value: number | undefined; } -export namespace RecommendationMetrics { +export namespace FinalHyperParameterTuningJobObjectiveMetric { /** * @internal */ - export const filterSensitiveLog = (obj: RecommendationMetrics): any => ({ + export const filterSensitiveLog = (obj: FinalHyperParameterTuningJobObjectiveMetric): any => ({ ...obj, }); } +export enum TrainingJobStatus { + COMPLETED = "Completed", + FAILED = "Failed", + IN_PROGRESS = "InProgress", + STOPPED = "Stopped", + STOPPING = "Stopping", +} + /** - *A list of environment parameters suggested by the Amazon SageMaker Inference Recommender.
+ *The container for the summary information about a training job.
*/ -export interface EnvironmentParameter { +export interface HyperParameterTrainingJobSummary { /** - *The environment key suggested by the Amazon SageMaker Inference Recommender.
+ *The training job definition name.
*/ - Key: string | undefined; + TrainingJobDefinitionName?: string; /** - *The value type suggested by the Amazon SageMaker Inference Recommender.
+ *The name of the training job.
*/ - ValueType: string | undefined; + TrainingJobName: string | undefined; /** - *The value suggested by the Amazon SageMaker Inference Recommender.
+ *The Amazon Resource Name (ARN) of the training job.
*/ - Value: string | undefined; + TrainingJobArn: string | undefined; + + /** + *The HyperParameter tuning job that launched the training job.
+ */ + TuningJobName?: string; + + /** + *The date and time that the training job was created.
+ */ + CreationTime: Date | undefined; + + /** + *The date and time that the training job started.
+ */ + TrainingStartTime?: Date; + + /** + *Specifies the time when the training job ends on training instances. You are billed
+ * for the time interval between the value of TrainingStartTime
and this time.
+ * For successful jobs and stopped jobs, this is the time after model artifacts are
+ * uploaded. For failed jobs, this is the time when SageMaker detects a job failure.
The + * status + * of the training job.
+ */ + TrainingJobStatus: TrainingJobStatus | string | undefined; + + /** + *A + * list of the hyperparameters for which you specified ranges to + * search.
+ */ + TunedHyperParameters: RecordThe + * reason that the training job failed. + *
+ */ + FailureReason?: string; + + /** + *The FinalHyperParameterTuningJobObjectiveMetric object that + * specifies the + * value + * of the + * objective + * metric of the tuning job that launched this training job.
+ */ + FinalHyperParameterTuningJobObjectiveMetric?: FinalHyperParameterTuningJobObjectiveMetric; + + /** + *The status of the objective metric for the training job:
+ *Succeeded: The + * final + * objective metric for the training job was evaluated by the + * hyperparameter tuning job and + * used + * in the hyperparameter tuning process.
+ *Pending: The training job is in progress and evaluation of its final objective + * metric is pending.
+ *Failed: + * The final objective metric for the training job was not evaluated, and was not + * used in the hyperparameter tuning process. This typically occurs when the + * training job failed or did not emit an objective + * metric.
+ *Defines the model configuration. Includes the specification name and environment parameters.
+ *Specifies the number of training jobs that this hyperparameter tuning job launched, + * categorized by the status of their objective metric. The objective metric status shows + * whether the + * final + * objective metric for the training job has been evaluated by the + * tuning job and used in the hyperparameter tuning process.
*/ -export interface ModelConfiguration { +export interface ObjectiveStatusCounters { /** - *The inference specification name in the model package version.
+ *The number of training jobs whose final objective metric was evaluated by the + * hyperparameter tuning job and used in the hyperparameter tuning process.
*/ - InferenceSpecificationName?: string; + Succeeded?: number; /** - *Defines the environment parameters that includes key, value types, and values.
+ *The number of training jobs that are in progress and pending evaluation of their final + * objective metric.
*/ - EnvironmentParameters?: EnvironmentParameter[]; + Pending?: number; + + /** + *The number of training jobs whose final objective metric was not evaluated and used in + * the hyperparameter tuning process. This typically occurs when the training job failed or + * did not emit an objective metric.
+ */ + Failed?: number; } -export namespace ModelConfiguration { +export namespace ObjectiveStatusCounters { /** * @internal */ - export const filterSensitiveLog = (obj: ModelConfiguration): any => ({ + export const filterSensitiveLog = (obj: ObjectiveStatusCounters): any => ({ ...obj, }); } /** - *A list of recommendations made by Amazon SageMaker Inference Recommender.
+ *The numbers of training jobs launched by a hyperparameter tuning job, categorized by + * status.
*/ -export interface InferenceRecommendation { +export interface TrainingJobStatusCounters { /** - *The metrics used to decide what recommendation to make.
+ *The number of completed training jobs launched by the hyperparameter tuning + * job.
*/ - Metrics: RecommendationMetrics | undefined; + Completed?: number; /** - *Defines the endpoint configuration parameters.
+ *The number of in-progress training jobs launched by a hyperparameter tuning + * job.
*/ - EndpointConfiguration: EndpointOutputConfiguration | undefined; + InProgress?: number; /** - *Defines the model configuration.
+ *The number of training jobs that failed, but can be retried. A failed training job can + * be retried only if it failed because an internal service error occurred.
*/ - ModelConfiguration: ModelConfiguration | undefined; + RetryableError?: number; + + /** + *The number of training jobs that failed and can't be retried. A failed training job + * can't be retried if it failed because a client error occurred.
+ */ + NonRetryableError?: number; + + /** + *The number of training jobs launched by a hyperparameter tuning job that were + * manually + * stopped.
+ */ + Stopped?: number; } -export namespace InferenceRecommendation { +export namespace TrainingJobStatusCounters { /** * @internal */ - export const filterSensitiveLog = (obj: InferenceRecommendation): any => ({ + export const filterSensitiveLog = (obj: TrainingJobStatusCounters): any => ({ ...obj, }); } -export enum RecommendationJobStatus { - COMPLETED = "COMPLETED", - FAILED = "FAILED", - IN_PROGRESS = "IN_PROGRESS", - PENDING = "PENDING", - STOPPED = "STOPPED", - STOPPING = "STOPPING", -} - -export interface DescribeInferenceRecommendationsJobResponse { +export interface DescribeHyperParameterTuningJobResponse { /** - *The name of the job. The name must be unique within an - * Amazon Web Services Region in the Amazon Web Services account.
+ *The name of the tuning job.
*/ - JobName: string | undefined; + HyperParameterTuningJobName: string | undefined; /** - *The job description that you provided when you initiated the job.
+ *The + * Amazon Resource Name (ARN) of the tuning job.
*/ - JobDescription?: string; + HyperParameterTuningJobArn: string | undefined; /** - *The job type that you provided when you initiated the job.
+ *The HyperParameterTuningJobConfig object that specifies the + * configuration of the tuning job.
*/ - JobType: RecommendationJobType | string | undefined; + HyperParameterTuningJobConfig: HyperParameterTuningJobConfig | undefined; /** - *The Amazon Resource Name (ARN) of the job.
+ *The HyperParameterTrainingJobDefinition object that specifies the + * definition of the training jobs that this tuning job launches.
*/ - JobArn: string | undefined; + TrainingJobDefinition?: HyperParameterTrainingJobDefinition; /** - *The Amazon Resource Name (ARN) of the Amazon Web Services - * Identity and Access Management (IAM) role you provided when you initiated the job.
+ *A list of the HyperParameterTrainingJobDefinition objects launched + * for this tuning job.
*/ - RoleArn: string | undefined; + TrainingJobDefinitions?: HyperParameterTrainingJobDefinition[]; /** - *The status of the job.
+ *The status of the tuning job: InProgress, Completed, Failed, Stopping, or + * Stopped.
*/ - Status: RecommendationJobStatus | string | undefined; + HyperParameterTuningJobStatus: HyperParameterTuningJobStatus | string | undefined; /** - *A timestamp that shows when the job was created.
+ *The date and time that the tuning job started.
*/ CreationTime: Date | undefined; /** - *A timestamp that shows when the job completed.
+ *The date and time that the tuning job ended.
*/ - CompletionTime?: Date; + HyperParameterTuningEndTime?: Date; /** - *A timestamp that shows when the job was last modified.
+ *The date and time that the status of the tuning job was modified.
*/ - LastModifiedTime: Date | undefined; + LastModifiedTime?: Date; /** - *If the job fails, provides information why the job failed.
+ *The TrainingJobStatusCounters object that specifies the number of + * training jobs, categorized by status, that this tuning job launched.
*/ - FailureReason?: string; + TrainingJobStatusCounters: TrainingJobStatusCounters | undefined; /** - *Returns information about the versioned model package Amazon Resource Name (ARN), - * the traffic pattern, and endpoint configurations you provided when you initiated the job.
+ *The ObjectiveStatusCounters object that specifies the number of + * training jobs, categorized by the status of their final objective metric, that this + * tuning job launched.
*/ - InputConfig: RecommendationJobInputConfig | undefined; + ObjectiveStatusCounters: ObjectiveStatusCounters | undefined; /** - *The stopping conditions that you provided when you initiated the job.
+ *A TrainingJobSummary object that describes the training job that + * completed with the best current HyperParameterTuningJobObjective.
*/ - StoppingConditions?: RecommendationJobStoppingConditions; + BestTrainingJob?: HyperParameterTrainingJobSummary; /** - *The recommendations made by Inference Recommender.
+ *If the hyperparameter tuning job is an warm start tuning job with a
+ * WarmStartType
of IDENTICAL_DATA_AND_ALGORITHM
, this is the
+ * TrainingJobSummary for the training job with the best objective
+ * metric value of all training jobs launched by this tuning job and all parent jobs
+ * specified for the warm start tuning job.
The configuration for starting the hyperparameter parameter tuning job using one or + * more previous tuning jobs as a starting point. The results of previous tuning jobs are + * used to inform which combinations of hyperparameters to search over in the new tuning + * job.
+ */ + WarmStartConfig?: HyperParameterTuningJobWarmStartConfig; + + /** + *If the tuning job failed, the reason it failed.
+ */ + FailureReason?: string; } -export namespace DescribeInferenceRecommendationsJobResponse { +export namespace DescribeHyperParameterTuningJobResponse { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeInferenceRecommendationsJobResponse): any => ({ + export const filterSensitiveLog = (obj: DescribeHyperParameterTuningJobResponse): any => ({ ...obj, }); } -export interface DescribeLabelingJobRequest { +export interface DescribeImageRequest { /** - *The name of the labeling job to return information for.
+ *The name of the image to describe.
*/ - LabelingJobName: string | undefined; + ImageName: string | undefined; } -export namespace DescribeLabelingJobRequest { +export namespace DescribeImageRequest { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeLabelingJobRequest): any => ({ + export const filterSensitiveLog = (obj: DescribeImageRequest): any => ({ ...obj, }); } -/** - *Provides a breakdown of the number of objects labeled.
- */ -export interface LabelCounters { +export enum ImageStatus { + CREATED = "CREATED", + CREATE_FAILED = "CREATE_FAILED", + CREATING = "CREATING", + DELETE_FAILED = "DELETE_FAILED", + DELETING = "DELETING", + UPDATE_FAILED = "UPDATE_FAILED", + UPDATING = "UPDATING", +} + +export interface DescribeImageResponse { /** - *The total number of objects labeled.
+ *When the image was created.
*/ - TotalLabeled?: number; + CreationTime?: Date; /** - *The total number of objects labeled by a human worker.
+ *The description of the image.
*/ - HumanLabeled?: number; + Description?: string; /** - *The total number of objects labeled by automated data labeling.
+ *The name of the image as displayed.
*/ - MachineLabeled?: number; + DisplayName?: string; /** - *The total number of objects that could not be labeled due to an error.
+ *When a create, update, or delete operation fails, the reason for the failure.
*/ - FailedNonRetryableError?: number; + FailureReason?: string; /** - *The total number of objects not yet labeled.
+ *The Amazon Resource Name (ARN) of the image.
*/ - Unlabeled?: number; + ImageArn?: string; + + /** + *The name of the image.
+ */ + ImageName?: string; + + /** + *The status of the image.
+ */ + ImageStatus?: ImageStatus | string; + + /** + *When the image was last modified.
+ */ + LastModifiedTime?: Date; + + /** + *The Amazon Resource Name (ARN) of the IAM role that enables Amazon SageMaker to perform tasks on your behalf.
+ */ + RoleArn?: string; } -export namespace LabelCounters { +export namespace DescribeImageResponse { /** * @internal */ - export const filterSensitiveLog = (obj: LabelCounters): any => ({ + export const filterSensitiveLog = (obj: DescribeImageResponse): any => ({ ...obj, }); } -/** - *Specifies the location of the output produced by the labeling job.
- */ -export interface LabelingJobOutput { +export interface DescribeImageVersionRequest { /** - *The Amazon S3 bucket location of the manifest file for labeled data.
+ *The name of the image.
*/ - OutputDatasetS3Uri: string | undefined; + ImageName: string | undefined; /** - *The Amazon Resource Name (ARN) for the most recent SageMaker model trained as part of - * automated data labeling.
+ *The version of the image. If not specified, the latest version is described.
*/ - FinalActiveLearningModelArn?: string; + Version?: number; } -export namespace LabelingJobOutput { +export namespace DescribeImageVersionRequest { /** * @internal */ - export const filterSensitiveLog = (obj: LabelingJobOutput): any => ({ + export const filterSensitiveLog = (obj: DescribeImageVersionRequest): any => ({ ...obj, }); } -export enum LabelingJobStatus { - COMPLETED = "Completed", - FAILED = "Failed", - INITIALIZING = "Initializing", - IN_PROGRESS = "InProgress", - STOPPED = "Stopped", - STOPPING = "Stopping", +export enum ImageVersionStatus { + CREATED = "CREATED", + CREATE_FAILED = "CREATE_FAILED", + CREATING = "CREATING", + DELETE_FAILED = "DELETE_FAILED", + DELETING = "DELETING", } -export interface DescribeLabelingJobResponse { - /** - *The processing status of the labeling job.
- */ - LabelingJobStatus: LabelingJobStatus | string | undefined; - - /** - *Provides a breakdown of the number of data objects labeled by humans, the number of - * objects labeled by machine, the number of objects than couldn't be labeled, and the - * total number of objects labeled.
- */ - LabelCounters: LabelCounters | undefined; - - /** - *If the job failed, the reason that it failed.
- */ - FailureReason?: string; - - /** - *The date and time that the labeling job was created.
- */ - CreationTime: Date | undefined; - +export interface DescribeImageVersionResponse { /** - *The date and time that the labeling job was last updated.
+ *The registry path of the container image on which this image version is based.
*/ - LastModifiedTime: Date | undefined; + BaseImage?: string; /** - *A unique identifier for work done as part of a labeling job.
+ *The registry path of the container image that contains this image version.
*/ - JobReferenceCode: string | undefined; + ContainerImage?: string; /** - *The name assigned to the labeling job when it was created.
+ *When the version was created.
*/ - LabelingJobName: string | undefined; + CreationTime?: Date; /** - *The Amazon Resource Name (ARN) of the labeling job.
+ *When a create or delete operation fails, the reason for the failure.
*/ - LabelingJobArn: string | undefined; + FailureReason?: string; /** - *The attribute used as the label in the output manifest file.
+ *The Amazon Resource Name (ARN) of the image the version is based on.
*/ - LabelAttributeName?: string; + ImageArn?: string; /** - *Input configuration information for the labeling job, such as the Amazon S3 location of the - * data objects and the location of the manifest file that describes the data - * objects.
+ *The ARN of the version.
*/ - InputConfig: LabelingJobInputConfig | undefined; + ImageVersionArn?: string; /** - *The location of the job's output data and the Amazon Web Services Key Management Service key ID for the key used to - * encrypt the output data, if any.
+ *The status of the version.
*/ - OutputConfig: LabelingJobOutputConfig | undefined; + ImageVersionStatus?: ImageVersionStatus | string; /** - *The Amazon Resource Name (ARN) that SageMaker assumes to perform tasks on your behalf - * during data labeling.
+ *When the version was last modified.
*/ - RoleArn: string | undefined; + LastModifiedTime?: Date; /** - *The S3 location of the JSON file that defines the categories used to label data - * objects. Please note the following label-category limits:
- *Semantic segmentation labeling jobs using automated labeling: 20 labels
- *Box bounding labeling jobs (all): 10 labels
- *The file is a JSON structure in the following format:
- *
- * {
- *
- * "document-version": "2018-11-28"
- *
- * "labels": [
- *
- * {
- *
- * "label": "label 1"
- *
- * },
- *
- * {
- *
- * "label": "label 2"
- *
- * },
- *
- * ...
- *
- * {
- *
- * "label": "label n"
- *
- * }
- *
- * ]
- *
- * }
- *
The version number.
*/ - LabelCategoryConfigS3Uri?: string; + Version?: number; +} +export namespace DescribeImageVersionResponse { /** - *A set of conditions for stopping a labeling job. If any of the conditions are met, the - * job is automatically stopped.
+ * @internal */ - StoppingConditions?: LabelingJobStoppingConditions; + export const filterSensitiveLog = (obj: DescribeImageVersionResponse): any => ({ + ...obj, + }); +} +export interface DescribeInferenceRecommendationsJobRequest { /** - *Configuration information for automated data labeling.
+ *The name of the job. The name must be unique within an + * Amazon Web Services Region in the Amazon Web Services account.
*/ - LabelingJobAlgorithmsConfig?: LabelingJobAlgorithmsConfig; + JobName: string | undefined; +} +export namespace DescribeInferenceRecommendationsJobRequest { /** - *Configuration information required for human workers to complete a labeling - * task.
+ * @internal */ - HumanTaskConfig: HumanTaskConfig | undefined; + export const filterSensitiveLog = (obj: DescribeInferenceRecommendationsJobRequest): any => ({ + ...obj, + }); +} +/** + *The endpoint configuration made by Inference Recommender during a recommendation job.
+ */ +export interface EndpointOutputConfiguration { /** - *An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in - * different ways, for example, by purpose, owner, or environment. For more information, - * see Tagging Amazon Web Services - * Resources.
+ *The name of the endpoint made during a recommendation job.
*/ - Tags?: Tag[]; + EndpointName: string | undefined; /** - *The location of the output produced by the labeling job.
+ *The name of the production variant (deployed model) made during a recommendation job.
*/ - LabelingJobOutput?: LabelingJobOutput; -} + VariantName: string | undefined; -export namespace DescribeLabelingJobResponse { /** - * @internal + *The instance type recommended by Amazon SageMaker Inference Recommender.
*/ - export const filterSensitiveLog = (obj: DescribeLabelingJobResponse): any => ({ - ...obj, - }); -} + InstanceType: ProductionVariantInstanceType | string | undefined; -export interface DescribeLineageGroupRequest { /** - *The name of the lineage group.
+ *The number of instances recommended to launch initially.
*/ - LineageGroupName: string | undefined; + InitialInstanceCount: number | undefined; } -export namespace DescribeLineageGroupRequest { +export namespace EndpointOutputConfiguration { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeLineageGroupRequest): any => ({ + export const filterSensitiveLog = (obj: EndpointOutputConfiguration): any => ({ ...obj, }); } -export interface DescribeLineageGroupResponse { +/** + *The metrics of recommendations.
+ */ +export interface RecommendationMetrics { /** - *The name of the lineage group.
+ *Defines the cost per hour for the instance.
*/ - LineageGroupName?: string; + CostPerHour: number | undefined; /** - *The Amazon Resource Name (ARN) of the lineage group.
+ *Defines the cost per inference for the instance .
*/ - LineageGroupArn?: string; + CostPerInference: number | undefined; /** - *The display name of the lineage group.
+ *The expected maximum number of requests per minute for the instance.
*/ - DisplayName?: string; + MaxInvocations: number | undefined; /** - *The description of the lineage group.
+ *The expected model latency at maximum invocation per minute for the instance.
*/ - Description?: string; + ModelLatency: number | undefined; +} +export namespace RecommendationMetrics { /** - *The creation time of lineage group.
+ * @internal */ - CreationTime?: Date; + export const filterSensitiveLog = (obj: RecommendationMetrics): any => ({ + ...obj, + }); +} +/** + *A list of environment parameters suggested by the Amazon SageMaker Inference Recommender.
+ */ +export interface EnvironmentParameter { /** - *Information about the user who created or modified an experiment, trial, trial - * component, lineage group, or project.
+ *The environment key suggested by the Amazon SageMaker Inference Recommender.
*/ - CreatedBy?: UserContext; + Key: string | undefined; /** - *The last modified time of the lineage group.
+ *The value type suggested by the Amazon SageMaker Inference Recommender.
*/ - LastModifiedTime?: Date; + ValueType: string | undefined; /** - *Information about the user who created or modified an experiment, trial, trial - * component, lineage group, or project.
+ *The value suggested by the Amazon SageMaker Inference Recommender.
*/ - LastModifiedBy?: UserContext; + Value: string | undefined; } -export namespace DescribeLineageGroupResponse { +export namespace EnvironmentParameter { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeLineageGroupResponse): any => ({ + export const filterSensitiveLog = (obj: EnvironmentParameter): any => ({ ...obj, }); } -export interface DescribeModelInput { +/** + *Defines the model configuration. Includes the specification name and environment parameters.
+ */ +export interface ModelConfiguration { /** - *The name of the model.
+ *The inference specification name in the model package version.
*/ - ModelName: string | undefined; + InferenceSpecificationName?: string; + + /** + *Defines the environment parameters that includes key, value types, and values.
+ */ + EnvironmentParameters?: EnvironmentParameter[]; } -export namespace DescribeModelInput { +export namespace ModelConfiguration { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeModelInput): any => ({ + export const filterSensitiveLog = (obj: ModelConfiguration): any => ({ ...obj, }); } -export interface DescribeModelOutput { +/** + *A list of recommendations made by Amazon SageMaker Inference Recommender.
+ */ +export interface InferenceRecommendation { /** - *Name of the SageMaker model.
+ *The metrics used to decide what recommendation to make.
*/ - ModelName: string | undefined; + Metrics: RecommendationMetrics | undefined; /** - *The location of the primary inference code, associated artifacts, and custom - * environment map that the inference code uses when it is deployed in production. - *
+ *Defines the endpoint configuration parameters.
*/ - PrimaryContainer?: ContainerDefinition; + EndpointConfiguration: EndpointOutputConfiguration | undefined; /** - *The containers in the inference pipeline.
+ *Defines the model configuration.
*/ - Containers?: ContainerDefinition[]; + ModelConfiguration: ModelConfiguration | undefined; +} +export namespace InferenceRecommendation { /** - *Specifies details of how containers in a multi-container endpoint are called.
+ * @internal */ - InferenceExecutionConfig?: InferenceExecutionConfig; - - /** - *The Amazon Resource Name (ARN) of the IAM role that you specified for the - * model.
- */ - ExecutionRoleArn: string | undefined; - - /** - *A VpcConfig object that specifies the VPC that this model has access - * to. For more information, see Protect Endpoints by Using an Amazon Virtual - * Private Cloud - *
- */ - VpcConfig?: VpcConfig; - - /** - *A timestamp that shows when the model was created.
- */ - CreationTime: Date | undefined; - - /** - *The Amazon Resource Name (ARN) of the model.
- */ - ModelArn: string | undefined; - - /** - *If True
, no inbound or outbound network calls can be made to or from the
- * model container.
The name of the model bias job definition. The name must be unique within an Amazon Web Services Region - * in the Amazon Web Services account.
+ *The name of the job. The name must be unique within an + * Amazon Web Services Region in the Amazon Web Services account.
*/ - JobDefinitionName: string | undefined; -} + JobName: string | undefined; -export namespace DescribeModelBiasJobDefinitionRequest { /** - * @internal + *The job description that you provided when you initiated the job.
*/ - export const filterSensitiveLog = (obj: DescribeModelBiasJobDefinitionRequest): any => ({ - ...obj, - }); -} + JobDescription?: string; -export interface DescribeModelBiasJobDefinitionResponse { /** - *The Amazon Resource Name (ARN) of the model bias job.
+ *The job type that you provided when you initiated the job.
*/ - JobDefinitionArn: string | undefined; + JobType: RecommendationJobType | string | undefined; /** - *The name of the bias job definition. The name must be unique within an Amazon Web Services Region in the - * Amazon Web Services account.
+ *The Amazon Resource Name (ARN) of the job.
*/ - JobDefinitionName: string | undefined; + JobArn: string | undefined; /** - *The time at which the model bias job was created.
+ *The Amazon Resource Name (ARN) of the Amazon Web Services + * Identity and Access Management (IAM) role you provided when you initiated the job.
*/ - CreationTime: Date | undefined; + RoleArn: string | undefined; /** - *The baseline configuration for a model bias job.
+ *The status of the job.
*/ - ModelBiasBaselineConfig?: ModelBiasBaselineConfig; + Status: RecommendationJobStatus | string | undefined; /** - *Configures the model bias job to run a specified Docker container image.
+ *A timestamp that shows when the job was created.
*/ - ModelBiasAppSpecification: ModelBiasAppSpecification | undefined; + CreationTime: Date | undefined; /** - *Inputs for the model bias job.
+ *A timestamp that shows when the job completed.
*/ - ModelBiasJobInput: ModelBiasJobInput | undefined; + CompletionTime?: Date; /** - *The output configuration for monitoring jobs.
+ *A timestamp that shows when the job was last modified.
*/ - ModelBiasJobOutputConfig: MonitoringOutputConfig | undefined; + LastModifiedTime: Date | undefined; /** - *Identifies the resources to deploy for a monitoring job.
+ *If the job fails, provides information why the job failed.
*/ - JobResources: MonitoringResources | undefined; + FailureReason?: string; /** - *Networking options for a model bias job.
+ *Returns information about the versioned model package Amazon Resource Name (ARN), + * the traffic pattern, and endpoint configurations you provided when you initiated the job.
*/ - NetworkConfig?: MonitoringNetworkConfig; + InputConfig: RecommendationJobInputConfig | undefined; /** - *The Amazon Resource Name (ARN) of the Amazon Web Services Identity and Access Management (IAM) role that - * has read permission to the input data location and write permission to the output data - * location in Amazon S3.
+ *The stopping conditions that you provided when you initiated the job.
*/ - RoleArn: string | undefined; + StoppingConditions?: RecommendationJobStoppingConditions; /** - *A time limit for how long the monitoring job is allowed to run before stopping.
+ *The recommendations made by Inference Recommender.
*/ - StoppingCondition?: MonitoringStoppingCondition; + InferenceRecommendations?: InferenceRecommendation[]; } -export namespace DescribeModelBiasJobDefinitionResponse { +export namespace DescribeInferenceRecommendationsJobResponse { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeModelBiasJobDefinitionResponse): any => ({ + export const filterSensitiveLog = (obj: DescribeInferenceRecommendationsJobResponse): any => ({ ...obj, }); } -export interface DescribeModelExplainabilityJobDefinitionRequest { +export interface DescribeLabelingJobRequest { /** - *The name of the model explainability job definition. The name must be unique within an - * Amazon Web Services Region in the Amazon Web Services account.
+ *The name of the labeling job to return information for.
*/ - JobDefinitionName: string | undefined; + LabelingJobName: string | undefined; } -export namespace DescribeModelExplainabilityJobDefinitionRequest { +export namespace DescribeLabelingJobRequest { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeModelExplainabilityJobDefinitionRequest): any => ({ + export const filterSensitiveLog = (obj: DescribeLabelingJobRequest): any => ({ ...obj, }); } -export interface DescribeModelExplainabilityJobDefinitionResponse { - /** - *The Amazon Resource Name (ARN) of the model explainability job.
- */ - JobDefinitionArn: string | undefined; - - /** - *The name of the explainability job definition. The name must be unique within an Amazon Web Services - * Region in the Amazon Web Services account.
- */ - JobDefinitionName: string | undefined; - - /** - *The time at which the model explainability job was created.
- */ - CreationTime: Date | undefined; - - /** - *The baseline configuration for a model explainability job.
- */ - ModelExplainabilityBaselineConfig?: ModelExplainabilityBaselineConfig; - - /** - *Configures the model explainability job to run a specified Docker container - * image.
- */ - ModelExplainabilityAppSpecification: ModelExplainabilityAppSpecification | undefined; - - /** - *Inputs for the model explainability job.
- */ - ModelExplainabilityJobInput: ModelExplainabilityJobInput | undefined; - +/** + *Provides a breakdown of the number of objects labeled.
+ */ +export interface LabelCounters { /** - *The output configuration for monitoring jobs.
+ *The total number of objects labeled.
*/ - ModelExplainabilityJobOutputConfig: MonitoringOutputConfig | undefined; + TotalLabeled?: number; /** - *Identifies the resources to deploy for a monitoring job.
+ *The total number of objects labeled by a human worker.
*/ - JobResources: MonitoringResources | undefined; + HumanLabeled?: number; /** - *Networking options for a model explainability job.
+ *The total number of objects labeled by automated data labeling.
*/ - NetworkConfig?: MonitoringNetworkConfig; + MachineLabeled?: number; /** - *The Amazon Resource Name (ARN) of the Amazon Web Services Identity and Access Management (IAM) role that - * has read permission to the input data location and write permission to the output data - * location in Amazon S3.
+ *The total number of objects that could not be labeled due to an error.
*/ - RoleArn: string | undefined; + FailedNonRetryableError?: number; /** - *A time limit for how long the monitoring job is allowed to run before stopping.
+ *The total number of objects not yet labeled.
*/ - StoppingCondition?: MonitoringStoppingCondition; + Unlabeled?: number; } -export namespace DescribeModelExplainabilityJobDefinitionResponse { +export namespace LabelCounters { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeModelExplainabilityJobDefinitionResponse): any => ({ + export const filterSensitiveLog = (obj: LabelCounters): any => ({ ...obj, }); } -export interface DescribeModelPackageInput { +/** + *Specifies the location of the output produced by the labeling job.
+ */ +export interface LabelingJobOutput { /** - *The name or Amazon Resource Name (ARN) of the model package to describe.
- *When you specify a name, the name must have 1 to 63 characters. Valid - * characters are a-z, A-Z, 0-9, and - (hyphen).
+ *The Amazon S3 bucket location of the manifest file for labeled data.
*/ - ModelPackageName: string | undefined; + OutputDatasetS3Uri: string | undefined; + + /** + *The Amazon Resource Name (ARN) for the most recent SageMaker model trained as part of + * automated data labeling.
+ */ + FinalActiveLearningModelArn?: string; } -export namespace DescribeModelPackageInput { +export namespace LabelingJobOutput { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeModelPackageInput): any => ({ + export const filterSensitiveLog = (obj: LabelingJobOutput): any => ({ ...obj, }); } -export enum DetailedModelPackageStatus { +export enum LabelingJobStatus { COMPLETED = "Completed", FAILED = "Failed", + INITIALIZING = "Initializing", IN_PROGRESS = "InProgress", - NOT_STARTED = "NotStarted", + STOPPED = "Stopped", + STOPPING = "Stopping", } -/** - *Represents the overall status of a model package.
- */ -export interface ModelPackageStatusItem { +export interface DescribeLabelingJobResponse { /** - *The name of the model package for which the overall status is being reported.
+ *The processing status of the labeling job.
*/ - Name: string | undefined; + LabelingJobStatus: LabelingJobStatus | string | undefined; /** - *The current status.
+ *Provides a breakdown of the number of data objects labeled by humans, the number of + * objects labeled by machine, the number of objects than couldn't be labeled, and the + * total number of objects labeled.
*/ - Status: DetailedModelPackageStatus | string | undefined; + LabelCounters: LabelCounters | undefined; /** - *if the overall status is Failed
, the reason for the failure.
If the job failed, the reason that it failed.
*/ FailureReason?: string; -} -export namespace ModelPackageStatusItem { /** - * @internal + *The date and time that the labeling job was created.
*/ - export const filterSensitiveLog = (obj: ModelPackageStatusItem): any => ({ - ...obj, - }); -} + CreationTime: Date | undefined; -/** - *Specifies the validation and image scan statuses of the model package.
- */ -export interface ModelPackageStatusDetails { /** - *The validation status of the model package.
+ *The date and time that the labeling job was last updated.
*/ - ValidationStatuses: ModelPackageStatusItem[] | undefined; + LastModifiedTime: Date | undefined; /** - *The status of the scan of the Docker image container for the model package.
+ *A unique identifier for work done as part of a labeling job.
*/ - ImageScanStatuses?: ModelPackageStatusItem[]; -} - -export namespace ModelPackageStatusDetails { - /** - * @internal - */ - export const filterSensitiveLog = (obj: ModelPackageStatusDetails): any => ({ - ...obj, - }); -} - -export interface DescribeModelPackageOutput { - /** - *The name of the model package being described.
- */ - ModelPackageName: string | undefined; - - /** - *If the model is a versioned model, the name of the model group that the versioned - * model belongs to.
- */ - ModelPackageGroupName?: string; + JobReferenceCode: string | undefined; /** - *The version of the model package.
+ *The name assigned to the labeling job when it was created.
*/ - ModelPackageVersion?: number; + LabelingJobName: string | undefined; /** - *The Amazon Resource Name (ARN) of the model package.
+ *The Amazon Resource Name (ARN) of the labeling job.
*/ - ModelPackageArn: string | undefined; + LabelingJobArn: string | undefined; /** - *A brief summary of the model package.
+ *The attribute used as the label in the output manifest file.
*/ - ModelPackageDescription?: string; + LabelAttributeName?: string; /** - *A timestamp specifying when the model package was created.
+ *Input configuration information for the labeling job, such as the Amazon S3 location of the + * data objects and the location of the manifest file that describes the data + * objects.
*/ - CreationTime: Date | undefined; + InputConfig: LabelingJobInputConfig | undefined; /** - *Details about inference jobs that can be run with models based on this model - * package.
+ *The location of the job's output data and the Amazon Web Services Key Management Service key ID for the key used to + * encrypt the output data, if any.
*/ - InferenceSpecification?: InferenceSpecification; + OutputConfig: LabelingJobOutputConfig | undefined; /** - *Details about the algorithm that was used to create the model package.
+ *The Amazon Resource Name (ARN) that SageMaker assumes to perform tasks on your behalf + * during data labeling.
*/ - SourceAlgorithmSpecification?: SourceAlgorithmSpecification; + RoleArn: string | undefined; /** - *Configurations for one or more transform jobs that SageMaker runs to test the model - * package.
+ *The S3 location of the JSON file that defines the categories used to label data + * objects. Please note the following label-category limits:
+ *Semantic segmentation labeling jobs using automated labeling: 20 labels
+ *Box bounding labeling jobs (all): 10 labels
+ *The file is a JSON structure in the following format:
+ *
+ * {
+ *
+ * "document-version": "2018-11-28"
+ *
+ * "labels": [
+ *
+ * {
+ *
+ * "label": "label 1"
+ *
+ * },
+ *
+ * {
+ *
+ * "label": "label 2"
+ *
+ * },
+ *
+ * ...
+ *
+ * {
+ *
+ * "label": "label n"
+ *
+ * }
+ *
+ * ]
+ *
+ * }
+ *
The current status of the model package.
+ *A set of conditions for stopping a labeling job. If any of the conditions are met, the + * job is automatically stopped.
*/ - ModelPackageStatus: ModelPackageStatus | string | undefined; + StoppingConditions?: LabelingJobStoppingConditions; /** - *Details about the current status of the model package.
+ *Configuration information for automated data labeling.
*/ - ModelPackageStatusDetails: ModelPackageStatusDetails | undefined; + LabelingJobAlgorithmsConfig?: LabelingJobAlgorithmsConfig; /** - *Whether the model package is certified for listing on Amazon Web Services Marketplace.
+ *Configuration information required for human workers to complete a labeling + * task.
*/ - CertifyForMarketplace?: boolean; + HumanTaskConfig: HumanTaskConfig | undefined; /** - *The approval status of the model package.
+ *An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in + * different ways, for example, by purpose, owner, or environment. For more information, + * see Tagging Amazon Web Services + * Resources.
*/ - ModelApprovalStatus?: ModelApprovalStatus | string; + Tags?: Tag[]; /** - *Information about the user who created or modified an experiment, trial, trial - * component, lineage group, or project.
+ *The location of the output produced by the labeling job.
*/ - CreatedBy?: UserContext; + LabelingJobOutput?: LabelingJobOutput; +} +export namespace DescribeLabelingJobResponse { /** - *Metadata properties of the tracking entity, trial, or trial component.
+ * @internal */ - MetadataProperties?: MetadataProperties; + export const filterSensitiveLog = (obj: DescribeLabelingJobResponse): any => ({ + ...obj, + }); +} +export interface DescribeLineageGroupRequest { /** - *Metrics for the model.
+ *The name of the lineage group.
*/ - ModelMetrics?: ModelMetrics; + LineageGroupName: string | undefined; +} +export namespace DescribeLineageGroupRequest { /** - *The last time that the model package was modified.
+ * @internal */ - LastModifiedTime?: Date; + export const filterSensitiveLog = (obj: DescribeLineageGroupRequest): any => ({ + ...obj, + }); +} +export interface DescribeLineageGroupResponse { /** - *Information about the user who created or modified an experiment, trial, trial - * component, lineage group, or project.
+ *The name of the lineage group.
*/ - LastModifiedBy?: UserContext; + LineageGroupName?: string; /** - *A description provided for the model approval.
+ *The Amazon Resource Name (ARN) of the lineage group.
*/ - ApprovalDescription?: string; + LineageGroupArn?: string; /** - *The metadata properties associated with the model package versions.
+ *The display name of the lineage group.
*/ - CustomerMetadataProperties?: RecordRepresents the drift check baselines that can be used when the model monitor is set using the model package. - * For more information, see the topic on Drift Detection against Previous Baselines in SageMaker Pipelines in the Amazon SageMaker Developer Guide. - *
+ *The description of the lineage group.
*/ - DriftCheckBaselines?: DriftCheckBaselines; + Description?: string; /** - *The machine learning domain of the model package you specified. Common machine - * learning domains include computer vision and natural language processing.
+ *The creation time of lineage group.
*/ - Domain?: string; + CreationTime?: Date; /** - *The machine learning task you specified that your model package accomplishes. - * Common machine learning tasks include object detection and image classification.
+ *Information about the user who created or modified an experiment, trial, trial + * component, lineage group, or project.
*/ - Task?: string; + CreatedBy?: UserContext; /** - *The Amazon Simple Storage Service (Amazon S3) path where the sample payload are stored. This path points to a single - * gzip compressed tar archive (.tar.gz suffix).
+ *The last modified time of the lineage group.
*/ - SamplePayloadUrl?: string; + LastModifiedTime?: Date; /** - *An array of additional Inference Specification objects. Each additional - * Inference Specification specifies artifacts based on this model package that can - * be used on inference endpoints. Generally used with SageMaker Neo to store the compiled artifacts.
+ *Information about the user who created or modified an experiment, trial, trial + * component, lineage group, or project.
*/ - AdditionalInferenceSpecifications?: AdditionalInferenceSpecificationDefinition[]; + LastModifiedBy?: UserContext; } -export namespace DescribeModelPackageOutput { +export namespace DescribeLineageGroupResponse { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeModelPackageOutput): any => ({ + export const filterSensitiveLog = (obj: DescribeLineageGroupResponse): any => ({ ...obj, }); } -export interface DescribeModelPackageGroupInput { +export interface DescribeModelInput { /** - *The name of gthe model group to describe.
+ *The name of the model.
*/ - ModelPackageGroupName: string | undefined; + ModelName: string | undefined; } -export namespace DescribeModelPackageGroupInput { +export namespace DescribeModelInput { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeModelPackageGroupInput): any => ({ + export const filterSensitiveLog = (obj: DescribeModelInput): any => ({ ...obj, }); } -export enum ModelPackageGroupStatus { - COMPLETED = "Completed", - DELETE_FAILED = "DeleteFailed", - DELETING = "Deleting", - FAILED = "Failed", - IN_PROGRESS = "InProgress", - PENDING = "Pending", -} - -export interface DescribeModelPackageGroupOutput { +export interface DescribeModelOutput { /** - *The name of the model group.
+ *Name of the SageMaker model.
*/ - ModelPackageGroupName: string | undefined; + ModelName: string | undefined; /** - *The Amazon Resource Name (ARN) of the model group.
+ *The location of the primary inference code, associated artifacts, and custom + * environment map that the inference code uses when it is deployed in production. + *
*/ - ModelPackageGroupArn: string | undefined; + PrimaryContainer?: ContainerDefinition; /** - *A description of the model group.
+ *The containers in the inference pipeline.
*/ - ModelPackageGroupDescription?: string; + Containers?: ContainerDefinition[]; /** - *The time that the model group was created.
+ *Specifies details of how containers in a multi-container endpoint are called.
*/ - CreationTime: Date | undefined; + InferenceExecutionConfig?: InferenceExecutionConfig; /** - *Information about the user who created or modified an experiment, trial, trial - * component, lineage group, or project.
+ *The Amazon Resource Name (ARN) of the IAM role that you specified for the + * model.
*/ - CreatedBy: UserContext | undefined; + ExecutionRoleArn: string | undefined; /** - *The status of the model group.
- */ - ModelPackageGroupStatus: ModelPackageGroupStatus | string | undefined; + *A VpcConfig object that specifies the VPC that this model has access + * to. For more information, see Protect Endpoints by Using an Amazon Virtual + * Private Cloud + *
+ */ + VpcConfig?: VpcConfig; + + /** + *A timestamp that shows when the model was created.
+ */ + CreationTime: Date | undefined; + + /** + *The Amazon Resource Name (ARN) of the model.
+ */ + ModelArn: string | undefined; + + /** + *If True
, no inbound or outbound network calls can be made to or from the
+ * model container.
The name of the model quality job. The name must be unique within an Amazon Web Services Region in the - * Amazon Web Services account.
+ *The name of the model bias job definition. The name must be unique within an Amazon Web Services Region + * in the Amazon Web Services account.
*/ JobDefinitionName: string | undefined; } -export namespace DescribeModelQualityJobDefinitionRequest { +export namespace DescribeModelBiasJobDefinitionRequest { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeModelQualityJobDefinitionRequest): any => ({ + export const filterSensitiveLog = (obj: DescribeModelBiasJobDefinitionRequest): any => ({ ...obj, }); } -export interface DescribeModelQualityJobDefinitionResponse { +export interface DescribeModelBiasJobDefinitionResponse { /** - *The Amazon Resource Name (ARN) of the model quality job.
+ *The Amazon Resource Name (ARN) of the model bias job.
*/ JobDefinitionArn: string | undefined; /** - *The name of the quality job definition. The name must be unique within an Amazon Web Services Region in - * the Amazon Web Services account.
+ *The name of the bias job definition. The name must be unique within an Amazon Web Services Region in the + * Amazon Web Services account.
*/ JobDefinitionName: string | undefined; /** - *The time at which the model quality job was created.
+ *The time at which the model bias job was created.
*/ CreationTime: Date | undefined; /** - *The baseline configuration for a model quality job.
+ *The baseline configuration for a model bias job.
*/ - ModelQualityBaselineConfig?: ModelQualityBaselineConfig; + ModelBiasBaselineConfig?: ModelBiasBaselineConfig; /** - *Configures the model quality job to run a specified Docker container image.
+ *Configures the model bias job to run a specified Docker container image.
*/ - ModelQualityAppSpecification: ModelQualityAppSpecification | undefined; + ModelBiasAppSpecification: ModelBiasAppSpecification | undefined; /** - *Inputs for the model quality job.
+ *Inputs for the model bias job.
*/ - ModelQualityJobInput: ModelQualityJobInput | undefined; + ModelBiasJobInput: ModelBiasJobInput | undefined; /** *The output configuration for monitoring jobs.
*/ - ModelQualityJobOutputConfig: MonitoringOutputConfig | undefined; + ModelBiasJobOutputConfig: MonitoringOutputConfig | undefined; /** *Identifies the resources to deploy for a monitoring job.
@@ -1276,13 +1392,14 @@ export interface DescribeModelQualityJobDefinitionResponse { JobResources: MonitoringResources | undefined; /** - *Networking options for a model quality job.
+ *Networking options for a model bias job.
*/ NetworkConfig?: MonitoringNetworkConfig; /** - *The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to - * perform tasks on your behalf.
+ *The Amazon Resource Name (ARN) of the Amazon Web Services Identity and Access Management (IAM) role that + * has read permission to the input data location and write permission to the output data + * location in Amazon S3.
*/ RoleArn: string | undefined; @@ -1292,3045 +1409,2862 @@ export interface DescribeModelQualityJobDefinitionResponse { StoppingCondition?: MonitoringStoppingCondition; } -export namespace DescribeModelQualityJobDefinitionResponse { +export namespace DescribeModelBiasJobDefinitionResponse { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeModelQualityJobDefinitionResponse): any => ({ + export const filterSensitiveLog = (obj: DescribeModelBiasJobDefinitionResponse): any => ({ ...obj, }); } -export interface DescribeMonitoringScheduleRequest { +export interface DescribeModelExplainabilityJobDefinitionRequest { /** - *Name of a previously created monitoring schedule.
+ *The name of the model explainability job definition. The name must be unique within an + * Amazon Web Services Region in the Amazon Web Services account.
*/ - MonitoringScheduleName: string | undefined; + JobDefinitionName: string | undefined; } -export namespace DescribeMonitoringScheduleRequest { +export namespace DescribeModelExplainabilityJobDefinitionRequest { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeMonitoringScheduleRequest): any => ({ + export const filterSensitiveLog = (obj: DescribeModelExplainabilityJobDefinitionRequest): any => ({ ...obj, }); } -export enum ExecutionStatus { - COMPLETED = "Completed", - COMPLETED_WITH_VIOLATIONS = "CompletedWithViolations", - FAILED = "Failed", - IN_PROGRESS = "InProgress", - PENDING = "Pending", - STOPPED = "Stopped", - STOPPING = "Stopping", -} - -/** - *Summary of information about the last monitoring job to run.
- */ -export interface MonitoringExecutionSummary { +export interface DescribeModelExplainabilityJobDefinitionResponse { /** - *The name of the monitoring schedule.
+ *The Amazon Resource Name (ARN) of the model explainability job.
*/ - MonitoringScheduleName: string | undefined; + JobDefinitionArn: string | undefined; /** - *The time the monitoring job was scheduled.
+ *The name of the explainability job definition. The name must be unique within an Amazon Web Services + * Region in the Amazon Web Services account.
*/ - ScheduledTime: Date | undefined; + JobDefinitionName: string | undefined; /** - *The time at which the monitoring job was created.
+ *The time at which the model explainability job was created.
*/ CreationTime: Date | undefined; /** - *A timestamp that indicates the last time the monitoring job was modified.
+ *The baseline configuration for a model explainability job.
*/ - LastModifiedTime: Date | undefined; + ModelExplainabilityBaselineConfig?: ModelExplainabilityBaselineConfig; /** - *The status of the monitoring job.
+ *Configures the model explainability job to run a specified Docker container + * image.
*/ - MonitoringExecutionStatus: ExecutionStatus | string | undefined; + ModelExplainabilityAppSpecification: ModelExplainabilityAppSpecification | undefined; /** - *The Amazon Resource Name (ARN) of the monitoring job.
+ *Inputs for the model explainability job.
*/ - ProcessingJobArn?: string; + ModelExplainabilityJobInput: ModelExplainabilityJobInput | undefined; /** - *The name of the endpoint used to run the monitoring job.
+ *The output configuration for monitoring jobs.
*/ - EndpointName?: string; + ModelExplainabilityJobOutputConfig: MonitoringOutputConfig | undefined; /** - *Contains the reason a monitoring job failed, if it failed.
+ *Identifies the resources to deploy for a monitoring job.
*/ - FailureReason?: string; + JobResources: MonitoringResources | undefined; /** - *The name of the monitoring job.
+ *Networking options for a model explainability job.
*/ - MonitoringJobDefinitionName?: string; + NetworkConfig?: MonitoringNetworkConfig; /** - *The type of the monitoring job.
+ *The Amazon Resource Name (ARN) of the Amazon Web Services Identity and Access Management (IAM) role that + * has read permission to the input data location and write permission to the output data + * location in Amazon S3.
*/ - MonitoringType?: MonitoringType | string; + RoleArn: string | undefined; + + /** + *A time limit for how long the monitoring job is allowed to run before stopping.
+ */ + StoppingCondition?: MonitoringStoppingCondition; } -export namespace MonitoringExecutionSummary { +export namespace DescribeModelExplainabilityJobDefinitionResponse { /** * @internal */ - export const filterSensitiveLog = (obj: MonitoringExecutionSummary): any => ({ + export const filterSensitiveLog = (obj: DescribeModelExplainabilityJobDefinitionResponse): any => ({ ...obj, }); } -export enum ScheduleStatus { - FAILED = "Failed", - PENDING = "Pending", - SCHEDULED = "Scheduled", - STOPPED = "Stopped", -} - -export interface DescribeMonitoringScheduleResponse { - /** - *The Amazon Resource Name (ARN) of the monitoring schedule.
- */ - MonitoringScheduleArn: string | undefined; - +export interface DescribeModelPackageInput { /** - *Name of the monitoring schedule.
+ *The name or Amazon Resource Name (ARN) of the model package to describe.
+ *When you specify a name, the name must have 1 to 63 characters. Valid + * characters are a-z, A-Z, 0-9, and - (hyphen).
*/ - MonitoringScheduleName: string | undefined; + ModelPackageName: string | undefined; +} +export namespace DescribeModelPackageInput { /** - *The status of an monitoring job.
+ * @internal */ - MonitoringScheduleStatus: ScheduleStatus | string | undefined; + export const filterSensitiveLog = (obj: DescribeModelPackageInput): any => ({ + ...obj, + }); +} - /** - *The type of the monitoring job that this schedule runs. This is one of the following - * values.
- *
- * DATA_QUALITY
- The schedule is for a data quality monitoring
- * job.
- * MODEL_QUALITY
- The schedule is for a model quality monitoring
- * job.
- * MODEL_BIAS
- The schedule is for a bias monitoring job.
- * MODEL_EXPLAINABILITY
- The schedule is for an explainability
- * monitoring job.
Represents the overall status of a model package.
+ */ +export interface ModelPackageStatusItem { /** - *A string, up to one KB in size, that contains the reason a monitoring job failed, if it - * failed.
+ *The name of the model package for which the overall status is being reported.
*/ - FailureReason?: string; + Name: string | undefined; /** - *The time at which the monitoring job was created.
+ *The current status.
*/ - CreationTime: Date | undefined; + Status: DetailedModelPackageStatus | string | undefined; /** - *The time at which the monitoring job was last modified.
+ *if the overall status is Failed
, the reason for the failure.
The configuration object that specifies the monitoring schedule and defines the - * monitoring job.
+ * @internal */ - MonitoringScheduleConfig: MonitoringScheduleConfig | undefined; + export const filterSensitiveLog = (obj: ModelPackageStatusItem): any => ({ + ...obj, + }); +} +/** + *Specifies the validation and image scan statuses of the model package.
+ */ +export interface ModelPackageStatusDetails { /** - *The name of the endpoint for the monitoring job.
+ *The validation status of the model package.
*/ - EndpointName?: string; + ValidationStatuses: ModelPackageStatusItem[] | undefined; /** - *Describes metadata on the last execution to run, if there was one.
+ *The status of the scan of the Docker image container for the model package.
*/ - LastMonitoringExecutionSummary?: MonitoringExecutionSummary; + ImageScanStatuses?: ModelPackageStatusItem[]; } -export namespace DescribeMonitoringScheduleResponse { +export namespace ModelPackageStatusDetails { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeMonitoringScheduleResponse): any => ({ + export const filterSensitiveLog = (obj: ModelPackageStatusDetails): any => ({ ...obj, }); } -export interface DescribeNotebookInstanceInput { +export interface DescribeModelPackageOutput { /** - *The name of the notebook instance that you want information about.
+ *The name of the model package being described.
*/ - NotebookInstanceName: string | undefined; -} + ModelPackageName: string | undefined; -export namespace DescribeNotebookInstanceInput { /** - * @internal + *If the model is a versioned model, the name of the model group that the versioned + * model belongs to.
*/ - export const filterSensitiveLog = (obj: DescribeNotebookInstanceInput): any => ({ - ...obj, - }); -} + ModelPackageGroupName?: string; -export enum NotebookInstanceStatus { - Deleting = "Deleting", - Failed = "Failed", - InService = "InService", - Pending = "Pending", - Stopped = "Stopped", - Stopping = "Stopping", - Updating = "Updating", -} + /** + *The version of the model package.
+ */ + ModelPackageVersion?: number; -export interface DescribeNotebookInstanceOutput { /** - *The Amazon Resource Name (ARN) of the notebook instance.
+ *The Amazon Resource Name (ARN) of the model package.
*/ - NotebookInstanceArn?: string; + ModelPackageArn: string | undefined; /** - *The name of the SageMaker notebook instance.
+ *A brief summary of the model package.
*/ - NotebookInstanceName?: string; + ModelPackageDescription?: string; /** - *The status of the notebook instance.
+ *A timestamp specifying when the model package was created.
*/ - NotebookInstanceStatus?: NotebookInstanceStatus | string; + CreationTime: Date | undefined; /** - *If status is Failed
, the reason it failed.
Details about inference jobs that can be run with models based on this model + * package.
*/ - FailureReason?: string; + InferenceSpecification?: InferenceSpecification; /** - *The URL that you use to connect to the Jupyter notebook that is running in your - * notebook instance.
+ *Details about the algorithm that was used to create the model package.
*/ - Url?: string; + SourceAlgorithmSpecification?: SourceAlgorithmSpecification; /** - *The type of ML compute instance running on the notebook instance.
+ *Configurations for one or more transform jobs that SageMaker runs to test the model + * package.
*/ - InstanceType?: _InstanceType | string; + ValidationSpecification?: ModelPackageValidationSpecification; /** - *The ID of the VPC subnet.
+ *The current status of the model package.
*/ - SubnetId?: string; + ModelPackageStatus: ModelPackageStatus | string | undefined; /** - *The IDs of the VPC security groups.
+ *Details about the current status of the model package.
*/ - SecurityGroups?: string[]; + ModelPackageStatusDetails: ModelPackageStatusDetails | undefined; /** - *The Amazon Resource Name (ARN) of the IAM role associated with the instance. - *
+ *Whether the model package is certified for listing on Amazon Web Services Marketplace.
*/ - RoleArn?: string; + CertifyForMarketplace?: boolean; /** - *The Amazon Web Services KMS key ID SageMaker uses to encrypt data when storing it on the ML storage - * volume attached to the instance.
+ *The approval status of the model package.
*/ - KmsKeyId?: string; + ModelApprovalStatus?: ModelApprovalStatus | string; /** - *The network interface IDs that SageMaker created at the time of creating the instance. - *
+ *Information about the user who created or modified an experiment, trial, trial + * component, lineage group, or project.
*/ - NetworkInterfaceId?: string; + CreatedBy?: UserContext; /** - *A timestamp. Use this parameter to retrieve the time when the notebook instance was - * last modified.
+ *Metadata properties of the tracking entity, trial, or trial component.
*/ - LastModifiedTime?: Date; + MetadataProperties?: MetadataProperties; /** - *A timestamp. Use this parameter to return the time when the notebook instance was - * created
+ *Metrics for the model.
*/ - CreationTime?: Date; + ModelMetrics?: ModelMetrics; /** - *Returns the name of a notebook instance lifecycle configuration.
- *For information about notebook instance lifestyle configurations, see Step - * 2.1: (Optional) Customize a Notebook Instance - *
+ *The last time that the model package was modified.
*/ - NotebookInstanceLifecycleConfigName?: string; + LastModifiedTime?: Date; /** - *Describes whether SageMaker provides internet access to the notebook instance. If this - * value is set to Disabled, the notebook instance does not have - * internet access, and cannot connect to SageMaker training and endpoint services.
- *For more information, see Notebook Instances Are Internet-Enabled by Default.
+ *Information about the user who created or modified an experiment, trial, trial + * component, lineage group, or project.
*/ - DirectInternetAccess?: DirectInternetAccess | string; + LastModifiedBy?: UserContext; /** - *The size, in GB, of the ML storage volume attached to the notebook instance.
+ *A description provided for the model approval.
*/ - VolumeSizeInGB?: number; + ApprovalDescription?: string; /** - *A list of the Elastic Inference (EI) instance types associated with this notebook - * instance. Currently only one EI instance type can be associated with a notebook - * instance. For more information, see Using Elastic Inference in Amazon - * SageMaker.
+ *The metadata properties associated with the model package versions.
*/ - AcceleratorTypes?: (NotebookInstanceAcceleratorType | string)[]; + CustomerMetadataProperties?: RecordThe Git repository associated with the notebook instance as its default code - * repository. This can be either the name of a Git repository stored as a resource in your - * account, or the URL of a Git repository in Amazon Web Services CodeCommit or in any - * other Git repository. When you open a notebook instance, it opens in the directory that - * contains this repository. For more information, see Associating Git Repositories with SageMaker - * Notebook Instances.
+ *Represents the drift check baselines that can be used when the model monitor is set using the model package. + * For more information, see the topic on Drift Detection against Previous Baselines in SageMaker Pipelines in the Amazon SageMaker Developer Guide. + *
*/ - DefaultCodeRepository?: string; + DriftCheckBaselines?: DriftCheckBaselines; /** - *An array of up to three Git repositories associated with the notebook instance. These - * can be either the names of Git repositories stored as resources in your account, or the - * URL of Git repositories in Amazon Web Services CodeCommit or in any - * other Git repository. These repositories are cloned at the same level as the default - * repository of your notebook instance. For more information, see Associating Git - * Repositories with SageMaker Notebook Instances.
+ *The machine learning domain of the model package you specified. Common machine + * learning domains include computer vision and natural language processing.
*/ - AdditionalCodeRepositories?: string[]; + Domain?: string; /** - *Whether root access is enabled or disabled for users of the notebook instance.
- *Lifecycle configurations need root access to be able to set up a notebook - * instance. Because of this, lifecycle configurations associated with a notebook - * instance always run with root access even if you disable root access for - * users.
- *The machine learning task you specified that your model package accomplishes. + * Common machine learning tasks include object detection and image classification.
*/ - RootAccess?: RootAccess | string; + Task?: string; /** - *The platform identifier of the notebook instance runtime environment.
+ *The Amazon Simple Storage Service (Amazon S3) path where the sample payload are stored. This path points to a single + * gzip compressed tar archive (.tar.gz suffix).
*/ - PlatformIdentifier?: string; + SamplePayloadUrl?: string; /** - *Information on the IMDS configuration of the notebook instance
+ *An array of additional Inference Specification objects. Each additional + * Inference Specification specifies artifacts based on this model package that can + * be used on inference endpoints. Generally used with SageMaker Neo to store the compiled artifacts.
*/ - InstanceMetadataServiceConfiguration?: InstanceMetadataServiceConfiguration; + AdditionalInferenceSpecifications?: AdditionalInferenceSpecificationDefinition[]; } -export namespace DescribeNotebookInstanceOutput { +export namespace DescribeModelPackageOutput { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeNotebookInstanceOutput): any => ({ + export const filterSensitiveLog = (obj: DescribeModelPackageOutput): any => ({ ...obj, }); } -export interface DescribeNotebookInstanceLifecycleConfigInput { +export interface DescribeModelPackageGroupInput { /** - *The name of the lifecycle configuration to describe.
+ *The name of gthe model group to describe.
*/ - NotebookInstanceLifecycleConfigName: string | undefined; + ModelPackageGroupName: string | undefined; } -export namespace DescribeNotebookInstanceLifecycleConfigInput { +export namespace DescribeModelPackageGroupInput { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeNotebookInstanceLifecycleConfigInput): any => ({ + export const filterSensitiveLog = (obj: DescribeModelPackageGroupInput): any => ({ ...obj, }); } -export interface DescribeNotebookInstanceLifecycleConfigOutput { +export enum ModelPackageGroupStatus { + COMPLETED = "Completed", + DELETE_FAILED = "DeleteFailed", + DELETING = "Deleting", + FAILED = "Failed", + IN_PROGRESS = "InProgress", + PENDING = "Pending", +} + +export interface DescribeModelPackageGroupOutput { /** - *The Amazon Resource Name (ARN) of the lifecycle configuration.
+ *The name of the model group.
*/ - NotebookInstanceLifecycleConfigArn?: string; + ModelPackageGroupName: string | undefined; /** - *The name of the lifecycle configuration.
+ *The Amazon Resource Name (ARN) of the model group.
*/ - NotebookInstanceLifecycleConfigName?: string; + ModelPackageGroupArn: string | undefined; /** - *The shell script that runs only once, when you create a notebook instance.
+ *A description of the model group.
*/ - OnCreate?: NotebookInstanceLifecycleHook[]; + ModelPackageGroupDescription?: string; /** - *The shell script that runs every time you start a notebook instance, including when - * you create the notebook instance.
+ *The time that the model group was created.
*/ - OnStart?: NotebookInstanceLifecycleHook[]; + CreationTime: Date | undefined; /** - *A timestamp that tells when the lifecycle configuration was last modified.
+ *Information about the user who created or modified an experiment, trial, trial + * component, lineage group, or project.
*/ - LastModifiedTime?: Date; + CreatedBy: UserContext | undefined; /** - *A timestamp that tells when the lifecycle configuration was created.
+ *The status of the model group.
*/ - CreationTime?: Date; + ModelPackageGroupStatus: ModelPackageGroupStatus | string | undefined; } -export namespace DescribeNotebookInstanceLifecycleConfigOutput { +export namespace DescribeModelPackageGroupOutput { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeNotebookInstanceLifecycleConfigOutput): any => ({ + export const filterSensitiveLog = (obj: DescribeModelPackageGroupOutput): any => ({ ...obj, }); } -export interface DescribePipelineRequest { +export interface DescribeModelQualityJobDefinitionRequest { /** - *The name of the pipeline to describe.
+ *The name of the model quality job. The name must be unique within an Amazon Web Services Region in the + * Amazon Web Services account.
*/ - PipelineName: string | undefined; + JobDefinitionName: string | undefined; } -export namespace DescribePipelineRequest { +export namespace DescribeModelQualityJobDefinitionRequest { /** * @internal */ - export const filterSensitiveLog = (obj: DescribePipelineRequest): any => ({ + export const filterSensitiveLog = (obj: DescribeModelQualityJobDefinitionRequest): any => ({ ...obj, }); } -export enum PipelineStatus { - ACTIVE = "Active", -} - -export interface DescribePipelineResponse { +export interface DescribeModelQualityJobDefinitionResponse { /** - *The Amazon Resource Name (ARN) of the pipeline.
+ *The Amazon Resource Name (ARN) of the model quality job.
*/ - PipelineArn?: string; + JobDefinitionArn: string | undefined; /** - *The name of the pipeline.
+ *The name of the quality job definition. The name must be unique within an Amazon Web Services Region in + * the Amazon Web Services account.
*/ - PipelineName?: string; + JobDefinitionName: string | undefined; /** - *The display name of the pipeline.
+ *The time at which the model quality job was created.
*/ - PipelineDisplayName?: string; + CreationTime: Date | undefined; /** - *The JSON pipeline definition.
+ *The baseline configuration for a model quality job.
*/ - PipelineDefinition?: string; + ModelQualityBaselineConfig?: ModelQualityBaselineConfig; /** - *The description of the pipeline.
+ *Configures the model quality job to run a specified Docker container image.
*/ - PipelineDescription?: string; + ModelQualityAppSpecification: ModelQualityAppSpecification | undefined; /** - *The Amazon Resource Name (ARN) that the pipeline uses to execute.
+ *Inputs for the model quality job.
*/ - RoleArn?: string; + ModelQualityJobInput: ModelQualityJobInput | undefined; /** - *The status of the pipeline execution.
+ *The output configuration for monitoring jobs.
*/ - PipelineStatus?: PipelineStatus | string; + ModelQualityJobOutputConfig: MonitoringOutputConfig | undefined; /** - *The time when the pipeline was created.
+ *Identifies the resources to deploy for a monitoring job.
*/ - CreationTime?: Date; + JobResources: MonitoringResources | undefined; /** - *The time when the pipeline was last modified.
+ *Networking options for a model quality job.
*/ - LastModifiedTime?: Date; + NetworkConfig?: MonitoringNetworkConfig; /** - *The time when the pipeline was last run.
+ *The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to + * perform tasks on your behalf.
*/ - LastRunTime?: Date; + RoleArn: string | undefined; /** - *Information about the user who created or modified an experiment, trial, trial - * component, lineage group, or project.
+ *A time limit for how long the monitoring job is allowed to run before stopping.
*/ - CreatedBy?: UserContext; + StoppingCondition?: MonitoringStoppingCondition; +} +export namespace DescribeModelQualityJobDefinitionResponse { /** - *Information about the user who created or modified an experiment, trial, trial - * component, lineage group, or project.
+ * @internal */ - LastModifiedBy?: UserContext; + export const filterSensitiveLog = (obj: DescribeModelQualityJobDefinitionResponse): any => ({ + ...obj, + }); +} +export interface DescribeMonitoringScheduleRequest { /** - *Lists the parallelism configuration applied to the pipeline.
+ *Name of a previously created monitoring schedule.
*/ - ParallelismConfiguration?: ParallelismConfiguration; + MonitoringScheduleName: string | undefined; } -export namespace DescribePipelineResponse { +export namespace DescribeMonitoringScheduleRequest { /** * @internal */ - export const filterSensitiveLog = (obj: DescribePipelineResponse): any => ({ + export const filterSensitiveLog = (obj: DescribeMonitoringScheduleRequest): any => ({ ...obj, }); } -export interface DescribePipelineDefinitionForExecutionRequest { +export enum ExecutionStatus { + COMPLETED = "Completed", + COMPLETED_WITH_VIOLATIONS = "CompletedWithViolations", + FAILED = "Failed", + IN_PROGRESS = "InProgress", + PENDING = "Pending", + STOPPED = "Stopped", + STOPPING = "Stopping", +} + +/** + *Summary of information about the last monitoring job to run.
+ */ +export interface MonitoringExecutionSummary { /** - *The Amazon Resource Name (ARN) of the pipeline execution.
+ *The name of the monitoring schedule.
*/ - PipelineExecutionArn: string | undefined; -} + MonitoringScheduleName: string | undefined; -export namespace DescribePipelineDefinitionForExecutionRequest { /** - * @internal + *The time the monitoring job was scheduled.
*/ - export const filterSensitiveLog = (obj: DescribePipelineDefinitionForExecutionRequest): any => ({ - ...obj, - }); -} + ScheduledTime: Date | undefined; -export interface DescribePipelineDefinitionForExecutionResponse { /** - *The JSON pipeline definition.
+ *The time at which the monitoring job was created.
*/ - PipelineDefinition?: string; + CreationTime: Date | undefined; /** - *The time when the pipeline was created.
+ *A timestamp that indicates the last time the monitoring job was modified.
*/ - CreationTime?: Date; -} + LastModifiedTime: Date | undefined; -export namespace DescribePipelineDefinitionForExecutionResponse { /** - * @internal + *The status of the monitoring job.
*/ - export const filterSensitiveLog = (obj: DescribePipelineDefinitionForExecutionResponse): any => ({ - ...obj, - }); -} + MonitoringExecutionStatus: ExecutionStatus | string | undefined; -export interface DescribePipelineExecutionRequest { /** - *The Amazon Resource Name (ARN) of the pipeline execution.
+ *The Amazon Resource Name (ARN) of the monitoring job.
*/ - PipelineExecutionArn: string | undefined; -} + ProcessingJobArn?: string; -export namespace DescribePipelineExecutionRequest { /** - * @internal + *The name of the endpoint used to run the monitoring job.
*/ - export const filterSensitiveLog = (obj: DescribePipelineExecutionRequest): any => ({ - ...obj, - }); -} + EndpointName?: string; -export enum PipelineExecutionStatus { - EXECUTING = "Executing", - FAILED = "Failed", - STOPPED = "Stopped", - STOPPING = "Stopping", - SUCCEEDED = "Succeeded", -} + /** + *Contains the reason a monitoring job failed, if it failed.
+ */ + FailureReason?: string; -/** - *Specifies the names of the experiment and trial created by a pipeline.
- */ -export interface PipelineExperimentConfig { /** - *The name of the experiment.
+ *The name of the monitoring job.
*/ - ExperimentName?: string; + MonitoringJobDefinitionName?: string; /** - *The name of the trial.
+ *The type of the monitoring job.
*/ - TrialName?: string; + MonitoringType?: MonitoringType | string; } -export namespace PipelineExperimentConfig { +export namespace MonitoringExecutionSummary { /** * @internal */ - export const filterSensitiveLog = (obj: PipelineExperimentConfig): any => ({ + export const filterSensitiveLog = (obj: MonitoringExecutionSummary): any => ({ ...obj, }); } -export interface DescribePipelineExecutionResponse { - /** - *The Amazon Resource Name (ARN) of the pipeline.
- */ - PipelineArn?: string; - - /** - *The Amazon Resource Name (ARN) of the pipeline execution.
- */ - PipelineExecutionArn?: string; +export enum ScheduleStatus { + FAILED = "Failed", + PENDING = "Pending", + SCHEDULED = "Scheduled", + STOPPED = "Stopped", +} +export interface DescribeMonitoringScheduleResponse { /** - *The display name of the pipeline execution.
+ *The Amazon Resource Name (ARN) of the monitoring schedule.
*/ - PipelineExecutionDisplayName?: string; + MonitoringScheduleArn: string | undefined; /** - *The status of the pipeline execution.
+ *Name of the monitoring schedule.
*/ - PipelineExecutionStatus?: PipelineExecutionStatus | string; + MonitoringScheduleName: string | undefined; /** - *The description of the pipeline execution.
+ *The status of an monitoring job.
*/ - PipelineExecutionDescription?: string; + MonitoringScheduleStatus: ScheduleStatus | string | undefined; /** - *Specifies the names of the experiment and trial created by a pipeline.
+ *The type of the monitoring job that this schedule runs. This is one of the following + * values.
+ *
+ * DATA_QUALITY
- The schedule is for a data quality monitoring
+ * job.
+ * MODEL_QUALITY
- The schedule is for a model quality monitoring
+ * job.
+ * MODEL_BIAS
- The schedule is for a bias monitoring job.
+ * MODEL_EXPLAINABILITY
- The schedule is for an explainability
+ * monitoring job.
If the execution failed, a message describing why.
+ *A string, up to one KB in size, that contains the reason a monitoring job failed, if it + * failed.
*/ FailureReason?: string; /** - *The time when the pipeline execution was created.
+ *The time at which the monitoring job was created.
*/ - CreationTime?: Date; + CreationTime: Date | undefined; /** - *The time when the pipeline execution was modified last.
+ *The time at which the monitoring job was last modified.
*/ - LastModifiedTime?: Date; + LastModifiedTime: Date | undefined; /** - *Information about the user who created or modified an experiment, trial, trial - * component, lineage group, or project.
+ *The configuration object that specifies the monitoring schedule and defines the + * monitoring job.
*/ - CreatedBy?: UserContext; + MonitoringScheduleConfig: MonitoringScheduleConfig | undefined; /** - *Information about the user who created or modified an experiment, trial, trial - * component, lineage group, or project.
+ *The name of the endpoint for the monitoring job.
*/ - LastModifiedBy?: UserContext; + EndpointName?: string; /** - *The parallelism configuration applied to the pipeline.
+ *Describes metadata on the last execution to run, if there was one.
*/ - ParallelismConfiguration?: ParallelismConfiguration; + LastMonitoringExecutionSummary?: MonitoringExecutionSummary; } -export namespace DescribePipelineExecutionResponse { +export namespace DescribeMonitoringScheduleResponse { /** * @internal */ - export const filterSensitiveLog = (obj: DescribePipelineExecutionResponse): any => ({ + export const filterSensitiveLog = (obj: DescribeMonitoringScheduleResponse): any => ({ ...obj, }); } -export interface DescribeProcessingJobRequest { +export interface DescribeNotebookInstanceInput { /** - *The name of the processing job. The name must be unique within an Amazon Web Services Region in the - * Amazon Web Services account.
+ *The name of the notebook instance that you want information about.
*/ - ProcessingJobName: string | undefined; + NotebookInstanceName: string | undefined; } -export namespace DescribeProcessingJobRequest { +export namespace DescribeNotebookInstanceInput { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeProcessingJobRequest): any => ({ + export const filterSensitiveLog = (obj: DescribeNotebookInstanceInput): any => ({ ...obj, }); } -export enum ProcessingJobStatus { - COMPLETED = "Completed", - FAILED = "Failed", - IN_PROGRESS = "InProgress", - STOPPED = "Stopped", - STOPPING = "Stopping", +export enum NotebookInstanceStatus { + Deleting = "Deleting", + Failed = "Failed", + InService = "InService", + Pending = "Pending", + Stopped = "Stopped", + Stopping = "Stopping", + Updating = "Updating", } -export interface DescribeProcessingJobResponse { +export interface DescribeNotebookInstanceOutput { /** - *The inputs for a processing job.
+ *The Amazon Resource Name (ARN) of the notebook instance.
*/ - ProcessingInputs?: ProcessingInput[]; + NotebookInstanceArn?: string; /** - *Output configuration for the processing job.
+ *The name of the SageMaker notebook instance.
*/ - ProcessingOutputConfig?: ProcessingOutputConfig; + NotebookInstanceName?: string; /** - *The name of the processing job. The name must be unique within an Amazon Web Services Region in the - * Amazon Web Services account.
+ *The status of the notebook instance.
*/ - ProcessingJobName: string | undefined; + NotebookInstanceStatus?: NotebookInstanceStatus | string; /** - *Identifies the resources, ML compute instances, and ML storage volumes to deploy for a - * processing job. In distributed training, you specify more than one instance.
+ *If status is Failed
, the reason it failed.
The time limit for how long the processing job is allowed to run.
+ *The URL that you use to connect to the Jupyter notebook that is running in your + * notebook instance.
*/ - StoppingCondition?: ProcessingStoppingCondition; + Url?: string; /** - *Configures the processing job to run a specified container image.
+ *The type of ML compute instance running on the notebook instance.
*/ - AppSpecification: AppSpecification | undefined; + InstanceType?: _InstanceType | string; /** - *The environment variables set in the Docker container.
+ *The ID of the VPC subnet.
*/ - Environment?: RecordNetworking options for a processing job.
+ *The IDs of the VPC security groups.
*/ - NetworkConfig?: NetworkConfig; + SecurityGroups?: string[]; /** - *The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on - * your behalf.
+ *The Amazon Resource Name (ARN) of the IAM role associated with the instance. + *
*/ RoleArn?: string; /** - *The configuration information used to create an experiment.
+ *The Amazon Web Services KMS key ID SageMaker uses to encrypt data when storing it on the ML storage + * volume attached to the instance.
*/ - ExperimentConfig?: ExperimentConfig; + KmsKeyId?: string; /** - *The Amazon Resource Name (ARN) of the processing job.
+ *The network interface IDs that SageMaker created at the time of creating the instance. + *
*/ - ProcessingJobArn: string | undefined; + NetworkInterfaceId?: string; /** - *Provides the status of a processing job.
+ *A timestamp. Use this parameter to retrieve the time when the notebook instance was + * last modified.
*/ - ProcessingJobStatus: ProcessingJobStatus | string | undefined; + LastModifiedTime?: Date; /** - *An optional string, up to one KB in size, that contains metadata from the processing - * container when the processing job exits.
+ *A timestamp. Use this parameter to return the time when the notebook instance was + * created
*/ - ExitMessage?: string; + CreationTime?: Date; /** - *A string, up to one KB in size, that contains the reason a processing job failed, if - * it failed.
+ *Returns the name of a notebook instance lifecycle configuration.
+ *For information about notebook instance lifestyle configurations, see Step + * 2.1: (Optional) Customize a Notebook Instance + *
*/ - FailureReason?: string; + NotebookInstanceLifecycleConfigName?: string; /** - *The time at which the processing job completed.
+ *Describes whether SageMaker provides internet access to the notebook instance. If this + * value is set to Disabled, the notebook instance does not have + * internet access, and cannot connect to SageMaker training and endpoint services.
+ *For more information, see Notebook Instances Are Internet-Enabled by Default.
*/ - ProcessingEndTime?: Date; + DirectInternetAccess?: DirectInternetAccess | string; /** - *The time at which the processing job started.
+ *The size, in GB, of the ML storage volume attached to the notebook instance.
*/ - ProcessingStartTime?: Date; + VolumeSizeInGB?: number; /** - *The time at which the processing job was last modified.
+ *A list of the Elastic Inference (EI) instance types associated with this notebook + * instance. Currently only one EI instance type can be associated with a notebook + * instance. For more information, see Using Elastic Inference in Amazon + * SageMaker.
*/ - LastModifiedTime?: Date; + AcceleratorTypes?: (NotebookInstanceAcceleratorType | string)[]; /** - *The time at which the processing job was created.
+ *The Git repository associated with the notebook instance as its default code + * repository. This can be either the name of a Git repository stored as a resource in your + * account, or the URL of a Git repository in Amazon Web Services CodeCommit or in any + * other Git repository. When you open a notebook instance, it opens in the directory that + * contains this repository. For more information, see Associating Git Repositories with SageMaker + * Notebook Instances.
*/ - CreationTime: Date | undefined; + DefaultCodeRepository?: string; /** - *The ARN of a monitoring schedule for an endpoint associated with this processing - * job.
+ *An array of up to three Git repositories associated with the notebook instance. These + * can be either the names of Git repositories stored as resources in your account, or the + * URL of Git repositories in Amazon Web Services CodeCommit or in any + * other Git repository. These repositories are cloned at the same level as the default + * repository of your notebook instance. For more information, see Associating Git + * Repositories with SageMaker Notebook Instances.
*/ - MonitoringScheduleArn?: string; + AdditionalCodeRepositories?: string[]; /** - *The ARN of an AutoML job associated with this processing job.
+ *Whether root access is enabled or disabled for users of the notebook instance.
+ *Lifecycle configurations need root access to be able to set up a notebook + * instance. Because of this, lifecycle configurations associated with a notebook + * instance always run with root access even if you disable root access for + * users.
+ *The ARN of a training job associated with this processing job.
+ *The platform identifier of the notebook instance runtime environment.
*/ - TrainingJobArn?: string; + PlatformIdentifier?: string; + + /** + *Information on the IMDS configuration of the notebook instance
+ */ + InstanceMetadataServiceConfiguration?: InstanceMetadataServiceConfiguration; } -export namespace DescribeProcessingJobResponse { +export namespace DescribeNotebookInstanceOutput { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeProcessingJobResponse): any => ({ + export const filterSensitiveLog = (obj: DescribeNotebookInstanceOutput): any => ({ ...obj, }); } -export interface DescribeProjectInput { +export interface DescribeNotebookInstanceLifecycleConfigInput { /** - *The name of the project to describe.
+ *The name of the lifecycle configuration to describe.
*/ - ProjectName: string | undefined; + NotebookInstanceLifecycleConfigName: string | undefined; } -export namespace DescribeProjectInput { +export namespace DescribeNotebookInstanceLifecycleConfigInput { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeProjectInput): any => ({ + export const filterSensitiveLog = (obj: DescribeNotebookInstanceLifecycleConfigInput): any => ({ ...obj, }); } -export enum ProjectStatus { - CREATE_COMPLETED = "CreateCompleted", - CREATE_FAILED = "CreateFailed", - CREATE_IN_PROGRESS = "CreateInProgress", - DELETE_COMPLETED = "DeleteCompleted", - DELETE_FAILED = "DeleteFailed", - DELETE_IN_PROGRESS = "DeleteInProgress", - PENDING = "Pending", - UPDATE_COMPLETED = "UpdateCompleted", - UPDATE_FAILED = "UpdateFailed", - UPDATE_IN_PROGRESS = "UpdateInProgress", -} +export interface DescribeNotebookInstanceLifecycleConfigOutput { + /** + *The Amazon Resource Name (ARN) of the lifecycle configuration.
+ */ + NotebookInstanceLifecycleConfigArn?: string; -/** - *Details of a provisioned service catalog product. For information about service catalog, - * see What is Amazon Web Services Service - * Catalog.
- */ -export interface ServiceCatalogProvisionedProductDetails { /** - *The ID of the provisioned product.
+ *The name of the lifecycle configuration.
*/ - ProvisionedProductId?: string; + NotebookInstanceLifecycleConfigName?: string; /** - *The current status of the product.
- *
- * AVAILABLE
- Stable state, ready to perform any operation. The most recent operation succeeded and completed.
- * UNDER_CHANGE
- Transitive state. Operations performed might not have valid results. Wait for an AVAILABLE status before performing operations.
- * TAINTED
- Stable state, ready to perform any operation. The stack has completed the requested operation but is not exactly what was requested. For example, a request to update to a new version failed and the stack rolled back to the current version.
- * ERROR
- An unexpected error occurred. The provisioned product exists but the stack is not running. For example, CloudFormation received a parameter value that was not valid and could not launch the stack.
- * PLAN_IN_PROGRESS
- Transitive state. The plan operations were performed to provision a new product, but resources have not yet been created. After reviewing the list of resources to be created, execute the plan. Wait for an AVAILABLE status before performing operations.
The shell script that runs only once, when you create a notebook instance.
*/ - ProvisionedProductStatusMessage?: string; + OnCreate?: NotebookInstanceLifecycleHook[]; + + /** + *The shell script that runs every time you start a notebook instance, including when + * you create the notebook instance.
+ */ + OnStart?: NotebookInstanceLifecycleHook[]; + + /** + *A timestamp that tells when the lifecycle configuration was last modified.
+ */ + LastModifiedTime?: Date; + + /** + *A timestamp that tells when the lifecycle configuration was created.
+ */ + CreationTime?: Date; } -export namespace ServiceCatalogProvisionedProductDetails { +export namespace DescribeNotebookInstanceLifecycleConfigOutput { /** * @internal */ - export const filterSensitiveLog = (obj: ServiceCatalogProvisionedProductDetails): any => ({ + export const filterSensitiveLog = (obj: DescribeNotebookInstanceLifecycleConfigOutput): any => ({ ...obj, }); } -export interface DescribeProjectOutput { +export interface DescribePipelineRequest { /** - *The Amazon Resource Name (ARN) of the project.
+ *The name of the pipeline to describe.
*/ - ProjectArn: string | undefined; + PipelineName: string | undefined; +} +export namespace DescribePipelineRequest { /** - *The name of the project.
+ * @internal */ - ProjectName: string | undefined; + export const filterSensitiveLog = (obj: DescribePipelineRequest): any => ({ + ...obj, + }); +} + +export enum PipelineStatus { + ACTIVE = "Active", +} +export interface DescribePipelineResponse { /** - *The ID of the project.
+ *The Amazon Resource Name (ARN) of the pipeline.
*/ - ProjectId: string | undefined; + PipelineArn?: string; /** - *The description of the project.
+ *The name of the pipeline.
*/ - ProjectDescription?: string; + PipelineName?: string; /** - *Information used to provision a service catalog product. For information, see What is Amazon Web Services Service - * Catalog.
+ *The display name of the pipeline.
*/ - ServiceCatalogProvisioningDetails: ServiceCatalogProvisioningDetails | undefined; + PipelineDisplayName?: string; /** - *Information about a provisioned service catalog product.
+ *The JSON pipeline definition.
*/ - ServiceCatalogProvisionedProductDetails?: ServiceCatalogProvisionedProductDetails; + PipelineDefinition?: string; /** - *The status of the project.
+ *The description of the pipeline.
*/ - ProjectStatus: ProjectStatus | string | undefined; + PipelineDescription?: string; /** - *Information about the user who created or modified an experiment, trial, trial - * component, lineage group, or project.
+ *The Amazon Resource Name (ARN) that the pipeline uses to execute.
*/ - CreatedBy?: UserContext; + RoleArn?: string; /** - *The time when the project was created.
+ *The status of the pipeline execution.
*/ - CreationTime: Date | undefined; + PipelineStatus?: PipelineStatus | string; /** - *The timestamp when project was last modified.
+ *The time when the pipeline was created.
*/ - LastModifiedTime?: Date; + CreationTime?: Date; /** - *Information about the user who created or modified an experiment, trial, trial - * component, lineage group, or project.
+ *The time when the pipeline was last modified.
*/ - LastModifiedBy?: UserContext; -} + LastModifiedTime?: Date; -export namespace DescribeProjectOutput { /** - * @internal + *The time when the pipeline was last run.
*/ - export const filterSensitiveLog = (obj: DescribeProjectOutput): any => ({ - ...obj, - }); -} + LastRunTime?: Date; -export interface DescribeStudioLifecycleConfigRequest { /** - *The name of the Studio Lifecycle Configuration to describe.
+ *Information about the user who created or modified an experiment, trial, trial + * component, lineage group, or project.
*/ - StudioLifecycleConfigName: string | undefined; + CreatedBy?: UserContext; + + /** + *Information about the user who created or modified an experiment, trial, trial + * component, lineage group, or project.
+ */ + LastModifiedBy?: UserContext; + + /** + *Lists the parallelism configuration applied to the pipeline.
+ */ + ParallelismConfiguration?: ParallelismConfiguration; } -export namespace DescribeStudioLifecycleConfigRequest { +export namespace DescribePipelineResponse { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeStudioLifecycleConfigRequest): any => ({ + export const filterSensitiveLog = (obj: DescribePipelineResponse): any => ({ ...obj, }); } -export interface DescribeStudioLifecycleConfigResponse { +export interface DescribePipelineDefinitionForExecutionRequest { /** - *The ARN of the Lifecycle Configuration to describe.
+ *The Amazon Resource Name (ARN) of the pipeline execution.
*/ - StudioLifecycleConfigArn?: string; + PipelineExecutionArn: string | undefined; +} +export namespace DescribePipelineDefinitionForExecutionRequest { /** - *The name of the Studio Lifecycle Configuration that is described.
+ * @internal */ - StudioLifecycleConfigName?: string; + export const filterSensitiveLog = (obj: DescribePipelineDefinitionForExecutionRequest): any => ({ + ...obj, + }); +} +export interface DescribePipelineDefinitionForExecutionResponse { /** - *The creation time of the Studio Lifecycle Configuration.
+ *The JSON pipeline definition.
*/ - CreationTime?: Date; + PipelineDefinition?: string; /** - *This value is equivalent to CreationTime because Studio Lifecycle Configurations are immutable.
+ *The time when the pipeline was created.
*/ - LastModifiedTime?: Date; + CreationTime?: Date; +} +export namespace DescribePipelineDefinitionForExecutionResponse { /** - *The content of your Studio Lifecycle Configuration script.
+ * @internal */ - StudioLifecycleConfigContent?: string; + export const filterSensitiveLog = (obj: DescribePipelineDefinitionForExecutionResponse): any => ({ + ...obj, + }); +} +export interface DescribePipelineExecutionRequest { /** - *The App type that the Lifecycle Configuration is attached to.
+ *The Amazon Resource Name (ARN) of the pipeline execution.
*/ - StudioLifecycleConfigAppType?: StudioLifecycleConfigAppType | string; + PipelineExecutionArn: string | undefined; } -export namespace DescribeStudioLifecycleConfigResponse { +export namespace DescribePipelineExecutionRequest { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeStudioLifecycleConfigResponse): any => ({ + export const filterSensitiveLog = (obj: DescribePipelineExecutionRequest): any => ({ ...obj, }); } -export interface DescribeSubscribedWorkteamRequest { +export enum PipelineExecutionStatus { + EXECUTING = "Executing", + FAILED = "Failed", + STOPPED = "Stopped", + STOPPING = "Stopping", + SUCCEEDED = "Succeeded", +} + +/** + *Specifies the names of the experiment and trial created by a pipeline.
+ */ +export interface PipelineExperimentConfig { /** - *The Amazon Resource Name (ARN) of the subscribed work team to describe.
+ *The name of the experiment.
*/ - WorkteamArn: string | undefined; + ExperimentName?: string; + + /** + *The name of the trial.
+ */ + TrialName?: string; } -export namespace DescribeSubscribedWorkteamRequest { +export namespace PipelineExperimentConfig { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeSubscribedWorkteamRequest): any => ({ + export const filterSensitiveLog = (obj: PipelineExperimentConfig): any => ({ ...obj, }); } -/** - *Describes a work team of a vendor that does the a labelling job.
- */ -export interface SubscribedWorkteam { +export interface DescribePipelineExecutionResponse { /** - *The Amazon Resource Name (ARN) of the vendor that you have subscribed.
+ *The Amazon Resource Name (ARN) of the pipeline.
*/ - WorkteamArn: string | undefined; + PipelineArn?: string; /** - *The title of the service provided by the vendor in the Amazon Marketplace.
+ *The Amazon Resource Name (ARN) of the pipeline execution.
*/ - MarketplaceTitle?: string; + PipelineExecutionArn?: string; /** - *The name of the vendor in the Amazon Marketplace.
+ *The display name of the pipeline execution.
*/ - SellerName?: string; + PipelineExecutionDisplayName?: string; /** - *The description of the vendor from the Amazon Marketplace.
+ *The status of the pipeline execution.
*/ - MarketplaceDescription?: string; + PipelineExecutionStatus?: PipelineExecutionStatus | string; /** - *Marketplace product listing ID.
+ *The description of the pipeline execution.
*/ - ListingId?: string; -} + PipelineExecutionDescription?: string; -export namespace SubscribedWorkteam { /** - * @internal + *Specifies the names of the experiment and trial created by a pipeline.
*/ - export const filterSensitiveLog = (obj: SubscribedWorkteam): any => ({ - ...obj, - }); -} + PipelineExperimentConfig?: PipelineExperimentConfig; -export interface DescribeSubscribedWorkteamResponse { /** - *A Workteam
instance that contains information about the work team.
If the execution failed, a message describing why.
*/ - SubscribedWorkteam: SubscribedWorkteam | undefined; + FailureReason?: string; + + /** + *The time when the pipeline execution was created.
+ */ + CreationTime?: Date; + + /** + *The time when the pipeline execution was modified last.
+ */ + LastModifiedTime?: Date; + + /** + *Information about the user who created or modified an experiment, trial, trial + * component, lineage group, or project.
+ */ + CreatedBy?: UserContext; + + /** + *Information about the user who created or modified an experiment, trial, trial + * component, lineage group, or project.
+ */ + LastModifiedBy?: UserContext; + + /** + *The parallelism configuration applied to the pipeline.
+ */ + ParallelismConfiguration?: ParallelismConfiguration; } -export namespace DescribeSubscribedWorkteamResponse { +export namespace DescribePipelineExecutionResponse { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeSubscribedWorkteamResponse): any => ({ + export const filterSensitiveLog = (obj: DescribePipelineExecutionResponse): any => ({ ...obj, }); } -export interface DescribeTrainingJobRequest { +export interface DescribeProcessingJobRequest { /** - *The name of the training job.
+ *The name of the processing job. The name must be unique within an Amazon Web Services Region in the + * Amazon Web Services account.
*/ - TrainingJobName: string | undefined; + ProcessingJobName: string | undefined; } -export namespace DescribeTrainingJobRequest { +export namespace DescribeProcessingJobRequest { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeTrainingJobRequest): any => ({ + export const filterSensitiveLog = (obj: DescribeProcessingJobRequest): any => ({ ...obj, }); } -/** - *The name, value, and date and time of a metric that was emitted to Amazon CloudWatch.
- */ -export interface MetricData { +export enum ProcessingJobStatus { + COMPLETED = "Completed", + FAILED = "Failed", + IN_PROGRESS = "InProgress", + STOPPED = "Stopped", + STOPPING = "Stopping", +} + +export interface DescribeProcessingJobResponse { /** - *The name of the metric.
+ *The inputs for a processing job.
*/ - MetricName?: string; + ProcessingInputs?: ProcessingInput[]; /** - *The value of the metric.
+ *Output configuration for the processing job.
*/ - Value?: number; + ProcessingOutputConfig?: ProcessingOutputConfig; /** - *The date and time that the algorithm emitted the metric.
+ *The name of the processing job. The name must be unique within an Amazon Web Services Region in the + * Amazon Web Services account.
*/ - Timestamp?: Date; -} + ProcessingJobName: string | undefined; -export namespace MetricData { /** - * @internal + *Identifies the resources, ML compute instances, and ML storage volumes to deploy for a + * processing job. In distributed training, you specify more than one instance.
*/ - export const filterSensitiveLog = (obj: MetricData): any => ({ - ...obj, - }); -} + ProcessingResources: ProcessingResources | undefined; -/** - *Information about the status of the rule evaluation.
- */ -export interface ProfilerRuleEvaluationStatus { /** - *The name of the rule configuration.
+ *The time limit for how long the processing job is allowed to run.
*/ - RuleConfigurationName?: string; + StoppingCondition?: ProcessingStoppingCondition; /** - *The Amazon Resource Name (ARN) of the rule evaluation job.
+ *Configures the processing job to run a specified container image.
*/ - RuleEvaluationJobArn?: string; + AppSpecification: AppSpecification | undefined; /** - *Status of the rule evaluation.
+ *The environment variables set in the Docker container.
*/ - RuleEvaluationStatus?: RuleEvaluationStatus | string; + Environment?: RecordDetails from the rule evaluation.
+ *Networking options for a processing job.
*/ - StatusDetails?: string; + NetworkConfig?: NetworkConfig; /** - *Timestamp when the rule evaluation status was last modified.
+ *The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on + * your behalf.
*/ - LastModifiedTime?: Date; -} + RoleArn?: string; -export namespace ProfilerRuleEvaluationStatus { /** - * @internal + *The configuration information used to create an experiment.
*/ - export const filterSensitiveLog = (obj: ProfilerRuleEvaluationStatus): any => ({ - ...obj, - }); -} + ExperimentConfig?: ExperimentConfig; -export enum ProfilingStatus { - DISABLED = "Disabled", - ENABLED = "Enabled", -} - -export enum SecondaryStatus { - COMPLETED = "Completed", - DOWNLOADING = "Downloading", - DOWNLOADING_TRAINING_IMAGE = "DownloadingTrainingImage", - FAILED = "Failed", - INTERRUPTED = "Interrupted", - LAUNCHING_ML_INSTANCES = "LaunchingMLInstances", - MAX_RUNTIME_EXCEEDED = "MaxRuntimeExceeded", - MAX_WAIT_TIME_EXCEEDED = "MaxWaitTimeExceeded", - PREPARING_TRAINING_STACK = "PreparingTrainingStack", - RESTARTING = "Restarting", - STARTING = "Starting", - STOPPED = "Stopped", - STOPPING = "Stopping", - TRAINING = "Training", - UPDATING = "Updating", - UPLOADING = "Uploading", -} - -/** - *An array element of DescribeTrainingJobResponse$SecondaryStatusTransitions. It provides - * additional details about a status that the training job has transitioned through. A - * training job can be in one of several states, for example, starting, downloading, - * training, or uploading. Within each state, there are a number of intermediate states. - * For example, within the starting state, SageMaker could be starting the training job or - * launching the ML instances. These transitional states are referred to as the job's - * secondary - * status. - *
- * - */ -export interface SecondaryStatusTransition { - /** - *Contains a secondary status information from a training - * job.
- *Status might be one of the following secondary statuses:
- *
- * Starting
- * - Starting the training job.
- * Downloading
- An optional stage for algorithms that
- * support File
training input mode. It indicates that
- * data is being downloaded to the ML storage volumes.
- * Training
- Training is in progress.
- * Uploading
- Training is complete and the model
- * artifacts are being uploaded to the S3 location.
- * Completed
- The training job has completed.
- * Failed
- The training job has failed. The reason for
- * the failure is returned in the FailureReason
field of
- * DescribeTrainingJobResponse
.
- * MaxRuntimeExceeded
- The job stopped because it
- * exceeded the maximum allowed runtime.
- * Stopped
- The training job has stopped.
- * Stopping
- Stopping the training job.
We no longer support the following secondary statuses:
- *
- * LaunchingMLInstances
- *
- * PreparingTrainingStack
- *
- * DownloadingTrainingImage
- *
A timestamp that shows when the training job transitioned to the current secondary - * status state.
- */ - StartTime: Date | undefined; - - /** - *A timestamp that shows when the training job transitioned out of this secondary status - * state into another secondary status state or when the training job has ended.
- */ - EndTime?: Date; - - /** - *A detailed description of the progress within a secondary status. - *
- *SageMaker provides secondary statuses and status messages that apply to each of - * them:
- *Starting the training job.
- *Launching requested ML - * instances.
- *Insufficient - * capacity error from EC2 while launching instances, - * retrying!
- *Launched - * instance was unhealthy, replacing it!
- *Preparing the instances for training.
- *Downloading the training image.
- *Training - * image download completed. Training in - * progress.
- *Status messages are subject to change. Therefore, we recommend not including them - * in code that programmatically initiates actions. For examples, don't use status - * messages in if statements.
- *To have an overview of your training job's progress, view
- * TrainingJobStatus
and SecondaryStatus
in DescribeTrainingJob, and StatusMessage
together. For
- * example, at the start of a training job, you might see the following:
- * TrainingJobStatus
- InProgress
- * SecondaryStatus
- Training
- * StatusMessage
- Downloading the training image
Name of the model training job.
- */ - TrainingJobName: string | undefined; - - /** - *The Amazon Resource Name (ARN) of the training job.
- */ - TrainingJobArn: string | undefined; - - /** - *The Amazon Resource Name (ARN) of the associated hyperparameter tuning job if the - * training job was launched by a hyperparameter tuning job.
- */ - TuningJobArn?: string; - - /** - *The Amazon Resource Name (ARN) of the SageMaker Ground Truth labeling job that created the - * transform or training job.
- */ - LabelingJobArn?: string; - - /** - *The Amazon Resource Name (ARN) of an AutoML job.
- */ - AutoMLJobArn?: string; - - /** - *Information about the Amazon S3 location that is configured for storing model artifacts. - *
- */ - ModelArtifacts: ModelArtifacts | undefined; - - /** - *The status of the training job.
- *SageMaker provides the following training job statuses:
- *
- * InProgress
- The training is in progress.
- * Completed
- The training job has completed.
- * Failed
- The training job has failed. To see the reason for the
- * failure, see the FailureReason
field in the response to a
- * DescribeTrainingJobResponse
call.
- * Stopping
- The training job is stopping.
- * Stopped
- The training job has stopped.
For more detailed information, see SecondaryStatus
.
Provides detailed information about the state of the training job. For detailed
- * information on the secondary status of the training job, see StatusMessage
- * under SecondaryStatusTransition.
SageMaker provides primary statuses and secondary statuses that apply to each of - * them:
- *
- * Starting
- * - Starting the training job.
- * Downloading
- An optional stage for algorithms that
- * support File
training input mode. It indicates that
- * data is being downloaded to the ML storage volumes.
- * Training
- Training is in progress.
- * Interrupted
- The job stopped because the managed
- * spot training instances were interrupted.
- * Uploading
- Training is complete and the model
- * artifacts are being uploaded to the S3 location.
- * Completed
- The training job has completed.
- * Failed
- The training job has failed. The reason for
- * the failure is returned in the FailureReason
field of
- * DescribeTrainingJobResponse
.
- * MaxRuntimeExceeded
- The job stopped because it
- * exceeded the maximum allowed runtime.
- * MaxWaitTimeExceeded
- The job stopped because it
- * exceeded the maximum allowed wait time.
- * Stopped
- The training job has stopped.
- * Stopping
- Stopping the training job.
Valid values for SecondaryStatus
are subject to change.
We no longer support the following secondary statuses:
- *
- * LaunchingMLInstances
- *
- * PreparingTraining
- *
- * DownloadingTrainingImage
- *
If the training job failed, the reason it failed.
- */ - FailureReason?: string; - - /** - *Algorithm-specific parameters.
- */ - HyperParameters?: RecordInformation about the algorithm used for training, and algorithm metadata. - *
- */ - AlgorithmSpecification: AlgorithmSpecification | undefined; - - /** - *The Amazon Web Services Identity and Access Management (IAM) role configured for the training job.
- */ - RoleArn?: string; - - /** - *An array of Channel
objects that describes each data input channel.
- *
The S3 path where model artifacts that you configured when creating the job are - * stored. SageMaker creates subfolders for model artifacts.
- */ - OutputDataConfig?: OutputDataConfig; - - /** - *Resources, including ML compute instances and ML storage volumes, that are - * configured for model training.
- */ - ResourceConfig: ResourceConfig | undefined; + /** + *The Amazon Resource Name (ARN) of the processing job.
+ */ + ProcessingJobArn: string | undefined; /** - *A VpcConfig object that specifies the VPC that this training job has - * access to. For more information, see Protect Training Jobs by Using an Amazon - * Virtual Private Cloud.
+ *Provides the status of a processing job.
*/ - VpcConfig?: VpcConfig; + ProcessingJobStatus: ProcessingJobStatus | string | undefined; /** - *Specifies a limit to how long a model training job can run. It also specifies how long - * a managed Spot training job has to complete. When the job reaches the time limit, SageMaker - * ends the training job. Use this API to cap model training costs.
- *To stop a job, SageMaker sends the algorithm the SIGTERM
signal, which delays
- * job termination for 120 seconds. Algorithms can use this 120-second window to save the
- * model artifacts, so the results of training are not lost.
An optional string, up to one KB in size, that contains metadata from the processing + * container when the processing job exits.
*/ - StoppingCondition: StoppingCondition | undefined; + ExitMessage?: string; /** - *A timestamp that indicates when the training job was created.
+ *A string, up to one KB in size, that contains the reason a processing job failed, if + * it failed.
*/ - CreationTime: Date | undefined; + FailureReason?: string; /** - *Indicates the time when the training job starts on training instances. You are
- * billed for the time interval between this time and the value of
- * TrainingEndTime
. The start time in CloudWatch Logs might be later than this time.
- * The difference is due to the time it takes to download the training data and to the size
- * of the training container.
The time at which the processing job completed.
*/ - TrainingStartTime?: Date; + ProcessingEndTime?: Date; /** - *Indicates the time when the training job ends on training instances. You are billed
- * for the time interval between the value of TrainingStartTime
and this time.
- * For successful jobs and stopped jobs, this is the time after model artifacts are
- * uploaded. For failed jobs, this is the time when SageMaker detects a job failure.
The time at which the processing job started.
*/ - TrainingEndTime?: Date; + ProcessingStartTime?: Date; /** - *A timestamp that indicates when the status of the training job was last - * modified.
+ *The time at which the processing job was last modified.
*/ LastModifiedTime?: Date; /** - *A history of all of the secondary statuses that the training job has transitioned - * through.
+ *The time at which the processing job was created.
*/ - SecondaryStatusTransitions?: SecondaryStatusTransition[]; + CreationTime: Date | undefined; /** - *A collection of MetricData
objects that specify the names, values, and
- * dates and times that the training algorithm emitted to Amazon CloudWatch.
The ARN of a monitoring schedule for an endpoint associated with this processing + * job.
*/ - FinalMetricDataList?: MetricData[]; + MonitoringScheduleArn?: string; /** - *If you want to allow inbound or outbound network calls, except for calls between peers
- * within a training cluster for distributed training, choose True
. If you
- * enable network isolation for training jobs that are configured to use a VPC, SageMaker
- * downloads and uploads customer data and model artifacts through the specified VPC, but
- * the training container does not have network access.
The ARN of an AutoML job associated with this processing job.
*/ - EnableNetworkIsolation?: boolean; + AutoMLJobArn?: string; /** - *To encrypt all communications between ML compute instances in distributed training,
- * choose True
. Encryption provides greater security for distributed training,
- * but training might take longer. How long it takes depends on the amount of communication
- * between compute instances, especially if you use a deep learning algorithms in
- * distributed training.
The ARN of a training job associated with this processing job.
*/ - EnableInterContainerTrafficEncryption?: boolean; + TrainingJobArn?: string; +} +export namespace DescribeProcessingJobResponse { /** - *A Boolean indicating whether managed spot training is enabled (True
) or
- * not (False
).
Contains information about the output location for managed spot training checkpoint - * data.
+ *The name of the project to describe.
*/ - CheckpointConfig?: CheckpointConfig; + ProjectName: string | undefined; +} +export namespace DescribeProjectInput { /** - *The training time in seconds.
+ * @internal */ - TrainingTimeInSeconds?: number; + export const filterSensitiveLog = (obj: DescribeProjectInput): any => ({ + ...obj, + }); +} - /** - *The billable time in seconds. Billable time refers to the absolute wall-clock - * time.
- *Multiply BillableTimeInSeconds
by the number of instances
- * (InstanceCount
) in your training cluster to get the total compute time
- * SageMaker bills you if you run distributed training. The formula is as follows:
- * BillableTimeInSeconds * InstanceCount
.
You can calculate the savings from using managed spot training using the formula
- * (1 - BillableTimeInSeconds / TrainingTimeInSeconds) * 100
. For example,
- * if BillableTimeInSeconds
is 100 and TrainingTimeInSeconds
is
- * 500, the savings is 80%.
Details of a provisioned service catalog product. For information about service catalog, + * see What is Amazon Web Services Service + * Catalog.
+ */ +export interface ServiceCatalogProvisionedProductDetails { /** - *Configuration information for the Debugger hook parameters, metric and tensor collections, and
- * storage paths. To learn more about
- * how to configure the DebugHookConfig
parameter,
- * see Use the SageMaker and Debugger Configuration API Operations to Create, Update, and Debug Your Training Job.
The ID of the provisioned product.
*/ - DebugHookConfig?: DebugHookConfig; + ProvisionedProductId?: string; /** - *Associates a SageMaker job as a trial component with an experiment and trial. Specified when - * you call the following APIs:
- *The current status of the product.
+ *- * CreateProcessingJob - *
+ *
+ * AVAILABLE
- Stable state, ready to perform any operation. The most recent operation succeeded and completed.
- * CreateTrainingJob - *
+ *
+ * UNDER_CHANGE
- Transitive state. Operations performed might not have valid results. Wait for an AVAILABLE status before performing operations.
- * CreateTransformJob - *
+ *
+ * TAINTED
- Stable state, ready to perform any operation. The stack has completed the requested operation but is not exactly what was requested. For example, a request to update to a new version failed and the stack rolled back to the current version.
+ * ERROR
- An unexpected error occurred. The provisioned product exists but the stack is not running. For example, CloudFormation received a parameter value that was not valid and could not launch the stack.
+ * PLAN_IN_PROGRESS
- Transitive state. The plan operations were performed to provision a new product, but resources have not yet been created. After reviewing the list of resources to be created, execute the plan. Wait for an AVAILABLE status before performing operations.
Configuration information for Debugger rules for debugging output tensors.
- */ - DebugRuleConfigurations?: DebugRuleConfiguration[]; - - /** - *Configuration of storage locations for the Debugger TensorBoard output data.
- */ - TensorBoardOutputConfig?: TensorBoardOutputConfig; - - /** - *Evaluation status of Debugger rules for debugging on a training job.
- */ - DebugRuleEvaluationStatuses?: DebugRuleEvaluationStatus[]; - - /** - *Configuration information for Debugger system monitoring, framework profiling, and - * storage paths.
- */ - ProfilerConfig?: ProfilerConfig; - - /** - *Configuration information for Debugger rules for profiling system and framework - * metrics.
- */ - ProfilerRuleConfigurations?: ProfilerRuleConfiguration[]; - - /** - *Evaluation status of Debugger rules for profiling on a training job.
- */ - ProfilerRuleEvaluationStatuses?: ProfilerRuleEvaluationStatus[]; - - /** - *Profiling status of a training job.
- */ - ProfilingStatus?: ProfilingStatus | string; - - /** - *The number of times to retry the job when the job fails due to an
- * InternalServerError
.
The environment variables to set in the Docker container.
- */ - Environment?: RecordThe name of the transform job that you want to view details of.
- */ - TransformJobName: string | undefined; + ProvisionedProductStatusMessage?: string; } -export namespace DescribeTransformJobRequest { +export namespace ServiceCatalogProvisionedProductDetails { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeTransformJobRequest): any => ({ + export const filterSensitiveLog = (obj: ServiceCatalogProvisionedProductDetails): any => ({ ...obj, }); } -export enum TransformJobStatus { - COMPLETED = "Completed", - FAILED = "Failed", - IN_PROGRESS = "InProgress", - STOPPED = "Stopped", - STOPPING = "Stopping", -} - -export interface DescribeTransformJobResponse { - /** - *The name of the transform job.
- */ - TransformJobName: string | undefined; - - /** - *The Amazon Resource Name (ARN) of the transform job.
- */ - TransformJobArn: string | undefined; - +export interface DescribeProjectOutput { /** - *The
- * status of the transform job. If the transform job failed, the reason
- * is returned in the FailureReason
field.
The Amazon Resource Name (ARN) of the project.
*/ - TransformJobStatus: TransformJobStatus | string | undefined; + ProjectArn: string | undefined; /** - *If the transform job failed, FailureReason
describes
- * why
- * it failed. A transform job creates a log file, which includes error
- * messages, and stores it
- * as
- * an Amazon S3 object. For more information, see Log Amazon SageMaker Events with
- * Amazon CloudWatch.
The name of the project.
*/ - FailureReason?: string; + ProjectName: string | undefined; /** - *The name of the model used in the transform job.
+ *The ID of the project.
*/ - ModelName: string | undefined; + ProjectId: string | undefined; /** - *The - * maximum number - * of - * parallel requests on each instance node - * that can be launched in a transform job. The default value is 1.
+ *The description of the project.
*/ - MaxConcurrentTransforms?: number; + ProjectDescription?: string; /** - *The timeout and maximum number of retries for processing a transform job - * invocation.
+ *Information used to provision a service catalog product. For information, see What is Amazon Web Services Service + * Catalog.
*/ - ModelClientConfig?: ModelClientConfig; + ServiceCatalogProvisioningDetails: ServiceCatalogProvisioningDetails | undefined; /** - *The - * maximum - * payload size, in MB, used in the - * transform job.
+ *Information about a provisioned service catalog product.
*/ - MaxPayloadInMB?: number; + ServiceCatalogProvisionedProductDetails?: ServiceCatalogProvisionedProductDetails; /** - *Specifies the number of records to include in a mini-batch for an HTTP inference - * request. - * A record - * is a single unit of input data that inference - * can be made on. For example, a single line in a CSV file is a record.
- *To enable the batch strategy, you must set SplitType
- * to
- * Line
, RecordIO
, or
- * TFRecord
.
The status of the project.
*/ - BatchStrategy?: BatchStrategy | string; + ProjectStatus: ProjectStatus | string | undefined; /** - *The - * environment variables to set in the Docker container. We support up to 16 key and values - * entries in the map.
+ *Information about the user who created or modified an experiment, trial, trial + * component, lineage group, or project.
*/ - Environment?: RecordDescribes the dataset to be transformed and the Amazon S3 location where it is - * stored.
+ *The time when the project was created.
*/ - TransformInput: TransformInput | undefined; + CreationTime: Date | undefined; /** - *Identifies the Amazon S3 location where you want Amazon SageMaker to save the results from the - * transform job.
+ *The timestamp when project was last modified.
*/ - TransformOutput?: TransformOutput; + LastModifiedTime?: Date; /** - *Describes - * the resources, including ML instance types and ML instance count, to - * use for the transform job.
+ *Information about the user who created or modified an experiment, trial, trial + * component, lineage group, or project.
*/ - TransformResources: TransformResources | undefined; + LastModifiedBy?: UserContext; +} +export namespace DescribeProjectOutput { /** - *A timestamp that shows when the transform Job was created.
+ * @internal */ - CreationTime: Date | undefined; + export const filterSensitiveLog = (obj: DescribeProjectOutput): any => ({ + ...obj, + }); +} +export interface DescribeStudioLifecycleConfigRequest { /** - *Indicates when the transform job starts
- * on
- * ML instances. You are billed for the time interval between this time
- * and the value of TransformEndTime
.
The name of the Studio Lifecycle Configuration to describe.
*/ - TransformStartTime?: Date; + StudioLifecycleConfigName: string | undefined; +} +export namespace DescribeStudioLifecycleConfigRequest { /** - *Indicates when the transform job has been
- *
- * completed, or has stopped or failed. You are billed for the time
- * interval between this time and the value of TransformStartTime
.
The Amazon Resource Name (ARN) of the Amazon SageMaker Ground Truth labeling job that created the - * transform or training job.
+ *The ARN of the Lifecycle Configuration to describe.
*/ - LabelingJobArn?: string; + StudioLifecycleConfigArn?: string; /** - *The Amazon Resource Name (ARN) of the AutoML transform job.
+ *The name of the Studio Lifecycle Configuration that is described.
*/ - AutoMLJobArn?: string; + StudioLifecycleConfigName?: string; /** - *The data structure used to specify the data to be used for inference in a batch - * transform job and to associate the data that is relevant to the prediction results in - * the output. The input filter provided allows you to exclude input data that is not - * needed for inference in a batch transform job. The output filter provided allows you to - * include input data relevant to interpreting the predictions in the output from the job. - * For more information, see Associate Prediction - * Results with their Corresponding Input Records.
+ *The creation time of the Studio Lifecycle Configuration.
*/ - DataProcessing?: DataProcessing; + CreationTime?: Date; /** - *Associates a SageMaker job as a trial component with an experiment and trial. Specified when - * you call the following APIs:
- *- * CreateProcessingJob - *
- *- * CreateTrainingJob - *
- *- * CreateTransformJob - *
- *This value is equivalent to CreationTime because Studio Lifecycle Configurations are immutable.
*/ - ExperimentConfig?: ExperimentConfig; -} + LastModifiedTime?: Date; -export namespace DescribeTransformJobResponse { /** - * @internal + *The content of your Studio Lifecycle Configuration script.
*/ - export const filterSensitiveLog = (obj: DescribeTransformJobResponse): any => ({ - ...obj, - }); -} + StudioLifecycleConfigContent?: string; -export interface DescribeTrialRequest { /** - *The name of the trial to describe.
+ *The App type that the Lifecycle Configuration is attached to.
*/ - TrialName: string | undefined; + StudioLifecycleConfigAppType?: StudioLifecycleConfigAppType | string; } -export namespace DescribeTrialRequest { +export namespace DescribeStudioLifecycleConfigResponse { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeTrialRequest): any => ({ + export const filterSensitiveLog = (obj: DescribeStudioLifecycleConfigResponse): any => ({ ...obj, }); } -/** - *The source of the trial.
- */ -export interface TrialSource { - /** - *The Amazon Resource Name (ARN) of the source.
- */ - SourceArn: string | undefined; - +export interface DescribeSubscribedWorkteamRequest { /** - *The source job type.
+ *The Amazon Resource Name (ARN) of the subscribed work team to describe.
*/ - SourceType?: string; + WorkteamArn: string | undefined; } -export namespace TrialSource { +export namespace DescribeSubscribedWorkteamRequest { /** * @internal */ - export const filterSensitiveLog = (obj: TrialSource): any => ({ + export const filterSensitiveLog = (obj: DescribeSubscribedWorkteamRequest): any => ({ ...obj, }); } -export interface DescribeTrialResponse { - /** - *The name of the trial.
- */ - TrialName?: string; - +/** + *Describes a work team of a vendor that does the a labelling job.
+ */ +export interface SubscribedWorkteam { /** - *The Amazon Resource Name (ARN) of the trial.
+ *The Amazon Resource Name (ARN) of the vendor that you have subscribed.
*/ - TrialArn?: string; + WorkteamArn: string | undefined; /** - *The name of the trial as displayed. If DisplayName
isn't specified,
- * TrialName
is displayed.
The title of the service provided by the vendor in the Amazon Marketplace.
*/ - DisplayName?: string; + MarketplaceTitle?: string; /** - *The name of the experiment the trial is part of.
+ *The name of the vendor in the Amazon Marketplace.
*/ - ExperimentName?: string; + SellerName?: string; /** - *The Amazon Resource Name (ARN) of the source and, optionally, the job type.
+ *The description of the vendor from the Amazon Marketplace.
*/ - Source?: TrialSource; + MarketplaceDescription?: string; /** - *When the trial was created.
+ *Marketplace product listing ID.
*/ - CreationTime?: Date; + ListingId?: string; +} +export namespace SubscribedWorkteam { /** - *Who created the trial.
+ * @internal */ - CreatedBy?: UserContext; + export const filterSensitiveLog = (obj: SubscribedWorkteam): any => ({ + ...obj, + }); +} +export interface DescribeSubscribedWorkteamResponse { /** - *When the trial was last modified.
+ *A Workteam
instance that contains information about the work team.
Who last modified the trial.
+ * @internal */ - LastModifiedBy?: UserContext; + export const filterSensitiveLog = (obj: DescribeSubscribedWorkteamResponse): any => ({ + ...obj, + }); +} +export interface DescribeTrainingJobRequest { /** - *Metadata properties of the tracking entity, trial, or trial component.
+ *The name of the training job.
*/ - MetadataProperties?: MetadataProperties; + TrainingJobName: string | undefined; } -export namespace DescribeTrialResponse { +export namespace DescribeTrainingJobRequest { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeTrialResponse): any => ({ + export const filterSensitiveLog = (obj: DescribeTrainingJobRequest): any => ({ ...obj, }); } -export interface DescribeTrialComponentRequest { +/** + *The name, value, and date and time of a metric that was emitted to Amazon CloudWatch.
+ */ +export interface MetricData { + /** + *The name of the metric.
+ */ + MetricName?: string; + + /** + *The value of the metric.
+ */ + Value?: number; + /** - *The name of the trial component to describe.
+ *The date and time that the algorithm emitted the metric.
*/ - TrialComponentName: string | undefined; + Timestamp?: Date; } -export namespace DescribeTrialComponentRequest { +export namespace MetricData { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeTrialComponentRequest): any => ({ + export const filterSensitiveLog = (obj: MetricData): any => ({ ...obj, }); } /** - *A summary of the metrics of a trial component.
+ *Information about the status of the rule evaluation.
*/ -export interface TrialComponentMetricSummary { +export interface ProfilerRuleEvaluationStatus { /** - *The name of the metric.
+ *The name of the rule configuration.
*/ - MetricName?: string; + RuleConfigurationName?: string; /** - *The Amazon Resource Name (ARN) of the source.
+ *The Amazon Resource Name (ARN) of the rule evaluation job.
*/ - SourceArn?: string; + RuleEvaluationJobArn?: string; /** - *When the metric was last updated.
+ *Status of the rule evaluation.
*/ - TimeStamp?: Date; + RuleEvaluationStatus?: RuleEvaluationStatus | string; /** - *The maximum value of the metric.
+ *Details from the rule evaluation.
*/ - Max?: number; + StatusDetails?: string; /** - *The minimum value of the metric.
+ *Timestamp when the rule evaluation status was last modified.
*/ - Min?: number; + LastModifiedTime?: Date; +} +export namespace ProfilerRuleEvaluationStatus { /** - *The most recent value of the metric.
+ * @internal */ - Last?: number; + export const filterSensitiveLog = (obj: ProfilerRuleEvaluationStatus): any => ({ + ...obj, + }); +} + +export enum ProfilingStatus { + DISABLED = "Disabled", + ENABLED = "Enabled", +} + +export enum SecondaryStatus { + COMPLETED = "Completed", + DOWNLOADING = "Downloading", + DOWNLOADING_TRAINING_IMAGE = "DownloadingTrainingImage", + FAILED = "Failed", + INTERRUPTED = "Interrupted", + LAUNCHING_ML_INSTANCES = "LaunchingMLInstances", + MAX_RUNTIME_EXCEEDED = "MaxRuntimeExceeded", + MAX_WAIT_TIME_EXCEEDED = "MaxWaitTimeExceeded", + PREPARING_TRAINING_STACK = "PreparingTrainingStack", + RESTARTING = "Restarting", + STARTING = "Starting", + STOPPED = "Stopped", + STOPPING = "Stopping", + TRAINING = "Training", + UPDATING = "Updating", + UPLOADING = "Uploading", +} +/** + *An array element of DescribeTrainingJobResponse$SecondaryStatusTransitions. It provides + * additional details about a status that the training job has transitioned through. A + * training job can be in one of several states, for example, starting, downloading, + * training, or uploading. Within each state, there are a number of intermediate states. + * For example, within the starting state, SageMaker could be starting the training job or + * launching the ML instances. These transitional states are referred to as the job's + * secondary + * status. + *
+ * + */ +export interface SecondaryStatusTransition { /** - *The number of samples used to generate the metric.
+ *Contains a secondary status information from a training + * job.
+ *Status might be one of the following secondary statuses:
+ *
+ * Starting
+ * - Starting the training job.
+ * Downloading
- An optional stage for algorithms that
+ * support File
training input mode. It indicates that
+ * data is being downloaded to the ML storage volumes.
+ * Training
- Training is in progress.
+ * Uploading
- Training is complete and the model
+ * artifacts are being uploaded to the S3 location.
+ * Completed
- The training job has completed.
+ * Failed
- The training job has failed. The reason for
+ * the failure is returned in the FailureReason
field of
+ * DescribeTrainingJobResponse
.
+ * MaxRuntimeExceeded
- The job stopped because it
+ * exceeded the maximum allowed runtime.
+ * Stopped
- The training job has stopped.
+ * Stopping
- Stopping the training job.
We no longer support the following secondary statuses:
+ *
+ * LaunchingMLInstances
+ *
+ * PreparingTrainingStack
+ *
+ * DownloadingTrainingImage
+ *
The average value of the metric.
+ *A timestamp that shows when the training job transitioned to the current secondary + * status state.
*/ - Avg?: number; + StartTime: Date | undefined; /** - *The standard deviation of the metric.
+ *A timestamp that shows when the training job transitioned out of this secondary status + * state into another secondary status state or when the training job has ended.
*/ - StdDev?: number; + EndTime?: Date; + + /** + *A detailed description of the progress within a secondary status. + *
+ *SageMaker provides secondary statuses and status messages that apply to each of + * them:
+ *Starting the training job.
+ *Launching requested ML + * instances.
+ *Insufficient + * capacity error from EC2 while launching instances, + * retrying!
+ *Launched + * instance was unhealthy, replacing it!
+ *Preparing the instances for training.
+ *Downloading the training image.
+ *Training + * image download completed. Training in + * progress.
+ *Status messages are subject to change. Therefore, we recommend not including them + * in code that programmatically initiates actions. For examples, don't use status + * messages in if statements.
+ *To have an overview of your training job's progress, view
+ * TrainingJobStatus
and SecondaryStatus
in DescribeTrainingJob, and StatusMessage
together. For
+ * example, at the start of a training job, you might see the following:
+ * TrainingJobStatus
- InProgress
+ * SecondaryStatus
- Training
+ * StatusMessage
- Downloading the training image
The Amazon Resource Name (ARN) and job type of the source of a trial component.
- */ -export interface TrialComponentSource { +export interface DescribeTrainingJobResponse { /** - *The source ARN.
+ *Name of the model training job.
*/ - SourceArn: string | undefined; + TrainingJobName: string | undefined; /** - *The source job type.
+ *The Amazon Resource Name (ARN) of the training job.
*/ - SourceType?: string; -} + TrainingJobArn: string | undefined; -export namespace TrialComponentSource { /** - * @internal + *The Amazon Resource Name (ARN) of the associated hyperparameter tuning job if the + * training job was launched by a hyperparameter tuning job.
*/ - export const filterSensitiveLog = (obj: TrialComponentSource): any => ({ - ...obj, - }); -} + TuningJobArn?: string; -export interface DescribeTrialComponentResponse { /** - *The name of the trial component.
+ *The Amazon Resource Name (ARN) of the SageMaker Ground Truth labeling job that created the + * transform or training job.
*/ - TrialComponentName?: string; + LabelingJobArn?: string; /** - *The Amazon Resource Name (ARN) of the trial component.
+ *The Amazon Resource Name (ARN) of an AutoML job.
*/ - TrialComponentArn?: string; + AutoMLJobArn?: string; /** - *The name of the component as displayed. If DisplayName
isn't specified,
- * TrialComponentName
is displayed.
Information about the Amazon S3 location that is configured for storing model artifacts. + *
*/ - DisplayName?: string; + ModelArtifacts: ModelArtifacts | undefined; /** - *The Amazon Resource Name (ARN) of the source and, optionally, the job type.
+ *The status of the training job.
+ *SageMaker provides the following training job statuses:
+ *
+ * InProgress
- The training is in progress.
+ * Completed
- The training job has completed.
+ * Failed
- The training job has failed. To see the reason for the
+ * failure, see the FailureReason
field in the response to a
+ * DescribeTrainingJobResponse
call.
+ * Stopping
- The training job is stopping.
+ * Stopped
- The training job has stopped.
For more detailed information, see SecondaryStatus
.
The status of the component. States include:
- * Provides detailed information about the state of the training job. For detailed
+ * information on the secondary status of the training job, see StatusMessage
+ * under SecondaryStatusTransition.
SageMaker provides primary statuses and secondary statuses that apply to each of + * them:
+ *
+ * Starting
+ * - Starting the training job.
+ * Downloading
- An optional stage for algorithms that
+ * support File
training input mode. It indicates that
+ * data is being downloaded to the ML storage volumes.
+ * Training
- Training is in progress.
+ * Interrupted
- The job stopped because the managed
+ * spot training instances were interrupted.
+ * Uploading
- Training is complete and the model
+ * artifacts are being uploaded to the S3 location.
+ * Completed
- The training job has completed.
+ * Failed
- The training job has failed. The reason for
+ * the failure is returned in the FailureReason
field of
+ * DescribeTrainingJobResponse
.
+ * MaxRuntimeExceeded
- The job stopped because it
+ * exceeded the maximum allowed runtime.
+ * MaxWaitTimeExceeded
- The job stopped because it
+ * exceeded the maximum allowed wait time.
+ * Stopped
- The training job has stopped.
+ * Stopping
- Stopping the training job.
Valid values for SecondaryStatus
are subject to change.
We no longer support the following secondary statuses:
+ *InProgress
+ *
+ * LaunchingMLInstances
+ *
Completed
+ *
+ * PreparingTraining
+ *
Failed
+ *
+ * DownloadingTrainingImage
+ *
When the component started.
+ *If the training job failed, the reason it failed.
*/ - StartTime?: Date; + FailureReason?: string; /** - *When the component ended.
+ *Algorithm-specific parameters.
*/ - EndTime?: Date; + HyperParameters?: RecordWhen the component was created.
+ *Information about the algorithm used for training, and algorithm metadata. + *
*/ - CreationTime?: Date; + AlgorithmSpecification: AlgorithmSpecification | undefined; /** - *Who created the trial component.
+ *The Amazon Web Services Identity and Access Management (IAM) role configured for the training job.
*/ - CreatedBy?: UserContext; + RoleArn?: string; /** - *When the component was last modified.
+ *An array of Channel
objects that describes each data input channel.
+ *
Who last modified the component.
+ *The S3 path where model artifacts that you configured when creating the job are + * stored. SageMaker creates subfolders for model artifacts.
*/ - LastModifiedBy?: UserContext; + OutputDataConfig?: OutputDataConfig; /** - *The hyperparameters of the component.
+ *Resources, including ML compute instances and ML storage volumes, that are + * configured for model training.
*/ - Parameters?: RecordThe input artifacts of the component.
+ *A VpcConfig object that specifies the VPC that this training job has + * access to. For more information, see Protect Training Jobs by Using an Amazon + * Virtual Private Cloud.
*/ - InputArtifacts?: RecordThe output artifacts of the component.
+ *Specifies a limit to how long a model training job can run. It also specifies how long + * a managed Spot training job has to complete. When the job reaches the time limit, SageMaker + * ends the training job. Use this API to cap model training costs.
+ *To stop a job, SageMaker sends the algorithm the SIGTERM
signal, which delays
+ * job termination for 120 seconds. Algorithms can use this 120-second window to save the
+ * model artifacts, so the results of training are not lost.
A timestamp that indicates when the training job was created.
+ */ + CreationTime: Date | undefined; + + /** + *Indicates the time when the training job starts on training instances. You are
+ * billed for the time interval between this time and the value of
+ * TrainingEndTime
. The start time in CloudWatch Logs might be later than this time.
+ * The difference is due to the time it takes to download the training data and to the size
+ * of the training container.
Indicates the time when the training job ends on training instances. You are billed
+ * for the time interval between the value of TrainingStartTime
and this time.
+ * For successful jobs and stopped jobs, this is the time after model artifacts are
+ * uploaded. For failed jobs, this is the time when SageMaker detects a job failure.
A timestamp that indicates when the status of the training job was last + * modified.
+ */ + LastModifiedTime?: Date; + + /** + *A history of all of the secondary statuses that the training job has transitioned + * through.
*/ - OutputArtifacts?: RecordMetadata properties of the tracking entity, trial, or trial component.
+ *A collection of MetricData
objects that specify the names, values, and
+ * dates and times that the training algorithm emitted to Amazon CloudWatch.
The metrics for the component.
+ *If you want to allow inbound or outbound network calls, except for calls between peers
+ * within a training cluster for distributed training, choose True
. If you
+ * enable network isolation for training jobs that are configured to use a VPC, SageMaker
+ * downloads and uploads customer data and model artifacts through the specified VPC, but
+ * the training container does not have network access.
The Amazon Resource Name (ARN) of the lineage group.
+ *To encrypt all communications between ML compute instances in distributed training,
+ * choose True
. Encryption provides greater security for distributed training,
+ * but training might take longer. How long it takes depends on the amount of communication
+ * between compute instances, especially if you use a deep learning algorithms in
+ * distributed training.
A Boolean indicating whether managed spot training is enabled (True
) or
+ * not (False
).
The domain ID.
+ *Contains information about the output location for managed spot training checkpoint + * data.
*/ - DomainId: string | undefined; + CheckpointConfig?: CheckpointConfig; /** - *The user profile name. This value is not case sensitive.
+ *The training time in seconds.
*/ - UserProfileName: string | undefined; -} + TrainingTimeInSeconds?: number; -export namespace DescribeUserProfileRequest { /** - * @internal + *The billable time in seconds. Billable time refers to the absolute wall-clock + * time.
+ *Multiply BillableTimeInSeconds
by the number of instances
+ * (InstanceCount
) in your training cluster to get the total compute time
+ * SageMaker bills you if you run distributed training. The formula is as follows:
+ * BillableTimeInSeconds * InstanceCount
.
You can calculate the savings from using managed spot training using the formula
+ * (1 - BillableTimeInSeconds / TrainingTimeInSeconds) * 100
. For example,
+ * if BillableTimeInSeconds
is 100 and TrainingTimeInSeconds
is
+ * 500, the savings is 80%.
The ID of the domain that contains the profile.
+ *Configuration information for the Debugger hook parameters, metric and tensor collections, and
+ * storage paths. To learn more about
+ * how to configure the DebugHookConfig
parameter,
+ * see Use the SageMaker and Debugger Configuration API Operations to Create, Update, and Debug Your Training Job.
The user profile Amazon Resource Name (ARN).
+ *Associates a SageMaker job as a trial component with an experiment and trial. Specified when + * you call the following APIs:
+ *+ * CreateProcessingJob + *
+ *+ * CreateTrainingJob + *
+ *+ * CreateTransformJob + *
+ *The user profile name.
+ *Configuration information for Debugger rules for debugging output tensors.
*/ - UserProfileName?: string; + DebugRuleConfigurations?: DebugRuleConfiguration[]; /** - *The ID of the user's profile in the Amazon Elastic File System (EFS) volume.
+ *Configuration of storage locations for the Debugger TensorBoard output data.
*/ - HomeEfsFileSystemUid?: string; + TensorBoardOutputConfig?: TensorBoardOutputConfig; /** - *The status.
+ *Evaluation status of Debugger rules for debugging on a training job.
*/ - Status?: UserProfileStatus | string; + DebugRuleEvaluationStatuses?: DebugRuleEvaluationStatus[]; /** - *The last modified time.
+ *Configuration information for Debugger system monitoring, framework profiling, and + * storage paths.
*/ - LastModifiedTime?: Date; + ProfilerConfig?: ProfilerConfig; /** - *The creation time.
+ *Configuration information for Debugger rules for profiling system and framework + * metrics.
*/ - CreationTime?: Date; + ProfilerRuleConfigurations?: ProfilerRuleConfiguration[]; /** - *The failure reason.
+ *Evaluation status of Debugger rules for profiling on a training job.
*/ - FailureReason?: string; + ProfilerRuleEvaluationStatuses?: ProfilerRuleEvaluationStatus[]; /** - *The SSO user identifier.
+ *Profiling status of a training job.
*/ - SingleSignOnUserIdentifier?: string; + ProfilingStatus?: ProfilingStatus | string; /** - *The SSO user value.
+ *The number of times to retry the job when the job fails due to an
+ * InternalServerError
.
A collection of settings.
+ *The environment variables to set in the Docker container.
*/ - UserSettings?: UserSettings; + Environment?: RecordThe name of the private workforce whose access you want to restrict.
- * WorkforceName
is automatically set to default
when a
- * workforce is created and cannot be modified.
The name of the transform job that you want to view details of.
*/ - WorkforceName: string | undefined; + TransformJobName: string | undefined; } -export namespace DescribeWorkforceRequest { +export namespace DescribeTransformJobRequest { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeWorkforceRequest): any => ({ + export const filterSensitiveLog = (obj: DescribeTransformJobRequest): any => ({ ...obj, }); } -/** - *Your OIDC IdP workforce configuration.
- */ -export interface OidcConfigForResponse { - /** - *The OIDC IdP client ID used to configure your private workforce.
- */ - ClientId?: string; - - /** - *The OIDC IdP issuer used to configure your private workforce.
- */ - Issuer?: string; - - /** - *The OIDC IdP authorization endpoint used to configure your private workforce.
- */ - AuthorizationEndpoint?: string; - - /** - *The OIDC IdP token endpoint used to configure your private workforce.
- */ - TokenEndpoint?: string; +export enum TransformJobStatus { + COMPLETED = "Completed", + FAILED = "Failed", + IN_PROGRESS = "InProgress", + STOPPED = "Stopped", + STOPPING = "Stopping", +} +export interface DescribeTransformJobResponse { /** - *The OIDC IdP user information endpoint used to configure your private workforce.
+ *The name of the transform job.
*/ - UserInfoEndpoint?: string; + TransformJobName: string | undefined; /** - *The OIDC IdP logout endpoint used to configure your private workforce.
+ *The Amazon Resource Name (ARN) of the transform job.
*/ - LogoutEndpoint?: string; + TransformJobArn: string | undefined; /** - *The OIDC IdP JSON Web Key Set (Jwks) URI used to configure your private workforce.
+ *The
+ * status of the transform job. If the transform job failed, the reason
+ * is returned in the FailureReason
field.
If the transform job failed, FailureReason
describes
+ * why
+ * it failed. A transform job creates a log file, which includes error
+ * messages, and stores it
+ * as
+ * an Amazon S3 object. For more information, see Log Amazon SageMaker Events with
+ * Amazon CloudWatch.
A VpcConfig object that specifies the VPC that you want your workforce to connect to.
- */ -export interface WorkforceVpcConfigResponse { /** - *The ID of the VPC that the workforce uses for communication.
+ *The name of the model used in the transform job.
*/ - VpcId: string | undefined; + ModelName: string | undefined; /** - *The VPC security group IDs, in the form sg-xxxxxxxx. The security groups must be for the same VPC as specified in the subnet.
+ *The + * maximum number + * of + * parallel requests on each instance node + * that can be launched in a transform job. The default value is 1.
*/ - SecurityGroupIds: string[] | undefined; + MaxConcurrentTransforms?: number; /** - *The ID of the subnets in the VPC that you want to connect.
+ *The timeout and maximum number of retries for processing a transform job + * invocation.
*/ - Subnets: string[] | undefined; + ModelClientConfig?: ModelClientConfig; /** - *The IDs for the VPC service endpoints of your VPC workforce when it is created and updated.
+ *The + * maximum + * payload size, in MB, used in the + * transform job.
*/ - VpcEndpointId?: string; -} + MaxPayloadInMB?: number; -export namespace WorkforceVpcConfigResponse { /** - * @internal + *Specifies the number of records to include in a mini-batch for an HTTP inference + * request. + * A record + * is a single unit of input data that inference + * can be made on. For example, a single line in a CSV file is a record.
+ *To enable the batch strategy, you must set SplitType
+ * to
+ * Line
, RecordIO
, or
+ * TFRecord
.
A single private workforce, which is automatically created when you create your first - * private work team. You can create one private work force in each Amazon Web Services Region. By default, - * any workforce-related API operation used in a specific region will apply to the - * workforce created in that region. To learn how to create a private workforce, see Create a Private Workforce.
- */ -export interface Workforce { /** - *The name of the private workforce.
+ *The + * environment variables to set in the Docker container. We support up to 16 key and values + * entries in the map.
*/ - WorkforceName: string | undefined; + Environment?: RecordThe Amazon Resource Name (ARN) of the private workforce.
+ *Describes the dataset to be transformed and the Amazon S3 location where it is + * stored.
*/ - WorkforceArn: string | undefined; + TransformInput: TransformInput | undefined; /** - *The most recent date that was used to - * successfully add one or more IP address ranges (CIDRs) to a private workforce's - * allow list.
+ *Identifies the Amazon S3 location where you want Amazon SageMaker to save the results from the + * transform job.
*/ - LastUpdatedDate?: Date; + TransformOutput?: TransformOutput; /** - *A list of one to ten IP address ranges (CIDRs) to be added to the - * workforce allow list. By default, a workforce isn't restricted to specific IP addresses.
+ *Describes + * the resources, including ML instance types and ML instance count, to + * use for the transform job.
*/ - SourceIpConfig?: SourceIpConfig; + TransformResources: TransformResources | undefined; /** - *The subdomain for your OIDC Identity Provider.
+ *A timestamp that shows when the transform Job was created.
*/ - SubDomain?: string; + CreationTime: Date | undefined; /** - *The configuration of an Amazon Cognito workforce. - * A single Cognito workforce is created using and corresponds to a single - * - * Amazon Cognito user pool.
+ *Indicates when the transform job starts
+ * on
+ * ML instances. You are billed for the time interval between this time
+ * and the value of TransformEndTime
.
The configuration of an OIDC Identity Provider (IdP) private workforce.
+ *Indicates when the transform job has been
+ *
+ * completed, or has stopped or failed. You are billed for the time
+ * interval between this time and the value of TransformStartTime
.
The date that the workforce is created.
+ *The Amazon Resource Name (ARN) of the Amazon SageMaker Ground Truth labeling job that created the + * transform or training job.
*/ - CreateDate?: Date; + LabelingJobArn?: string; /** - *The configuration of a VPC workforce.
+ *The Amazon Resource Name (ARN) of the AutoML transform job.
*/ - WorkforceVpcConfig?: WorkforceVpcConfigResponse; + AutoMLJobArn?: string; /** - *The status of your workforce.
+ *The data structure used to specify the data to be used for inference in a batch + * transform job and to associate the data that is relevant to the prediction results in + * the output. The input filter provided allows you to exclude input data that is not + * needed for inference in a batch transform job. The output filter provided allows you to + * include input data relevant to interpreting the predictions in the output from the job. + * For more information, see Associate Prediction + * Results with their Corresponding Input Records.
*/ - Status?: WorkforceStatus | string; + DataProcessing?: DataProcessing; /** - *The reason your workforce failed.
+ *Associates a SageMaker job as a trial component with an experiment and trial. Specified when + * you call the following APIs:
+ *+ * CreateProcessingJob + *
+ *+ * CreateTrainingJob + *
+ *+ * CreateTransformJob + *
+ *A single private workforce, which is automatically created when you create your first - * private work team. You can create one private work force in each Amazon Web Services Region. By default, - * any workforce-related API operation used in a specific region will apply to the - * workforce created in that region. To learn how to create a private workforce, see Create a Private Workforce.
+ *The name of the trial to describe.
*/ - Workforce: Workforce | undefined; + TrialName: string | undefined; } -export namespace DescribeWorkforceResponse { +export namespace DescribeTrialRequest { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeWorkforceResponse): any => ({ + export const filterSensitiveLog = (obj: DescribeTrialRequest): any => ({ ...obj, }); } -export interface DescribeWorkteamRequest { +/** + *The source of the trial.
+ */ +export interface TrialSource { /** - *The name of the work team to return a description of.
+ *The Amazon Resource Name (ARN) of the source.
*/ - WorkteamName: string | undefined; + SourceArn: string | undefined; + + /** + *The source job type.
+ */ + SourceType?: string; } -export namespace DescribeWorkteamRequest { +export namespace TrialSource { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeWorkteamRequest): any => ({ + export const filterSensitiveLog = (obj: TrialSource): any => ({ ...obj, }); } -/** - *Provides details about a labeling work team.
- */ -export interface Workteam { +export interface DescribeTrialResponse { /** - *The name of the work team.
+ *The name of the trial.
*/ - WorkteamName: string | undefined; + TrialName?: string; /** - *A list of MemberDefinition
objects that contains objects that identify
- * the workers that make up the work team.
Workforces can be created using Amazon Cognito or your own OIDC Identity Provider (IdP).
- * For private workforces created using Amazon Cognito use
- * CognitoMemberDefinition
. For workforces created using your own OIDC identity
- * provider (IdP) use OidcMemberDefinition
.
The Amazon Resource Name (ARN) of the trial.
*/ - MemberDefinitions: MemberDefinition[] | undefined; + TrialArn?: string; /** - *The Amazon Resource Name (ARN) that identifies the work team.
+ *The name of the trial as displayed. If DisplayName
isn't specified,
+ * TrialName
is displayed.
The Amazon Resource Name (ARN) of the workforce.
+ *The name of the experiment the trial is part of.
*/ - WorkforceArn?: string; + ExperimentName?: string; /** - *The Amazon Marketplace identifier for a vendor's work team.
+ *The Amazon Resource Name (ARN) of the source and, optionally, the job type.
*/ - ProductListingIds?: string[]; + Source?: TrialSource; /** - *A description of the work team.
+ *When the trial was created.
*/ - Description: string | undefined; + CreationTime?: Date; /** - *The URI of the labeling job's user interface. Workers open this URI to start labeling - * your data objects.
+ *Who created the trial.
*/ - SubDomain?: string; + CreatedBy?: UserContext; /** - *The date and time that the work team was created (timestamp).
+ *When the trial was last modified.
*/ - CreateDate?: Date; + LastModifiedTime?: Date; /** - *The date and time that the work team was last updated (timestamp).
+ *Who last modified the trial.
*/ - LastUpdatedDate?: Date; + LastModifiedBy?: UserContext; /** - *Configures SNS notifications of available or expiring work items for work - * teams.
+ *Metadata properties of the tracking entity, trial, or trial component.
*/ - NotificationConfiguration?: NotificationConfiguration; + MetadataProperties?: MetadataProperties; } -export namespace Workteam { +export namespace DescribeTrialResponse { /** * @internal */ - export const filterSensitiveLog = (obj: Workteam): any => ({ + export const filterSensitiveLog = (obj: DescribeTrialResponse): any => ({ ...obj, }); } -export interface DescribeWorkteamResponse { +export interface DescribeTrialComponentRequest { /** - *A Workteam
instance that contains information about the work team.
- *
The name of the trial component to describe.
*/ - Workteam: Workteam | undefined; + TrialComponentName: string | undefined; } -export namespace DescribeWorkteamResponse { +export namespace DescribeTrialComponentRequest { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeWorkteamResponse): any => ({ + export const filterSensitiveLog = (obj: DescribeTrialComponentRequest): any => ({ ...obj, }); } /** - *Specifies weight and capacity values for a production variant.
+ *A summary of the metrics of a trial component.
*/ -export interface DesiredWeightAndCapacity { - /** - *The name of the variant to update.
- */ - VariantName: string | undefined; - - /** - *The variant's weight.
- */ - DesiredWeight?: number; - - /** - *The variant's capacity.
- */ - DesiredInstanceCount?: number; -} - -export namespace DesiredWeightAndCapacity { +export interface TrialComponentMetricSummary { /** - * @internal + *The name of the metric.
*/ - export const filterSensitiveLog = (obj: DesiredWeightAndCapacity): any => ({ - ...obj, - }); -} + MetricName?: string; -/** - *Information of a particular device.
- */ -export interface Device { /** - *The name of the device.
+ *The Amazon Resource Name (ARN) of the source.
*/ - DeviceName: string | undefined; + SourceArn?: string; /** - *Description of the device.
+ *When the metric was last updated.
*/ - Description?: string; + TimeStamp?: Date; /** - *Amazon Web Services Internet of Things (IoT) object name.
+ *The maximum value of the metric.
*/ - IotThingName?: string; -} + Max?: number; -export namespace Device { /** - * @internal + *The minimum value of the metric.
*/ - export const filterSensitiveLog = (obj: Device): any => ({ - ...obj, - }); -} + Min?: number; -/** - *Summary of the device fleet.
- */ -export interface DeviceFleetSummary { /** - *Amazon Resource Name (ARN) of the device fleet.
+ *The most recent value of the metric.
*/ - DeviceFleetArn: string | undefined; + Last?: number; /** - *Name of the device fleet.
+ *The number of samples used to generate the metric.
*/ - DeviceFleetName: string | undefined; + Count?: number; /** - *Timestamp of when the device fleet was created.
+ *The average value of the metric.
*/ - CreationTime?: Date; + Avg?: number; /** - *Timestamp of when the device fleet was last updated.
+ *The standard deviation of the metric.
*/ - LastModifiedTime?: Date; + StdDev?: number; } -export namespace DeviceFleetSummary { +export namespace TrialComponentMetricSummary { /** * @internal */ - export const filterSensitiveLog = (obj: DeviceFleetSummary): any => ({ + export const filterSensitiveLog = (obj: TrialComponentMetricSummary): any => ({ ...obj, }); } /** - *Status of devices.
+ *The Amazon Resource Name (ARN) and job type of the source of a trial component.
*/ -export interface DeviceStats { +export interface TrialComponentSource { /** - *The number of devices connected with a heartbeat.
+ *The source ARN.
*/ - ConnectedDeviceCount: number | undefined; + SourceArn: string | undefined; /** - *The number of registered devices.
+ *The source job type.
*/ - RegisteredDeviceCount: number | undefined; + SourceType?: string; } -export namespace DeviceStats { +export namespace TrialComponentSource { /** * @internal */ - export const filterSensitiveLog = (obj: DeviceStats): any => ({ + export const filterSensitiveLog = (obj: TrialComponentSource): any => ({ ...obj, }); } -/** - *Summary of model on edge device.
- */ -export interface EdgeModelSummary { +export interface DescribeTrialComponentResponse { /** - *The name of the model.
+ *The name of the trial component.
*/ - ModelName: string | undefined; + TrialComponentName?: string; /** - *The version model.
+ *The Amazon Resource Name (ARN) of the trial component.
*/ - ModelVersion: string | undefined; -} + TrialComponentArn?: string; -export namespace EdgeModelSummary { /** - * @internal + *The name of the component as displayed. If DisplayName
isn't specified,
+ * TrialComponentName
is displayed.
Summary of the device.
- */ -export interface DeviceSummary { /** - *The unique identifier of the device.
+ *The Amazon Resource Name (ARN) of the source and, optionally, the job type.
*/ - DeviceName: string | undefined; + Source?: TrialComponentSource; /** - *Amazon Resource Name (ARN) of the device.
+ *The status of the component. States include:
+ *InProgress
+ *Completed
+ *Failed
+ *A description of the device.
+ *When the component started.
*/ - Description?: string; + StartTime?: Date; /** - *The name of the fleet the device belongs to.
+ *When the component ended.
*/ - DeviceFleetName?: string; + EndTime?: Date; /** - *The Amazon Web Services Internet of Things (IoT) object thing name associated with the device..
+ *When the component was created.
*/ - IotThingName?: string; + CreationTime?: Date; /** - *The timestamp of the last registration or de-reregistration.
+ *Who created the trial component.
*/ - RegistrationTime?: Date; + CreatedBy?: UserContext; /** - *The last heartbeat received from the device.
+ *When the component was last modified.
*/ - LatestHeartbeat?: Date; + LastModifiedTime?: Date; /** - *Models on the device.
+ *Who last modified the component.
*/ - Models?: EdgeModelSummary[]; + LastModifiedBy?: UserContext; /** - *Edge Manager agent version.
+ *The hyperparameters of the component.
*/ - AgentVersion?: string; -} + Parameters?: RecordThe input artifacts of the component.
*/ - export const filterSensitiveLog = (obj: DeviceSummary): any => ({ - ...obj, - }); -} - -export enum Direction { - ASCENDANTS = "Ascendants", - BOTH = "Both", - DESCENDANTS = "Descendants", -} - -export interface DisableSagemakerServicecatalogPortfolioInput {} + InputArtifacts?: RecordThe output artifacts of the component.
*/ - export const filterSensitiveLog = (obj: DisableSagemakerServicecatalogPortfolioInput): any => ({ - ...obj, - }); -} - -export interface DisableSagemakerServicecatalogPortfolioOutput {} + OutputArtifacts?: RecordMetadata properties of the tracking entity, trial, or trial component.
*/ - export const filterSensitiveLog = (obj: DisableSagemakerServicecatalogPortfolioOutput): any => ({ - ...obj, - }); -} + MetadataProperties?: MetadataProperties; -export interface DisassociateTrialComponentRequest { /** - *The name of the component to disassociate from the trial.
+ *The metrics for the component.
*/ - TrialComponentName: string | undefined; + Metrics?: TrialComponentMetricSummary[]; /** - *The name of the trial to disassociate from.
+ *The Amazon Resource Name (ARN) of the lineage group.
*/ - TrialName: string | undefined; + LineageGroupArn?: string; } -export namespace DisassociateTrialComponentRequest { +export namespace DescribeTrialComponentResponse { /** * @internal */ - export const filterSensitiveLog = (obj: DisassociateTrialComponentRequest): any => ({ + export const filterSensitiveLog = (obj: DescribeTrialComponentResponse): any => ({ ...obj, + ...(obj.Parameters && { + Parameters: Object.entries(obj.Parameters).reduce( + (acc: any, [key, value]: [string, TrialComponentParameterValue]) => ({ + ...acc, + [key]: TrialComponentParameterValue.filterSensitiveLog(value), + }), + {} + ), + }), }); } -export interface DisassociateTrialComponentResponse { +export interface DescribeUserProfileRequest { /** - *The ARN of the trial component.
+ *The domain ID.
*/ - TrialComponentArn?: string; + DomainId: string | undefined; /** - *The Amazon Resource Name (ARN) of the trial.
+ *The user profile name. This value is not case sensitive.
*/ - TrialArn?: string; + UserProfileName: string | undefined; } -export namespace DisassociateTrialComponentResponse { +export namespace DescribeUserProfileRequest { /** * @internal */ - export const filterSensitiveLog = (obj: DisassociateTrialComponentResponse): any => ({ + export const filterSensitiveLog = (obj: DescribeUserProfileRequest): any => ({ ...obj, }); } -/** - *The domain's details.
- */ -export interface DomainDetails { +export enum UserProfileStatus { + Delete_Failed = "Delete_Failed", + Deleting = "Deleting", + Failed = "Failed", + InService = "InService", + Pending = "Pending", + Update_Failed = "Update_Failed", + Updating = "Updating", +} + +export interface DescribeUserProfileResponse { /** - *The domain's Amazon Resource Name (ARN).
+ *The ID of the domain that contains the profile.
*/ - DomainArn?: string; + DomainId?: string; /** - *The domain ID.
+ *The user profile Amazon Resource Name (ARN).
*/ - DomainId?: string; + UserProfileArn?: string; /** - *The domain name.
+ *The user profile name.
*/ - DomainName?: string; + UserProfileName?: string; /** - *The status.
+ *The ID of the user's profile in the Amazon Elastic File System (EFS) volume.
*/ - Status?: DomainStatus | string; + HomeEfsFileSystemUid?: string; /** - *The creation time.
+ *The status.
*/ - CreationTime?: Date; + Status?: UserProfileStatus | string; /** *The last modified time.
@@ -4338,5153 +4272,5221 @@ export interface DomainDetails { LastModifiedTime?: Date; /** - *The domain's URL.
+ *The creation time.
*/ - Url?: string; -} + CreationTime?: Date; -export namespace DomainDetails { /** - * @internal + *The failure reason.
*/ - export const filterSensitiveLog = (obj: DomainDetails): any => ({ - ...obj, - }); -} + FailureReason?: string; + + /** + *The SSO user identifier.
+ */ + SingleSignOnUserIdentifier?: string; -/** - *A collection of settings that update the current configuration for the RStudioServerPro
Domain-level app.
The execution role for the RStudioServerPro
Domain-level app.
The SSO user value.
*/ - DomainExecutionRoleArn: string | undefined; + SingleSignOnUserValue?: string; /** - *Specifies the ARN's of a SageMaker image and SageMaker image version, and the instance type that - * the version runs on.
+ *A collection of settings.
*/ - DefaultResourceSpec?: ResourceSpec; + UserSettings?: UserSettings; } -export namespace RStudioServerProDomainSettingsForUpdate { +export namespace DescribeUserProfileResponse { /** * @internal */ - export const filterSensitiveLog = (obj: RStudioServerProDomainSettingsForUpdate): any => ({ + export const filterSensitiveLog = (obj: DescribeUserProfileResponse): any => ({ ...obj, }); } -/** - *A collection of Domain
configuration settings to update.
A collection of RStudioServerPro
Domain-level app settings to update.
The name of the private workforce whose access you want to restrict.
+ * WorkforceName
is automatically set to default
when a
+ * workforce is created and cannot be modified.
A directed edge connecting two lineage entities.
+ *Your OIDC IdP workforce configuration.
*/ -export interface Edge { +export interface OidcConfigForResponse { /** - *The Amazon Resource Name (ARN) of the source lineage entity of the directed edge.
+ *The OIDC IdP client ID used to configure your private workforce.
*/ - SourceArn?: string; + ClientId?: string; /** - *The Amazon Resource Name (ARN) of the destination lineage entity of the directed edge.
+ *The OIDC IdP issuer used to configure your private workforce.
*/ - DestinationArn?: string; + Issuer?: string; /** - *The type of the Association(Edge) between the source and destination. For example ContributedTo
,
- * Produced
, or DerivedFrom
.
The OIDC IdP authorization endpoint used to configure your private workforce.
*/ - AssociationType?: AssociationEdgeType | string; + AuthorizationEndpoint?: string; + + /** + *The OIDC IdP token endpoint used to configure your private workforce.
+ */ + TokenEndpoint?: string; + + /** + *The OIDC IdP user information endpoint used to configure your private workforce.
+ */ + UserInfoEndpoint?: string; + + /** + *The OIDC IdP logout endpoint used to configure your private workforce.
+ */ + LogoutEndpoint?: string; + + /** + *The OIDC IdP JSON Web Key Set (Jwks) URI used to configure your private workforce.
+ */ + JwksUri?: string; } -export namespace Edge { +export namespace OidcConfigForResponse { /** * @internal */ - export const filterSensitiveLog = (obj: Edge): any => ({ + export const filterSensitiveLog = (obj: OidcConfigForResponse): any => ({ ...obj, }); } +export enum WorkforceStatus { + ACTIVE = "Active", + DELETING = "Deleting", + FAILED = "Failed", + INITIALIZING = "Initializing", + UPDATING = "Updating", +} + /** - *Status of edge devices with this model.
+ *A VpcConfig object that specifies the VPC that you want your workforce to connect to.
*/ -export interface EdgeModelStat { - /** - *The name of the model.
- */ - ModelName: string | undefined; - - /** - *The model version.
- */ - ModelVersion: string | undefined; - +export interface WorkforceVpcConfigResponse { /** - *The number of devices that have this model version and do not have a heart beat.
+ *The ID of the VPC that the workforce uses for communication.
*/ - OfflineDeviceCount: number | undefined; + VpcId: string | undefined; /** - *The number of devices that have this model version and have a heart beat.
+ *The VPC security group IDs, in the form sg-xxxxxxxx. The security groups must be for the same VPC as specified in the subnet.
*/ - ConnectedDeviceCount: number | undefined; + SecurityGroupIds: string[] | undefined; /** - *The number of devices that have this model version, a heart beat, and are currently running.
+ *The ID of the subnets in the VPC that you want to connect.
*/ - ActiveDeviceCount: number | undefined; + Subnets: string[] | undefined; /** - *The number of devices with this model version and are producing sample data.
+ *The IDs for the VPC service endpoints of your VPC workforce when it is created and updated.
*/ - SamplingDeviceCount: number | undefined; + VpcEndpointId?: string; } -export namespace EdgeModelStat { +export namespace WorkforceVpcConfigResponse { /** * @internal */ - export const filterSensitiveLog = (obj: EdgeModelStat): any => ({ + export const filterSensitiveLog = (obj: WorkforceVpcConfigResponse): any => ({ ...obj, }); } /** - *Summary of edge packaging job.
+ *A single private workforce, which is automatically created when you create your first + * private work team. You can create one private work force in each Amazon Web Services Region. By default, + * any workforce-related API operation used in a specific region will apply to the + * workforce created in that region. To learn how to create a private workforce, see Create a Private Workforce.
*/ -export interface EdgePackagingJobSummary { +export interface Workforce { /** - *The Amazon Resource Name (ARN) of the edge packaging job.
+ *The name of the private workforce.
*/ - EdgePackagingJobArn: string | undefined; + WorkforceName: string | undefined; /** - *The name of the edge packaging job.
+ *The Amazon Resource Name (ARN) of the private workforce.
*/ - EdgePackagingJobName: string | undefined; + WorkforceArn: string | undefined; /** - *The status of the edge packaging job.
+ *The most recent date that was used to + * successfully add one or more IP address ranges (CIDRs) to a private workforce's + * allow list.
*/ - EdgePackagingJobStatus: EdgePackagingJobStatus | string | undefined; + LastUpdatedDate?: Date; /** - *The name of the SageMaker Neo compilation job.
+ *A list of one to ten IP address ranges (CIDRs) to be added to the + * workforce allow list. By default, a workforce isn't restricted to specific IP addresses.
*/ - CompilationJobName?: string; + SourceIpConfig?: SourceIpConfig; /** - *The name of the model.
+ *The subdomain for your OIDC Identity Provider.
*/ - ModelName?: string; + SubDomain?: string; /** - *The version of the model.
+ *The configuration of an Amazon Cognito workforce. + * A single Cognito workforce is created using and corresponds to a single + * + * Amazon Cognito user pool.
*/ - ModelVersion?: string; + CognitoConfig?: CognitoConfig; /** - *The timestamp of when the job was created.
+ *The configuration of an OIDC Identity Provider (IdP) private workforce.
*/ - CreationTime?: Date; + OidcConfig?: OidcConfigForResponse; /** - *The timestamp of when the edge packaging job was last updated.
+ *The date that the workforce is created.
*/ - LastModifiedTime?: Date; -} + CreateDate?: Date; -export namespace EdgePackagingJobSummary { /** - * @internal + *The configuration of a VPC workforce.
*/ - export const filterSensitiveLog = (obj: EdgePackagingJobSummary): any => ({ - ...obj, - }); -} + WorkforceVpcConfig?: WorkforceVpcConfigResponse; -/** - *The configurations and outcomes of an Amazon EMR step execution.
- */ -export interface EMRStepMetadata { /** - *The identifier of the EMR cluster.
+ *The status of your workforce.
*/ - ClusterId?: string; + Status?: WorkforceStatus | string; /** - *The identifier of the EMR cluster step.
+ *The reason your workforce failed.
*/ - StepId?: string; + FailureReason?: string; +} +export namespace Workforce { /** - *The name of the EMR cluster step.
+ * @internal */ - StepName?: string; + export const filterSensitiveLog = (obj: Workforce): any => ({ + ...obj, + }); +} +export interface DescribeWorkforceResponse { /** - *The path to the log file where the cluster step's failure root cause - * is recorded.
+ *A single private workforce, which is automatically created when you create your first + * private work team. You can create one private work force in each Amazon Web Services Region. By default, + * any workforce-related API operation used in a specific region will apply to the + * workforce created in that region. To learn how to create a private workforce, see Create a Private Workforce.
*/ - LogFilePath?: string; + Workforce: Workforce | undefined; } -export namespace EMRStepMetadata { +export namespace DescribeWorkforceResponse { /** * @internal */ - export const filterSensitiveLog = (obj: EMRStepMetadata): any => ({ + export const filterSensitiveLog = (obj: DescribeWorkforceResponse): any => ({ ...obj, }); } -export interface EnableSagemakerServicecatalogPortfolioInput {} - -export namespace EnableSagemakerServicecatalogPortfolioInput { +export interface DescribeWorkteamRequest { /** - * @internal + *The name of the work team to return a description of.
*/ - export const filterSensitiveLog = (obj: EnableSagemakerServicecatalogPortfolioInput): any => ({ - ...obj, - }); + WorkteamName: string | undefined; } -export interface EnableSagemakerServicecatalogPortfolioOutput {} - -export namespace EnableSagemakerServicecatalogPortfolioOutput { +export namespace DescribeWorkteamRequest { /** * @internal */ - export const filterSensitiveLog = (obj: EnableSagemakerServicecatalogPortfolioOutput): any => ({ + export const filterSensitiveLog = (obj: DescribeWorkteamRequest): any => ({ ...obj, }); } /** - *A schedule for a model monitoring job. For information about model monitor, see - * Amazon SageMaker Model - * Monitor.
+ *Provides details about a labeling work team.
*/ -export interface MonitoringSchedule { - /** - *The Amazon Resource Name (ARN) of the monitoring schedule.
- */ - MonitoringScheduleArn?: string; - +export interface Workteam { /** - *The name of the monitoring schedule.
+ *The name of the work team.
*/ - MonitoringScheduleName?: string; + WorkteamName: string | undefined; /** - *The status of the monitoring schedule. This can be one of the following values.
- *
- * PENDING
- The schedule is pending being created.
- * FAILED
- The schedule failed.
- * SCHEDULED
- The schedule was successfully created.
- * STOPPED
- The schedule was stopped.
A list of MemberDefinition
objects that contains objects that identify
+ * the workers that make up the work team.
Workforces can be created using Amazon Cognito or your own OIDC Identity Provider (IdP).
+ * For private workforces created using Amazon Cognito use
+ * CognitoMemberDefinition
. For workforces created using your own OIDC identity
+ * provider (IdP) use OidcMemberDefinition
.
The type of the monitoring job definition to schedule.
+ *The Amazon Resource Name (ARN) that identifies the work team.
*/ - MonitoringType?: MonitoringType | string; + WorkteamArn: string | undefined; /** - *If the monitoring schedule failed, the reason it failed.
+ *The Amazon Resource Name (ARN) of the workforce.
*/ - FailureReason?: string; + WorkforceArn?: string; /** - *The time that the monitoring schedule was created.
+ *The Amazon Marketplace identifier for a vendor's work team.
*/ - CreationTime?: Date; + ProductListingIds?: string[]; /** - *The last time the monitoring schedule was changed.
+ *A description of the work team.
*/ - LastModifiedTime?: Date; + Description: string | undefined; /** - *Configures the monitoring schedule and defines the monitoring job.
+ *The URI of the labeling job's user interface. Workers open this URI to start labeling + * your data objects.
*/ - MonitoringScheduleConfig?: MonitoringScheduleConfig; + SubDomain?: string; /** - *The endpoint that hosts the model being monitored.
+ *The date and time that the work team was created (timestamp).
*/ - EndpointName?: string; + CreateDate?: Date; /** - *Summary of information about the last monitoring job to run.
+ *The date and time that the work team was last updated (timestamp).
*/ - LastMonitoringExecutionSummary?: MonitoringExecutionSummary; + LastUpdatedDate?: Date; /** - *A list of the tags associated with the monitoring schedlue. For more information, see Tagging Amazon Web Services - * resources in the Amazon Web Services General Reference Guide.
+ *Configures SNS notifications of available or expiring work items for work + * teams.
*/ - Tags?: Tag[]; + NotificationConfiguration?: NotificationConfiguration; } -export namespace MonitoringSchedule { +export namespace Workteam { /** * @internal */ - export const filterSensitiveLog = (obj: MonitoringSchedule): any => ({ + export const filterSensitiveLog = (obj: Workteam): any => ({ ...obj, }); } -/** - *A hosted endpoint for real-time inference.
- */ -export interface Endpoint { - /** - *The name of the endpoint.
- */ - EndpointName: string | undefined; - - /** - *The Amazon Resource Name (ARN) of the endpoint.
- */ - EndpointArn: string | undefined; - +export interface DescribeWorkteamResponse { /** - *The endpoint configuration associated with the endpoint.
+ *A Workteam
instance that contains information about the work team.
+ *
A list of the production variants hosted on the endpoint. Each production variant is a - * model.
+ * @internal */ - ProductionVariants?: ProductionVariantSummary[]; + export const filterSensitiveLog = (obj: DescribeWorkteamResponse): any => ({ + ...obj, + }); +} +/** + *Specifies weight and capacity values for a production variant.
+ */ +export interface DesiredWeightAndCapacity { /** - *The currently active data capture configuration used by your Endpoint.
+ *The name of the variant to update.
*/ - DataCaptureConfig?: DataCaptureConfigSummary; + VariantName: string | undefined; /** - *The status of the endpoint.
+ *The variant's weight.
*/ - EndpointStatus: EndpointStatus | string | undefined; + DesiredWeight?: number; /** - *If the endpoint failed, the reason it failed.
+ *The variant's capacity.
*/ - FailureReason?: string; + DesiredInstanceCount?: number; +} +export namespace DesiredWeightAndCapacity { /** - *The time that the endpoint was created.
+ * @internal */ - CreationTime: Date | undefined; + export const filterSensitiveLog = (obj: DesiredWeightAndCapacity): any => ({ + ...obj, + }); +} +/** + *Information of a particular device.
+ */ +export interface Device { /** - *The last time the endpoint was modified.
+ *The name of the device.
*/ - LastModifiedTime: Date | undefined; + DeviceName: string | undefined; /** - *A list of monitoring schedules for the endpoint. For information about model - * monitoring, see Amazon SageMaker Model Monitor.
+ *Description of the device.
*/ - MonitoringSchedules?: MonitoringSchedule[]; + Description?: string; /** - *A list of the tags associated with the endpoint. For more information, see Tagging Amazon Web Services - * resources in the Amazon Web Services General Reference Guide.
+ *Amazon Web Services Internet of Things (IoT) object name.
*/ - Tags?: Tag[]; + IotThingName?: string; } -export namespace Endpoint { +export namespace Device { /** * @internal */ - export const filterSensitiveLog = (obj: Endpoint): any => ({ + export const filterSensitiveLog = (obj: Device): any => ({ ...obj, }); } -export enum EndpointConfigSortKey { - CreationTime = "CreationTime", - Name = "Name", +export enum DeviceDeploymentStatus { + Deployed = "DEPLOYED", + Failed = "FAILED", + InProgress = "INPROGRESS", + ReadyToDeploy = "READYTODEPLOY", + Stopped = "STOPPED", + Stopping = "STOPPING", } /** - *Provides summary information for an endpoint configuration.
+ *Contains information summarizing device details and deployment status.
*/ -export interface EndpointConfigSummary { +export interface DeviceDeploymentSummary { /** - *The name of the endpoint configuration.
+ *The ARN of the edge deployment plan.
*/ - EndpointConfigName: string | undefined; + EdgeDeploymentPlanArn: string | undefined; /** - *The Amazon Resource Name (ARN) of the endpoint configuration.
+ *The name of the edge deployment plan.
*/ - EndpointConfigArn: string | undefined; + EdgeDeploymentPlanName: string | undefined; /** - *A timestamp that shows when the endpoint configuration was created.
+ *The name of the stage in the edge deployment plan.
*/ - CreationTime: Date | undefined; -} + StageName: string | undefined; -export namespace EndpointConfigSummary { /** - * @internal + *The name of the deployed stage.
*/ - export const filterSensitiveLog = (obj: EndpointConfigSummary): any => ({ - ...obj, - }); -} + DeployedStageName?: string; -export enum EndpointSortKey { - CreationTime = "CreationTime", - Name = "Name", - Status = "Status", -} + /** + *The name of the fleet to which the device belongs to.
+ */ + DeviceFleetName?: string; -/** - *Provides summary information for an endpoint.
- */ -export interface EndpointSummary { /** - *The name of the endpoint.
+ *The name of the device.
*/ - EndpointName: string | undefined; + DeviceName: string | undefined; /** - *The Amazon Resource Name (ARN) of the endpoint.
+ *The ARN of the device.
*/ - EndpointArn: string | undefined; + DeviceArn: string | undefined; /** - *A timestamp that shows when the endpoint was created.
+ *The deployment status of the device.
*/ - CreationTime: Date | undefined; + DeviceDeploymentStatus?: DeviceDeploymentStatus | string; /** - *A timestamp that shows when the endpoint was last modified.
+ *The detailed error message for the deployoment status result.
*/ - LastModifiedTime: Date | undefined; + DeviceDeploymentStatusMessage?: string; /** - *The status of the endpoint.
- *
- * OutOfService
: Endpoint is not available to take incoming
- * requests.
- * Creating
: CreateEndpoint is executing.
- * Updating
: UpdateEndpoint or UpdateEndpointWeightsAndCapacities is executing.
- * SystemUpdating
: Endpoint is undergoing maintenance and cannot be
- * updated or deleted or re-scaled until it has completed. This maintenance
- * operation does not change any customer-specified values such as VPC config, KMS
- * encryption, model, instance type, or instance count.
- * RollingBack
: Endpoint fails to scale up or down or change its
- * variant weight and is in the process of rolling back to its previous
- * configuration. Once the rollback completes, endpoint returns to an
- * InService
status. This transitional status only applies to an
- * endpoint that has autoscaling enabled and is undergoing variant weight or
- * capacity changes as part of an UpdateEndpointWeightsAndCapacities call or when the UpdateEndpointWeightsAndCapacities operation is called
- * explicitly.
- * InService
: Endpoint is available to process incoming
- * requests.
- * Deleting
: DeleteEndpoint is executing.
- * Failed
: Endpoint could not be created, updated, or re-scaled. Use
- * DescribeEndpointOutput$FailureReason for information about
- * the failure. DeleteEndpoint is the only operation that can be
- * performed on a failed endpoint.
To get a list of endpoints with a specified status, use the ListEndpointsInput$StatusEquals filter.
+ *The description of the device.
*/ - EndpointStatus: EndpointStatus | string | undefined; + Description?: string; + + /** + *The time when the deployment on the device started.
+ */ + DeploymentStartTime?: Date; } -export namespace EndpointSummary { +export namespace DeviceDeploymentSummary { /** * @internal */ - export const filterSensitiveLog = (obj: EndpointSummary): any => ({ + export const filterSensitiveLog = (obj: DeviceDeploymentSummary): any => ({ ...obj, }); } /** - *The properties of an experiment as returned by the Search API.
+ *Summary of the device fleet.
*/ -export interface Experiment { +export interface DeviceFleetSummary { /** - *The name of the experiment.
+ *Amazon Resource Name (ARN) of the device fleet.
*/ - ExperimentName?: string; + DeviceFleetArn: string | undefined; /** - *The Amazon Resource Name (ARN) of the experiment.
+ *Name of the device fleet.
*/ - ExperimentArn?: string; + DeviceFleetName: string | undefined; /** - *The name of the experiment as displayed. If DisplayName
isn't specified,
- * ExperimentName
is displayed.
Timestamp of when the device fleet was created.
*/ - DisplayName?: string; + CreationTime?: Date; /** - *The source of the experiment.
+ *Timestamp of when the device fleet was last updated.
*/ - Source?: ExperimentSource; + LastModifiedTime?: Date; +} +export namespace DeviceFleetSummary { /** - *The description of the experiment.
+ * @internal */ - Description?: string; + export const filterSensitiveLog = (obj: DeviceFleetSummary): any => ({ + ...obj, + }); +} +/** + *Status of devices.
+ */ +export interface DeviceStats { /** - *When the experiment was created.
+ *The number of devices connected with a heartbeat.
*/ - CreationTime?: Date; + ConnectedDeviceCount: number | undefined; /** - *Who created the experiment.
+ *The number of registered devices.
*/ - CreatedBy?: UserContext; + RegisteredDeviceCount: number | undefined; +} +export namespace DeviceStats { /** - *When the experiment was last modified.
+ * @internal */ - LastModifiedTime?: Date; + export const filterSensitiveLog = (obj: DeviceStats): any => ({ + ...obj, + }); +} +/** + *Summary of model on edge device.
+ */ +export interface EdgeModelSummary { /** - *Information about the user who created or modified an experiment, trial, trial - * component, lineage group, or project.
+ *The name of the model.
*/ - LastModifiedBy?: UserContext; + ModelName: string | undefined; /** - *The list of tags that are associated with the experiment. You can use Search API to search on the tags.
+ *The version model.
*/ - Tags?: Tag[]; + ModelVersion: string | undefined; } -export namespace Experiment { +export namespace EdgeModelSummary { /** * @internal */ - export const filterSensitiveLog = (obj: Experiment): any => ({ + export const filterSensitiveLog = (obj: EdgeModelSummary): any => ({ ...obj, }); } /** - *A summary of the properties of an experiment. To get the complete set of properties, call
- * the DescribeExperiment API and provide the
- * ExperimentName
.
Summary of the device.
*/ -export interface ExperimentSummary { +export interface DeviceSummary { /** - *The Amazon Resource Name (ARN) of the experiment.
+ *The unique identifier of the device.
*/ - ExperimentArn?: string; + DeviceName: string | undefined; /** - *The name of the experiment.
+ *Amazon Resource Name (ARN) of the device.
*/ - ExperimentName?: string; + DeviceArn: string | undefined; /** - *The name of the experiment as displayed. If DisplayName
isn't specified,
- * ExperimentName
is displayed.
A description of the device.
*/ - DisplayName?: string; + Description?: string; /** - *The source of the experiment.
+ *The name of the fleet the device belongs to.
*/ - ExperimentSource?: ExperimentSource; + DeviceFleetName?: string; /** - *When the experiment was created.
+ *The Amazon Web Services Internet of Things (IoT) object thing name associated with the device..
*/ - CreationTime?: Date; + IotThingName?: string; /** - *When the experiment was last modified.
+ *The timestamp of the last registration or de-reregistration.
*/ - LastModifiedTime?: Date; + RegistrationTime?: Date; + + /** + *The last heartbeat received from the device.
+ */ + LatestHeartbeat?: Date; + + /** + *Models on the device.
+ */ + Models?: EdgeModelSummary[]; + + /** + *Edge Manager agent version.
+ */ + AgentVersion?: string; } -export namespace ExperimentSummary { +export namespace DeviceSummary { /** * @internal */ - export const filterSensitiveLog = (obj: ExperimentSummary): any => ({ + export const filterSensitiveLog = (obj: DeviceSummary): any => ({ ...obj, }); } -/** - *The container for the metadata for Fail step.
- */ -export interface FailStepMetadata { +export enum Direction { + ASCENDANTS = "Ascendants", + BOTH = "Both", + DESCENDANTS = "Descendants", +} + +export interface DisableSagemakerServicecatalogPortfolioInput {} + +export namespace DisableSagemakerServicecatalogPortfolioInput { /** - *A message that you define and then is processed and rendered by - * the Fail step when the error occurs.
+ * @internal */ - ErrorMessage?: string; + export const filterSensitiveLog = (obj: DisableSagemakerServicecatalogPortfolioInput): any => ({ + ...obj, + }); } -export namespace FailStepMetadata { +export interface DisableSagemakerServicecatalogPortfolioOutput {} + +export namespace DisableSagemakerServicecatalogPortfolioOutput { /** * @internal */ - export const filterSensitiveLog = (obj: FailStepMetadata): any => ({ + export const filterSensitiveLog = (obj: DisableSagemakerServicecatalogPortfolioOutput): any => ({ ...obj, }); } -/** - *Amazon SageMaker Feature Store stores features in a collection called Feature Group. - * A Feature Group can be visualized as a table which has rows, - * with a unique identifier for each row where each column in the table is a feature. - * In principle, a Feature Group is composed of features and values per features.
- */ -export interface FeatureGroup { +export interface DisassociateTrialComponentRequest { /** - *The Amazon Resource Name (ARN) of a FeatureGroup
.
The name of the component to disassociate from the trial.
*/ - FeatureGroupArn?: string; + TrialComponentName: string | undefined; /** - *The name of the FeatureGroup
.
The name of the trial to disassociate from.
*/ - FeatureGroupName?: string; + TrialName: string | undefined; +} +export namespace DisassociateTrialComponentRequest { /** - *The name of the Feature
whose value uniquely identifies a
- * Record
defined in the FeatureGroup
- * FeatureDefinitions
.
The name of the feature that stores the EventTime
of a Record in a
- * FeatureGroup
.
A EventTime
is point in time when a new event
- * occurs that corresponds to the creation or update of a Record
in
- * FeatureGroup
. All Records
in the FeatureGroup
- * must have a corresponding EventTime
.
The ARN of the trial component.
*/ - EventTimeFeatureName?: string; + TrialComponentArn?: string; /** - *A list of Feature
s. Each Feature
must include a
- * FeatureName
and a FeatureType
.
Valid FeatureType
s are Integral
, Fractional
and
- * String
.
- * FeatureName
s cannot be any of the following: is_deleted
,
- * write_time
, api_invocation_time
.
You can create up to 2,500 FeatureDefinition
s per
- * FeatureGroup
.
The Amazon Resource Name (ARN) of the trial.
*/ - FeatureDefinitions?: FeatureDefinition[]; + TrialArn?: string; +} +export namespace DisassociateTrialComponentResponse { /** - *The time a FeatureGroup
was created.
The domain's details.
+ */ +export interface DomainDetails { /** - *A timestamp indicating the last time you updated the feature group.
+ *The domain's Amazon Resource Name (ARN).
*/ - LastModifiedTime?: Date; + DomainArn?: string; /** - *Use this to specify the Amazon Web Services Key Management Service (KMS) Key ID, or
- * KMSKeyId
, for at rest data encryption. You can turn
- * OnlineStore
on or off by specifying the EnableOnlineStore
flag
- * at General Assembly; the default value is False
.
The domain ID.
*/ - OnlineStoreConfig?: OnlineStoreConfig; + DomainId?: string; /** - *The configuration of an OfflineStore
.
Provide an OfflineStoreConfig
in a request to
- * CreateFeatureGroup
to create an OfflineStore
.
To encrypt an OfflineStore
using at rest data encryption, specify Amazon Web Services Key
- * Management Service (KMS) key ID, or KMSKeyId
, in
- * S3StorageConfig
.
The domain name.
*/ - OfflineStoreConfig?: OfflineStoreConfig; + DomainName?: string; /** - *The Amazon Resource Name (ARN) of the IAM execution role used to create the feature - * group.
+ *The status.
*/ - RoleArn?: string; + Status?: DomainStatus | string; /** - *A FeatureGroup
status.
The creation time.
*/ - FeatureGroupStatus?: FeatureGroupStatus | string; + CreationTime?: Date; /** - *The status of OfflineStore
.
The last modified time.
*/ - OfflineStoreStatus?: OfflineStoreStatus; + LastModifiedTime?: Date; /** - *A value that indicates whether the feature group was updated successfully.
+ *The domain's URL.
*/ - LastUpdateStatus?: LastUpdateStatus; + Url?: string; +} +export namespace DomainDetails { /** - *The reason that the FeatureGroup
failed to
- * be replicated in the OfflineStore
. This is
- * failure may be due to a failure to create a FeatureGroup
in
- * or delete a FeatureGroup
from the OfflineStore
.
A collection of settings that update the current configuration for the RStudioServerPro
Domain-level app.
A free form description of a FeatureGroup
.
The execution role for the RStudioServerPro
Domain-level app.
Tags used to define a FeatureGroup
.
Specifies the ARN's of a SageMaker image and SageMaker image version, and the instance type that + * the version runs on.
*/ - Tags?: Tag[]; + DefaultResourceSpec?: ResourceSpec; } -export namespace FeatureGroup { +export namespace RStudioServerProDomainSettingsForUpdate { /** * @internal */ - export const filterSensitiveLog = (obj: FeatureGroup): any => ({ + export const filterSensitiveLog = (obj: RStudioServerProDomainSettingsForUpdate): any => ({ ...obj, }); } -export enum FeatureGroupSortBy { - CREATION_TIME = "CreationTime", - FEATURE_GROUP_STATUS = "FeatureGroupStatus", - NAME = "Name", - OFFLINE_STORE_STATUS = "OfflineStoreStatus", -} - -export enum FeatureGroupSortOrder { - ASCENDING = "Ascending", - DESCENDING = "Descending", -} - /** - *The name, Arn, CreationTime
, FeatureGroup
values,
- * LastUpdatedTime
and EnableOnlineStorage
status of a
- * FeatureGroup
.
A collection of Domain
configuration settings to update.
The name of FeatureGroup
.
A collection of RStudioServerPro
Domain-level app settings to update.
Unique identifier for the FeatureGroup
.
A directed edge connecting two lineage entities.
+ */ +export interface Edge { /** - *A timestamp indicating the time of creation time of the FeatureGroup
.
The Amazon Resource Name (ARN) of the source lineage entity of the directed edge.
*/ - CreationTime: Date | undefined; + SourceArn?: string; /** - *The status of a FeatureGroup. The status can be any of the following:
- * Creating
, Created
, CreateFail
,
- * Deleting
or DetailFail
.
The Amazon Resource Name (ARN) of the destination lineage entity of the directed edge.
*/ - FeatureGroupStatus?: FeatureGroupStatus | string; + DestinationArn?: string; /** - *Notifies you if replicating data into the OfflineStore
has failed. Returns
- * either: Active
or Blocked
.
The type of the Association(Edge) between the source and destination. For example ContributedTo
,
+ * Produced
, or DerivedFrom
.
The metadata for a feature. It can either be metadata that you specify, or metadata that is updated automatically.
+ *Contains information summarizing an edge deployment plan.
*/ -export interface FeatureMetadata { +export interface EdgeDeploymentPlanSummary { /** - *The Amazon Resource Number (ARN) of the feature group.
+ *The ARN of the edge deployment plan.
*/ - FeatureGroupArn?: string; + EdgeDeploymentPlanArn: string | undefined; /** - *The name of the feature group containing the feature.
+ *The name of the edge deployment plan.
*/ - FeatureGroupName?: string; + EdgeDeploymentPlanName: string | undefined; /** - *The name of feature.
+ *The name of the device fleet used for the deployment.
*/ - FeatureName?: string; + DeviceFleetName: string | undefined; /** - *The data type of the feature.
+ *The number of edge devices with the successful deployment.
*/ - FeatureType?: FeatureType | string; + EdgeDeploymentSuccess: number | undefined; /** - *A timestamp indicating when the feature was created.
+ *The number of edge devices yet to pick up the deployment, or in progress.
*/ - CreationTime?: Date; + EdgeDeploymentPending: number | undefined; /** - *A timestamp indicating when the feature was last modified.
+ *The number of edge devices that failed the deployment.
*/ - LastModifiedTime?: Date; + EdgeDeploymentFailed: number | undefined; /** - *An optional description that you specify to better describe the feature.
+ *The time when the edge deployment plan was created.
*/ - Description?: string; + CreationTime?: Date; /** - *Optional key-value pairs that you specify to better describe the feature.
+ *The time when the edge deployment plan was last updated.
*/ - Parameters?: FeatureParameter[]; + LastModifiedTime?: Date; } -export namespace FeatureMetadata { +export namespace EdgeDeploymentPlanSummary { /** * @internal */ - export const filterSensitiveLog = (obj: FeatureMetadata): any => ({ + export const filterSensitiveLog = (obj: EdgeDeploymentPlanSummary): any => ({ ...obj, }); } -export enum Operator { - CONTAINS = "Contains", - EQUALS = "Equals", - EXISTS = "Exists", - GREATER_THAN = "GreaterThan", - GREATER_THAN_OR_EQUAL_TO = "GreaterThanOrEqualTo", - IN = "In", - LESS_THAN = "LessThan", - LESS_THAN_OR_EQUAL_TO = "LessThanOrEqualTo", - NOT_EQUALS = "NotEquals", - NOT_EXISTS = "NotExists", -} - -/** - *A conditional statement for a search expression that includes a resource property, a - * Boolean operator, and a value. Resources that match the statement are returned in the - * results from the Search API.
- * - *If you specify a Value
, but not an Operator
, Amazon SageMaker uses the
- * equals operator.
In search, there are several property types:
- *To define a metric filter, enter a value using the form
- * "Metrics.
, where
is
- * a metric name. For example, the following filter searches for training jobs
- * with an "accuracy"
metric greater than
- * "0.9"
:
- * {
- *
- * "Name": "Metrics.accuracy",
- *
- * "Operator": "GreaterThan",
- *
- * "Value": "0.9"
- *
- * }
- *
To define a hyperparameter filter, enter a value with the form
- * "HyperParameters.
. Decimal hyperparameter
- * values are treated as a decimal in a comparison if the specified
- * Value
is also a decimal value. If the specified
- * Value
is an integer, the decimal hyperparameter values are
- * treated as integers. For example, the following filter is satisfied by
- * training jobs with a "learning_rate"
hyperparameter that is
- * less than "0.5"
:
- * {
- *
- * "Name": "HyperParameters.learning_rate",
- *
- * "Operator": "LessThan",
- *
- * "Value": "0.5"
- *
- * }
- *
To define a tag filter, enter a value with the form
- * Tags.
.
A resource property name. For example, TrainingJobName
. For
- * valid property names, see SearchRecord.
- * You must specify a valid property for the resource.
A Boolean binary operator that is used to evaluate the filter. The operator field - * contains one of the following values:
- *The value of Name
equals Value
.
The value of Name
doesn't equal Value
.
The Name
property exists.
The Name
property does not exist.
The value of Name
is greater than Value
.
- * Not supported for text properties.
The value of Name
is greater than or equal to Value
.
- * Not supported for text properties.
The value of Name
is less than Value
.
- * Not supported for text properties.
The value of Name
is less than or equal to Value
.
- * Not supported for text properties.
The value of Name
is one of the comma delimited strings in
- * Value
. Only supported for text properties.
The value of Name
contains the string Value
.
- * Only supported for text properties.
A SearchExpression
can include the Contains
operator
- * multiple times when the value of Name
is one of the following:
- * Experiment.DisplayName
- *
- * Experiment.ExperimentName
- *
- * Experiment.Tags
- *
- * Trial.DisplayName
- *
- * Trial.TrialName
- *
- * Trial.Tags
- *
- * TrialComponent.DisplayName
- *
- * TrialComponent.TrialComponentName
- *
- * TrialComponent.Tags
- *
- * TrialComponent.InputArtifacts
- *
- * TrialComponent.OutputArtifacts
- *
A SearchExpression
can include only one Contains
operator
- * for all other values of Name
. In these cases, if you include multiple
- * Contains
operators in the SearchExpression
, the result is
- * the following error message: "'CONTAINS' operator usage limit of 1
- * exceeded.
"
Status of edge devices with this model.
+ */ +export interface EdgeModelStat { + /** + *The name of the model.
*/ - Operator?: Operator | string; + ModelName: string | undefined; /** - *A value used with Name
and Operator
to determine which
- * resources satisfy the filter's condition. For numerical properties, Value
- * must be an integer or floating-point decimal. For timestamp properties,
- * Value
must be an ISO 8601 date-time string of the following format:
- * YYYY-mm-dd'T'HH:MM:SS
.
The model version.
*/ - Value?: string; + ModelVersion: string | undefined; + + /** + *The number of devices that have this model version and do not have a heart beat.
+ */ + OfflineDeviceCount: number | undefined; + + /** + *The number of devices that have this model version and have a heart beat.
+ */ + ConnectedDeviceCount: number | undefined; + + /** + *The number of devices that have this model version, a heart beat, and are currently running.
+ */ + ActiveDeviceCount: number | undefined; + + /** + *The number of devices with this model version and are producing sample data.
+ */ + SamplingDeviceCount: number | undefined; } -export namespace Filter { +export namespace EdgeModelStat { /** * @internal */ - export const filterSensitiveLog = (obj: Filter): any => ({ + export const filterSensitiveLog = (obj: EdgeModelStat): any => ({ ...obj, }); } /** - *Contains summary information about the flow definition.
+ *Summary of edge packaging job.
*/ -export interface FlowDefinitionSummary { +export interface EdgePackagingJobSummary { /** - *The name of the flow definition.
+ *The Amazon Resource Name (ARN) of the edge packaging job.
*/ - FlowDefinitionName: string | undefined; + EdgePackagingJobArn: string | undefined; /** - *The Amazon Resource Name (ARN) of the flow definition.
+ *The name of the edge packaging job.
*/ - FlowDefinitionArn: string | undefined; + EdgePackagingJobName: string | undefined; /** - *The status of the flow definition. Valid values:
+ *The status of the edge packaging job.
*/ - FlowDefinitionStatus: FlowDefinitionStatus | string | undefined; + EdgePackagingJobStatus: EdgePackagingJobStatus | string | undefined; /** - *The timestamp when SageMaker created the flow definition.
+ *The name of the SageMaker Neo compilation job.
*/ - CreationTime: Date | undefined; + CompilationJobName?: string; /** - *The reason why the flow definition creation failed. A failure reason is returned only when the flow definition status is Failed
.
The name of the model.
*/ - FailureReason?: string; + ModelName?: string; + + /** + *The version of the model.
+ */ + ModelVersion?: string; + + /** + *The timestamp of when the job was created.
+ */ + CreationTime?: Date; + + /** + *The timestamp of when the edge packaging job was last updated.
+ */ + LastModifiedTime?: Date; } -export namespace FlowDefinitionSummary { +export namespace EdgePackagingJobSummary { /** * @internal */ - export const filterSensitiveLog = (obj: FlowDefinitionSummary): any => ({ + export const filterSensitiveLog = (obj: EdgePackagingJobSummary): any => ({ ...obj, }); } -export interface GetDeviceFleetReportRequest { +/** + *The configurations and outcomes of an Amazon EMR step execution.
+ */ +export interface EMRStepMetadata { /** - *The name of the fleet.
+ *The identifier of the EMR cluster.
*/ - DeviceFleetName: string | undefined; + ClusterId?: string; + + /** + *The identifier of the EMR cluster step.
+ */ + StepId?: string; + + /** + *The name of the EMR cluster step.
+ */ + StepName?: string; + + /** + *The path to the log file where the cluster step's failure root cause + * is recorded.
+ */ + LogFilePath?: string; } -export namespace GetDeviceFleetReportRequest { +export namespace EMRStepMetadata { /** * @internal */ - export const filterSensitiveLog = (obj: GetDeviceFleetReportRequest): any => ({ + export const filterSensitiveLog = (obj: EMRStepMetadata): any => ({ ...obj, }); } -export interface GetDeviceFleetReportResponse { +export interface EnableSagemakerServicecatalogPortfolioInput {} + +export namespace EnableSagemakerServicecatalogPortfolioInput { /** - *The Amazon Resource Name (ARN) of the device.
+ * @internal */ - DeviceFleetArn: string | undefined; + export const filterSensitiveLog = (obj: EnableSagemakerServicecatalogPortfolioInput): any => ({ + ...obj, + }); +} + +export interface EnableSagemakerServicecatalogPortfolioOutput {} +export namespace EnableSagemakerServicecatalogPortfolioOutput { /** - *The name of the fleet.
+ * @internal */ - DeviceFleetName: string | undefined; + export const filterSensitiveLog = (obj: EnableSagemakerServicecatalogPortfolioOutput): any => ({ + ...obj, + }); +} +/** + *A schedule for a model monitoring job. For information about model monitor, see + * Amazon SageMaker Model + * Monitor.
+ */ +export interface MonitoringSchedule { /** - *The output configuration for storing sample data collected by the fleet.
+ *The Amazon Resource Name (ARN) of the monitoring schedule.
*/ - OutputConfig?: EdgeOutputConfig; + MonitoringScheduleArn?: string; /** - *Description of the fleet.
+ *The name of the monitoring schedule.
*/ - Description?: string; + MonitoringScheduleName?: string; /** - *Timestamp of when the report was generated.
+ *The status of the monitoring schedule. This can be one of the following values.
+ *
+ * PENDING
- The schedule is pending being created.
+ * FAILED
- The schedule failed.
+ * SCHEDULED
- The schedule was successfully created.
+ * STOPPED
- The schedule was stopped.
Status of devices.
+ *The type of the monitoring job definition to schedule.
*/ - DeviceStats?: DeviceStats; + MonitoringType?: MonitoringType | string; /** - *The versions of Edge Manager agent deployed on the fleet.
+ *If the monitoring schedule failed, the reason it failed.
*/ - AgentVersions?: AgentVersion[]; + FailureReason?: string; /** - *Status of model on device.
+ *The time that the monitoring schedule was created.
*/ - ModelStats?: EdgeModelStat[]; -} + CreationTime?: Date; -export namespace GetDeviceFleetReportResponse { /** - * @internal + *The last time the monitoring schedule was changed.
*/ - export const filterSensitiveLog = (obj: GetDeviceFleetReportResponse): any => ({ - ...obj, - }); -} + LastModifiedTime?: Date; -export interface GetLineageGroupPolicyRequest { /** - *The name or Amazon Resource Name (ARN) of the lineage group.
+ *Configures the monitoring schedule and defines the monitoring job.
*/ - LineageGroupName: string | undefined; + MonitoringScheduleConfig?: MonitoringScheduleConfig; + + /** + *The endpoint that hosts the model being monitored.
+ */ + EndpointName?: string; + + /** + *Summary of information about the last monitoring job to run.
+ */ + LastMonitoringExecutionSummary?: MonitoringExecutionSummary; + + /** + *A list of the tags associated with the monitoring schedlue. For more information, see Tagging Amazon Web Services + * resources in the Amazon Web Services General Reference Guide.
+ */ + Tags?: Tag[]; } -export namespace GetLineageGroupPolicyRequest { +export namespace MonitoringSchedule { /** * @internal */ - export const filterSensitiveLog = (obj: GetLineageGroupPolicyRequest): any => ({ + export const filterSensitiveLog = (obj: MonitoringSchedule): any => ({ ...obj, }); } -export interface GetLineageGroupPolicyResponse { +/** + *A hosted endpoint for real-time inference.
+ */ +export interface Endpoint { /** - *The Amazon Resource Name (ARN) of the lineage group.
+ *The name of the endpoint.
*/ - LineageGroupArn?: string; + EndpointName: string | undefined; /** - *The resource policy that gives access to the lineage group in another account.
+ *The Amazon Resource Name (ARN) of the endpoint.
*/ - ResourcePolicy?: string; -} + EndpointArn: string | undefined; + + /** + *The endpoint configuration associated with the endpoint.
+ */ + EndpointConfigName: string | undefined; + + /** + *A list of the production variants hosted on the endpoint. Each production variant is a + * model.
+ */ + ProductionVariants?: ProductionVariantSummary[]; + + /** + *The currently active data capture configuration used by your Endpoint.
+ */ + DataCaptureConfig?: DataCaptureConfigSummary; + + /** + *The status of the endpoint.
+ */ + EndpointStatus: EndpointStatus | string | undefined; + + /** + *If the endpoint failed, the reason it failed.
+ */ + FailureReason?: string; -export namespace GetLineageGroupPolicyResponse { /** - * @internal + *The time that the endpoint was created.
*/ - export const filterSensitiveLog = (obj: GetLineageGroupPolicyResponse): any => ({ - ...obj, - }); -} + CreationTime: Date | undefined; -export interface GetModelPackageGroupPolicyInput { /** - *The name of the model group for which to get the resource policy.
+ *The last time the endpoint was modified.
*/ - ModelPackageGroupName: string | undefined; -} + LastModifiedTime: Date | undefined; -export namespace GetModelPackageGroupPolicyInput { /** - * @internal + *A list of monitoring schedules for the endpoint. For information about model + * monitoring, see Amazon SageMaker Model Monitor.
*/ - export const filterSensitiveLog = (obj: GetModelPackageGroupPolicyInput): any => ({ - ...obj, - }); -} + MonitoringSchedules?: MonitoringSchedule[]; -export interface GetModelPackageGroupPolicyOutput { /** - *The resource policy for the model group.
+ *A list of the tags associated with the endpoint. For more information, see Tagging Amazon Web Services + * resources in the Amazon Web Services General Reference Guide.
*/ - ResourcePolicy: string | undefined; + Tags?: Tag[]; } -export namespace GetModelPackageGroupPolicyOutput { +export namespace Endpoint { /** * @internal */ - export const filterSensitiveLog = (obj: GetModelPackageGroupPolicyOutput): any => ({ + export const filterSensitiveLog = (obj: Endpoint): any => ({ ...obj, }); } -export interface GetSagemakerServicecatalogPortfolioStatusInput {} +export enum EndpointConfigSortKey { + CreationTime = "CreationTime", + Name = "Name", +} -export namespace GetSagemakerServicecatalogPortfolioStatusInput { +/** + *Provides summary information for an endpoint configuration.
+ */ +export interface EndpointConfigSummary { /** - * @internal + *The name of the endpoint configuration.
*/ - export const filterSensitiveLog = (obj: GetSagemakerServicecatalogPortfolioStatusInput): any => ({ - ...obj, - }); -} + EndpointConfigName: string | undefined; -export enum SagemakerServicecatalogStatus { - DISABLED = "Disabled", - ENABLED = "Enabled", -} + /** + *The Amazon Resource Name (ARN) of the endpoint configuration.
+ */ + EndpointConfigArn: string | undefined; -export interface GetSagemakerServicecatalogPortfolioStatusOutput { /** - *Whether Service Catalog is enabled or disabled in SageMaker.
+ *A timestamp that shows when the endpoint configuration was created.
*/ - Status?: SagemakerServicecatalogStatus | string; + CreationTime: Date | undefined; } -export namespace GetSagemakerServicecatalogPortfolioStatusOutput { +export namespace EndpointConfigSummary { /** * @internal */ - export const filterSensitiveLog = (obj: GetSagemakerServicecatalogPortfolioStatusOutput): any => ({ + export const filterSensitiveLog = (obj: EndpointConfigSummary): any => ({ ...obj, }); } -export enum ResourceType { - ENDPOINT = "Endpoint", - EXPERIMENT = "Experiment", - EXPERIMENT_TRIAL = "ExperimentTrial", - EXPERIMENT_TRIAL_COMPONENT = "ExperimentTrialComponent", - FEATURE_GROUP = "FeatureGroup", - FEATURE_METADATA = "FeatureMetadata", - MODEL_PACKAGE = "ModelPackage", - MODEL_PACKAGE_GROUP = "ModelPackageGroup", - PIPELINE = "Pipeline", - PIPELINE_EXECUTION = "PipelineExecution", - PROJECT = "Project", - TRAINING_JOB = "TrainingJob", +export enum EndpointSortKey { + CreationTime = "CreationTime", + Name = "Name", + Status = "Status", } /** - *Part of the SuggestionQuery
type. Specifies a hint for retrieving property
- * names that begin with the specified text.
Provides summary information for an endpoint.
*/ -export interface PropertyNameQuery { - /** - *Text that begins a property's name.
- */ - PropertyNameHint: string | undefined; -} - -export namespace PropertyNameQuery { +export interface EndpointSummary { /** - * @internal + *The name of the endpoint.
*/ - export const filterSensitiveLog = (obj: PropertyNameQuery): any => ({ - ...obj, - }); -} + EndpointName: string | undefined; -/** - *Specified in the GetSearchSuggestions request. - * Limits the property names that are included in the response.
- */ -export interface SuggestionQuery { /** - *Defines a property name hint. Only property - * names that begin with the specified hint are included in the response.
+ *The Amazon Resource Name (ARN) of the endpoint.
*/ - PropertyNameQuery?: PropertyNameQuery; -} + EndpointArn: string | undefined; -export namespace SuggestionQuery { /** - * @internal + *A timestamp that shows when the endpoint was created.
*/ - export const filterSensitiveLog = (obj: SuggestionQuery): any => ({ - ...obj, - }); -} + CreationTime: Date | undefined; -export interface GetSearchSuggestionsRequest { /** - *The name of the Amazon SageMaker resource to search for.
+ *A timestamp that shows when the endpoint was last modified.
*/ - Resource: ResourceType | string | undefined; + LastModifiedTime: Date | undefined; /** - *Limits the property names that are included in the response.
+ *The status of the endpoint.
+ *
+ * OutOfService
: Endpoint is not available to take incoming
+ * requests.
+ * Creating
: CreateEndpoint is executing.
+ * Updating
: UpdateEndpoint or UpdateEndpointWeightsAndCapacities is executing.
+ * SystemUpdating
: Endpoint is undergoing maintenance and cannot be
+ * updated or deleted or re-scaled until it has completed. This maintenance
+ * operation does not change any customer-specified values such as VPC config, KMS
+ * encryption, model, instance type, or instance count.
+ * RollingBack
: Endpoint fails to scale up or down or change its
+ * variant weight and is in the process of rolling back to its previous
+ * configuration. Once the rollback completes, endpoint returns to an
+ * InService
status. This transitional status only applies to an
+ * endpoint that has autoscaling enabled and is undergoing variant weight or
+ * capacity changes as part of an UpdateEndpointWeightsAndCapacities call or when the UpdateEndpointWeightsAndCapacities operation is called
+ * explicitly.
+ * InService
: Endpoint is available to process incoming
+ * requests.
+ * Deleting
: DeleteEndpoint is executing.
+ * Failed
: Endpoint could not be created, updated, or re-scaled. Use
+ * DescribeEndpointOutput$FailureReason for information about
+ * the failure. DeleteEndpoint is the only operation that can be
+ * performed on a failed endpoint.
To get a list of endpoints with a specified status, use the ListEndpointsInput$StatusEquals filter.
*/ - SuggestionQuery?: SuggestionQuery; + EndpointStatus: EndpointStatus | string | undefined; } -export namespace GetSearchSuggestionsRequest { +export namespace EndpointSummary { /** * @internal */ - export const filterSensitiveLog = (obj: GetSearchSuggestionsRequest): any => ({ + export const filterSensitiveLog = (obj: EndpointSummary): any => ({ ...obj, }); } /** - *A property name returned from a GetSearchSuggestions
call that specifies
- * a value in the PropertyNameQuery
field.
The properties of an experiment as returned by the Search API.
*/ -export interface PropertyNameSuggestion { +export interface Experiment { /** - *A suggested property name based on what you entered in the search textbox in the Amazon SageMaker - * console.
+ *The name of the experiment.
*/ - PropertyName?: string; -} + ExperimentName?: string; -export namespace PropertyNameSuggestion { /** - * @internal + *The Amazon Resource Name (ARN) of the experiment.
*/ - export const filterSensitiveLog = (obj: PropertyNameSuggestion): any => ({ - ...obj, - }); -} + ExperimentArn?: string; -export interface GetSearchSuggestionsResponse { /** - *A list of property names for a Resource
that match a
- * SuggestionQuery
.
The name of the experiment as displayed. If DisplayName
isn't specified,
+ * ExperimentName
is displayed.
The source of the experiment.
*/ - export const filterSensitiveLog = (obj: GetSearchSuggestionsResponse): any => ({ - ...obj, - }); -} + Source?: ExperimentSource; -/** - *Specifies configuration details for a Git repository when the repository is - * updated.
- */ -export interface GitConfigForUpdate { /** - *The Amazon Resource Name (ARN) of the Amazon Web Services Secrets Manager secret that contains the
- * credentials used to access the git repository. The secret must have a staging label of
- * AWSCURRENT
and must be in the following format:
- * {"username": UserName, "password":
- * Password}
- *
The description of the experiment.
*/ - SecretArn?: string; -} + Description?: string; -export namespace GitConfigForUpdate { /** - * @internal + *When the experiment was created.
*/ - export const filterSensitiveLog = (obj: GitConfigForUpdate): any => ({ - ...obj, - }); -} + CreationTime?: Date; -/** - *Container for human task user interface information.
- */ -export interface HumanTaskUiSummary { /** - *The name of the human task user interface.
+ *Who created the experiment.
*/ - HumanTaskUiName: string | undefined; + CreatedBy?: UserContext; /** - *The Amazon Resource Name (ARN) of the human task user interface.
+ *When the experiment was last modified.
+ */ + LastModifiedTime?: Date; + + /** + *Information about the user who created or modified an experiment, trial, trial + * component, lineage group, or project.
*/ - HumanTaskUiArn: string | undefined; + LastModifiedBy?: UserContext; /** - *A timestamp when SageMaker created the human task user interface.
+ *The list of tags that are associated with the experiment. You can use Search API to search on the tags.
*/ - CreationTime: Date | undefined; + Tags?: Tag[]; } -export namespace HumanTaskUiSummary { +export namespace Experiment { /** * @internal */ - export const filterSensitiveLog = (obj: HumanTaskUiSummary): any => ({ + export const filterSensitiveLog = (obj: Experiment): any => ({ ...obj, }); } -export enum HyperParameterTuningJobSortByOptions { - CreationTime = "CreationTime", - Name = "Name", - Status = "Status", -} - /** - *Provides summary information about a hyperparameter tuning job.
+ *A summary of the properties of an experiment. To get the complete set of properties, call
+ * the DescribeExperiment API and provide the
+ * ExperimentName
.
The name of the tuning job.
- */ - HyperParameterTuningJobName: string | undefined; - +export interface ExperimentSummary { /** - *The - * Amazon - * Resource Name (ARN) of the tuning job.
+ *The Amazon Resource Name (ARN) of the experiment.
*/ - HyperParameterTuningJobArn: string | undefined; + ExperimentArn?: string; /** - *The status of the - * tuning - * job.
+ *The name of the experiment.
*/ - HyperParameterTuningJobStatus: HyperParameterTuningJobStatus | string | undefined; + ExperimentName?: string; /** - *Specifies the search strategy hyperparameter tuning uses to choose which - * hyperparameters to - * use - * for each iteration. Currently, the only valid value is - * Bayesian.
+ *The name of the experiment as displayed. If DisplayName
isn't specified,
+ * ExperimentName
is displayed.
The date and time that the tuning job was created.
+ *The source of the experiment.
*/ - CreationTime: Date | undefined; + ExperimentSource?: ExperimentSource; /** - *The date and time that the tuning job ended.
+ *When the experiment was created.
*/ - HyperParameterTuningEndTime?: Date; + CreationTime?: Date; /** - *The date and time that the tuning job was - * modified.
+ *When the experiment was last modified.
*/ LastModifiedTime?: Date; +} +export namespace ExperimentSummary { /** - *The TrainingJobStatusCounters object that specifies the numbers of - * training jobs, categorized by status, that this tuning job launched.
- */ - TrainingJobStatusCounters: TrainingJobStatusCounters | undefined; - - /** - *The ObjectiveStatusCounters object that specifies the numbers of - * training jobs, categorized by objective metric status, that this tuning job - * launched.
+ * @internal */ - ObjectiveStatusCounters: ObjectiveStatusCounters | undefined; + export const filterSensitiveLog = (obj: ExperimentSummary): any => ({ + ...obj, + }); +} +/** + *The container for the metadata for Fail step.
+ */ +export interface FailStepMetadata { /** - *The ResourceLimits object that specifies the maximum number of - * training jobs and parallel training jobs allowed for this tuning job.
+ *A message that you define and then is processed and rendered by + * the Fail step when the error occurs.
*/ - ResourceLimits?: ResourceLimits; + ErrorMessage?: string; } -export namespace HyperParameterTuningJobSummary { +export namespace FailStepMetadata { /** * @internal */ - export const filterSensitiveLog = (obj: HyperParameterTuningJobSummary): any => ({ + export const filterSensitiveLog = (obj: FailStepMetadata): any => ({ ...obj, }); } /** - *A SageMaker image. A SageMaker image represents a set of container images that are derived from
- * a common base container image. Each of these container images is represented by a SageMaker
- * ImageVersion
.
Amazon SageMaker Feature Store stores features in a collection called Feature Group. + * A Feature Group can be visualized as a table which has rows, + * with a unique identifier for each row where each column in the table is a feature. + * In principle, a Feature Group is composed of features and values per features.
*/ -export interface Image { +export interface FeatureGroup { /** - *When the image was created.
+ *The Amazon Resource Name (ARN) of a FeatureGroup
.
The description of the image.
+ *The name of the FeatureGroup
.
The name of the image as displayed.
+ *The name of the Feature
whose value uniquely identifies a
+ * Record
defined in the FeatureGroup
+ * FeatureDefinitions
.
When a create, update, or delete operation fails, the reason for the failure.
+ *The name of the feature that stores the EventTime
of a Record in a
+ * FeatureGroup
.
A EventTime
is point in time when a new event
+ * occurs that corresponds to the creation or update of a Record
in
+ * FeatureGroup
. All Records
in the FeatureGroup
+ * must have a corresponding EventTime
.
The Amazon Resource Name (ARN) of the image.
+ *A list of Feature
s. Each Feature
must include a
+ * FeatureName
and a FeatureType
.
Valid FeatureType
s are Integral
, Fractional
and
+ * String
.
+ * FeatureName
s cannot be any of the following: is_deleted
,
+ * write_time
, api_invocation_time
.
You can create up to 2,500 FeatureDefinition
s per
+ * FeatureGroup
.
The name of the image.
+ *The time a FeatureGroup
was created.
The status of the image.
+ *A timestamp indicating the last time you updated the feature group.
*/ - ImageStatus: ImageStatus | string | undefined; + LastModifiedTime?: Date; /** - *When the image was last modified.
+ *Use this to specify the Amazon Web Services Key Management Service (KMS) Key ID, or
+ * KMSKeyId
, for at rest data encryption. You can turn
+ * OnlineStore
on or off by specifying the EnableOnlineStore
flag
+ * at General Assembly; the default value is False
.
The configuration of an OfflineStore
.
Provide an OfflineStoreConfig
in a request to
+ * CreateFeatureGroup
to create an OfflineStore
.
To encrypt an OfflineStore
using at rest data encryption, specify Amazon Web Services Key
+ * Management Service (KMS) key ID, or KMSKeyId
, in
+ * S3StorageConfig
.
A version of a SageMaker Image
. A version represents an existing container
- * image.
When the version was created.
+ *The Amazon Resource Name (ARN) of the IAM execution role used to create the feature + * group.
*/ - CreationTime: Date | undefined; + RoleArn?: string; /** - *When a create or delete operation fails, the reason for the failure.
+ *A FeatureGroup
status.
The Amazon Resource Name (ARN) of the image the version is based on.
+ *The status of OfflineStore
.
The ARN of the version.
+ *A value that indicates whether the feature group was updated successfully.
*/ - ImageVersionArn: string | undefined; + LastUpdateStatus?: LastUpdateStatus; /** - *The status of the version.
+ *The reason that the FeatureGroup
failed to
+ * be replicated in the OfflineStore
. This is
+ * failure may be due to a failure to create a FeatureGroup
in
+ * or delete a FeatureGroup
from the OfflineStore
.
When the version was last modified.
+ *A free form description of a FeatureGroup
.
The version number.
+ *Tags used to define a FeatureGroup
.
A structure that contains a list of recommendation jobs.
- */ -export interface InferenceRecommendationsJob { +/** + *The name, Arn, CreationTime
, FeatureGroup
values,
+ * LastUpdatedTime
and EnableOnlineStorage
status of a
+ * FeatureGroup
.
The name of FeatureGroup
.
Unique identifier for the FeatureGroup
.
A timestamp indicating the time of creation time of the FeatureGroup
.
The status of a FeatureGroup. The status can be any of the following:
+ * Creating
, Created
, CreateFail
,
+ * Deleting
or DetailFail
.
The name of the job.
+ *Notifies you if replicating data into the OfflineStore
has failed. Returns
+ * either: Active
or Blocked
.
The job description.
+ * @internal */ - JobDescription: string | undefined; + export const filterSensitiveLog = (obj: FeatureGroupSummary): any => ({ + ...obj, + }); +} +/** + *The metadata for a feature. It can either be metadata that you specify, or metadata that is updated automatically.
+ */ +export interface FeatureMetadata { /** - *The recommendation job type.
+ *The Amazon Resource Number (ARN) of the feature group.
*/ - JobType: RecommendationJobType | string | undefined; + FeatureGroupArn?: string; /** - *The Amazon Resource Name (ARN) of the recommendation job.
+ *The name of the feature group containing the feature.
*/ - JobArn: string | undefined; + FeatureGroupName?: string; /** - *The status of the job.
+ *The name of feature.
*/ - Status: RecommendationJobStatus | string | undefined; + FeatureName?: string; /** - *A timestamp that shows when the job was created.
+ *The data type of the feature.
*/ - CreationTime: Date | undefined; + FeatureType?: FeatureType | string; /** - *A timestamp that shows when the job completed.
+ *A timestamp indicating when the feature was created.
*/ - CompletionTime?: Date; + CreationTime?: Date; /** - *The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker - * to perform tasks on your behalf.
+ *A timestamp indicating when the feature was last modified.
*/ - RoleArn: string | undefined; + LastModifiedTime?: Date; /** - *A timestamp that shows when the job was last modified.
+ *An optional description that you specify to better describe the feature.
*/ - LastModifiedTime: Date | undefined; + Description?: string; /** - *If the job fails, provides information why the job failed.
+ *Optional key-value pairs that you specify to better describe the feature.
*/ - FailureReason?: string; + Parameters?: FeatureParameter[]; } -export namespace InferenceRecommendationsJob { +export namespace FeatureMetadata { /** * @internal */ - export const filterSensitiveLog = (obj: InferenceRecommendationsJob): any => ({ + export const filterSensitiveLog = (obj: FeatureMetadata): any => ({ ...obj, }); } +export enum Operator { + CONTAINS = "Contains", + EQUALS = "Equals", + EXISTS = "Exists", + GREATER_THAN = "GreaterThan", + GREATER_THAN_OR_EQUAL_TO = "GreaterThanOrEqualTo", + IN = "In", + LESS_THAN = "LessThan", + LESS_THAN_OR_EQUAL_TO = "LessThanOrEqualTo", + NOT_EQUALS = "NotEquals", + NOT_EXISTS = "NotExists", +} + /** - *Provides counts for human-labeled tasks in the labeling job.
+ *A conditional statement for a search expression that includes a resource property, a + * Boolean operator, and a value. Resources that match the statement are returned in the + * results from the Search API.
+ * + *If you specify a Value
, but not an Operator
, Amazon SageMaker uses the
+ * equals operator.
In search, there are several property types:
+ *To define a metric filter, enter a value using the form
+ * "Metrics.
, where
is
+ * a metric name. For example, the following filter searches for training jobs
+ * with an "accuracy"
metric greater than
+ * "0.9"
:
+ * {
+ *
+ * "Name": "Metrics.accuracy",
+ *
+ * "Operator": "GreaterThan",
+ *
+ * "Value": "0.9"
+ *
+ * }
+ *
To define a hyperparameter filter, enter a value with the form
+ * "HyperParameters.
. Decimal hyperparameter
+ * values are treated as a decimal in a comparison if the specified
+ * Value
is also a decimal value. If the specified
+ * Value
is an integer, the decimal hyperparameter values are
+ * treated as integers. For example, the following filter is satisfied by
+ * training jobs with a "learning_rate"
hyperparameter that is
+ * less than "0.5"
:
+ * {
+ *
+ * "Name": "HyperParameters.learning_rate",
+ *
+ * "Operator": "LessThan",
+ *
+ * "Value": "0.5"
+ *
+ * }
+ *
To define a tag filter, enter a value with the form
+ * Tags.
.
The total number of data objects labeled by a human worker.
+ *A resource property name. For example, TrainingJobName
. For
+ * valid property names, see SearchRecord.
+ * You must specify a valid property for the resource.
The total number of data objects that need to be labeled by a human worker.
+ *A Boolean binary operator that is used to evaluate the filter. The operator field + * contains one of the following values:
+ *The value of Name
equals Value
.
The value of Name
doesn't equal Value
.
The Name
property exists.
The Name
property does not exist.
The value of Name
is greater than Value
.
+ * Not supported for text properties.
The value of Name
is greater than or equal to Value
.
+ * Not supported for text properties.
The value of Name
is less than Value
.
+ * Not supported for text properties.
The value of Name
is less than or equal to Value
.
+ * Not supported for text properties.
The value of Name
is one of the comma delimited strings in
+ * Value
. Only supported for text properties.
The value of Name
contains the string Value
.
+ * Only supported for text properties.
A SearchExpression
can include the Contains
operator
+ * multiple times when the value of Name
is one of the following:
+ * Experiment.DisplayName
+ *
+ * Experiment.ExperimentName
+ *
+ * Experiment.Tags
+ *
+ * Trial.DisplayName
+ *
+ * Trial.TrialName
+ *
+ * Trial.Tags
+ *
+ * TrialComponent.DisplayName
+ *
+ * TrialComponent.TrialComponentName
+ *
+ * TrialComponent.Tags
+ *
+ * TrialComponent.InputArtifacts
+ *
+ * TrialComponent.OutputArtifacts
+ *
A SearchExpression
can include only one Contains
operator
+ * for all other values of Name
. In these cases, if you include multiple
+ * Contains
operators in the SearchExpression
, the result is
+ * the following error message: "'CONTAINS' operator usage limit of 1
+ * exceeded.
"
The total number of tasks in the labeling job.
+ *A value used with Name
and Operator
to determine which
+ * resources satisfy the filter's condition. For numerical properties, Value
+ * must be an integer or floating-point decimal. For timestamp properties,
+ * Value
must be an ISO 8601 date-time string of the following format:
+ * YYYY-mm-dd'T'HH:MM:SS
.
Provides summary information for a work team.
+ *Contains summary information about the flow definition.
*/ -export interface LabelingJobForWorkteamSummary { - /** - *The name of the labeling job that the work team is assigned to.
- */ - LabelingJobName?: string; - - /** - *A unique identifier for a labeling job. You can use this to refer to a specific - * labeling job.
- */ - JobReferenceCode: string | undefined; - - /** - *The Amazon Web Services account ID of the account used to start the labeling job.
- */ - WorkRequesterAccountId: string | undefined; - - /** - *The date and time that the labeling job was created.
- */ - CreationTime: Date | undefined; - - /** - *Provides information about the progress of a labeling job.
- */ - LabelCounters?: LabelCountersForWorkteam; - - /** - *The configured number of workers per data object.
- */ - NumberOfHumanWorkersPerDataObject?: number; -} - -export namespace LabelingJobForWorkteamSummary { +export interface FlowDefinitionSummary { /** - * @internal + *The name of the flow definition.
*/ - export const filterSensitiveLog = (obj: LabelingJobForWorkteamSummary): any => ({ - ...obj, - }); -} + FlowDefinitionName: string | undefined; -/** - *Provides summary information about a labeling job.
- */ -export interface LabelingJobSummary { /** - *The name of the labeling job.
+ *The Amazon Resource Name (ARN) of the flow definition.
*/ - LabelingJobName: string | undefined; + FlowDefinitionArn: string | undefined; /** - *The Amazon Resource Name (ARN) assigned to the labeling job when it was - * created.
+ *The status of the flow definition. Valid values:
*/ - LabelingJobArn: string | undefined; + FlowDefinitionStatus: FlowDefinitionStatus | string | undefined; /** - *The date and time that the job was created (timestamp).
+ *The timestamp when SageMaker created the flow definition.
*/ CreationTime: Date | undefined; /** - *The date and time that the job was last modified (timestamp).
- */ - LastModifiedTime: Date | undefined; - - /** - *The current status of the labeling job.
- */ - LabelingJobStatus: LabelingJobStatus | string | undefined; - - /** - *Counts showing the progress of the labeling job.
- */ - LabelCounters: LabelCounters | undefined; - - /** - *The Amazon Resource Name (ARN) of the work team assigned to the job.
- */ - WorkteamArn: string | undefined; - - /** - *The Amazon Resource Name (ARN) of a Lambda function. The function is run before each - * data object is sent to a worker.
- */ - PreHumanTaskLambdaArn: string | undefined; - - /** - *The Amazon Resource Name (ARN) of the Lambda function used to consolidate the - * annotations from individual workers into a label for a data object. For more - * information, see Annotation - * Consolidation.
- */ - AnnotationConsolidationLambdaArn?: string; - - /** - *If the LabelingJobStatus
field is Failed
, this field
- * contains a description of the error.
The reason why the flow definition creation failed. A failure reason is returned only when the flow definition status is Failed
.
The location of the output produced by the labeling job.
- */ - LabelingJobOutput?: LabelingJobOutput; - - /** - *Input configuration for the labeling job.
- */ - InputConfig?: LabelingJobInputConfig; -} - -export namespace LabelingJobSummary { - /** - * @internal - */ - export const filterSensitiveLog = (obj: LabelingJobSummary): any => ({ - ...obj, - }); -} - -/** - *Metadata for a Lambda step.
- */ -export interface LambdaStepMetadata { - /** - *The Amazon Resource Name (ARN) of the Lambda function that was run by this step execution.
- */ - Arn?: string; - - /** - *A list of the output parameters of the Lambda step.
- */ - OutputParameters?: OutputParameter[]; } -export namespace LambdaStepMetadata { +export namespace FlowDefinitionSummary { /** * @internal */ - export const filterSensitiveLog = (obj: LambdaStepMetadata): any => ({ + export const filterSensitiveLog = (obj: FlowDefinitionSummary): any => ({ ...obj, }); } -/** - *Lists a summary of the properties of a lineage group. A lineage group provides a group of shareable lineage entity - * resources.
- */ -export interface LineageGroupSummary { - /** - *The Amazon Resource Name (ARN) of the lineage group resource.
- */ - LineageGroupArn?: string; - - /** - *The name or Amazon Resource Name (ARN) of the lineage group.
- */ - LineageGroupName?: string; - - /** - *The display name of the lineage group summary.
- */ - DisplayName?: string; - - /** - *The creation time of the lineage group summary.
- */ - CreationTime?: Date; - +export interface GetDeviceFleetReportRequest { /** - *The last modified time of the lineage group summary.
+ *The name of the fleet.
*/ - LastModifiedTime?: Date; + DeviceFleetName: string | undefined; } -export namespace LineageGroupSummary { +export namespace GetDeviceFleetReportRequest { /** * @internal */ - export const filterSensitiveLog = (obj: LineageGroupSummary): any => ({ + export const filterSensitiveLog = (obj: GetDeviceFleetReportRequest): any => ({ ...obj, }); } -export enum LineageType { - ACTION = "Action", - ARTIFACT = "Artifact", - CONTEXT = "Context", - TRIAL_COMPONENT = "TrialComponent", -} - -export enum SortActionsBy { - CREATION_TIME = "CreationTime", - NAME = "Name", -} - -export enum SortOrder { - ASCENDING = "Ascending", - DESCENDING = "Descending", -} - -export interface ListActionsRequest { +export interface GetDeviceFleetReportResponse { /** - *A filter that returns only actions with the specified source URI.
+ *The Amazon Resource Name (ARN) of the device.
*/ - SourceUri?: string; + DeviceFleetArn: string | undefined; /** - *A filter that returns only actions of the specified type.
+ *The name of the fleet.
*/ - ActionType?: string; + DeviceFleetName: string | undefined; /** - *A filter that returns only actions created on or after the specified time.
+ *The output configuration for storing sample data collected by the fleet.
*/ - CreatedAfter?: Date; + OutputConfig?: EdgeOutputConfig; /** - *A filter that returns only actions created on or before the specified time.
+ *Description of the fleet.
*/ - CreatedBefore?: Date; + Description?: string; /** - *The property used to sort results. The default value is CreationTime
.
Timestamp of when the report was generated.
*/ - SortBy?: SortActionsBy | string; + ReportGenerated?: Date; /** - *The sort order. The default value is Descending
.
Status of devices.
*/ - SortOrder?: SortOrder | string; + DeviceStats?: DeviceStats; /** - *If the previous call to ListActions
didn't return the full set of actions,
- * the call returns a token for getting the next set of actions.
The versions of Edge Manager agent deployed on the fleet.
*/ - NextToken?: string; + AgentVersions?: AgentVersion[]; /** - *The maximum number of actions to return in the response. The default value is 10.
+ *Status of model on device.
*/ - MaxResults?: number; + ModelStats?: EdgeModelStat[]; } -export namespace ListActionsRequest { +export namespace GetDeviceFleetReportResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ListActionsRequest): any => ({ + export const filterSensitiveLog = (obj: GetDeviceFleetReportResponse): any => ({ ...obj, }); } -export interface ListActionsResponse { - /** - *A list of actions and their properties.
- */ - ActionSummaries?: ActionSummary[]; - +export interface GetLineageGroupPolicyRequest { /** - *A token for getting the next set of actions, if there are any.
+ *The name or Amazon Resource Name (ARN) of the lineage group.
*/ - NextToken?: string; + LineageGroupName: string | undefined; } -export namespace ListActionsResponse { +export namespace GetLineageGroupPolicyRequest { /** * @internal */ - export const filterSensitiveLog = (obj: ListActionsResponse): any => ({ + export const filterSensitiveLog = (obj: GetLineageGroupPolicyRequest): any => ({ ...obj, }); } -export interface ListAlgorithmsInput { +export interface GetLineageGroupPolicyResponse { /** - *A filter that returns only algorithms created after the specified time - * (timestamp).
+ *The Amazon Resource Name (ARN) of the lineage group.
*/ - CreationTimeAfter?: Date; + LineageGroupArn?: string; /** - *A filter that returns only algorithms created before the specified time - * (timestamp).
+ *The resource policy that gives access to the lineage group in another account.
*/ - CreationTimeBefore?: Date; + ResourcePolicy?: string; +} +export namespace GetLineageGroupPolicyResponse { /** - *The maximum number of algorithms to return in the response.
+ * @internal */ - MaxResults?: number; + export const filterSensitiveLog = (obj: GetLineageGroupPolicyResponse): any => ({ + ...obj, + }); +} +export interface GetModelPackageGroupPolicyInput { /** - *A string in the algorithm name. This filter returns only algorithms whose name - * contains the specified string.
+ *The name of the model group for which to get the resource policy.
*/ - NameContains?: string; + ModelPackageGroupName: string | undefined; +} +export namespace GetModelPackageGroupPolicyInput { /** - *If the response to a previous ListAlgorithms
request was truncated, the
- * response includes a NextToken
. To retrieve the next set of algorithms, use
- * the token in the next request.
The parameter by which to sort the results. The default is
- * CreationTime
.
The resource policy for the model group.
*/ - SortBy?: AlgorithmSortBy | string; + ResourcePolicy: string | undefined; +} +export namespace GetModelPackageGroupPolicyOutput { /** - *The sort order for the results. The default is Ascending
.
>An array of AlgorithmSummary
objects, each of which lists an
- * algorithm.
If the response is truncated, SageMaker returns this token. To retrieve the next set of - * algorithms, use it in the subsequent request.
+ *Whether Service Catalog is enabled or disabled in SageMaker.
*/ - NextToken?: string; + Status?: SagemakerServicecatalogStatus | string; } -export namespace ListAlgorithmsOutput { +export namespace GetSagemakerServicecatalogPortfolioStatusOutput { /** * @internal */ - export const filterSensitiveLog = (obj: ListAlgorithmsOutput): any => ({ + export const filterSensitiveLog = (obj: GetSagemakerServicecatalogPortfolioStatusOutput): any => ({ ...obj, }); } -export interface ListAppImageConfigsRequest { - /** - *The maximum number of AppImageConfigs to return in the response. The default value is - * 10.
- */ - MaxResults?: number; +export enum ResourceType { + ENDPOINT = "Endpoint", + EXPERIMENT = "Experiment", + EXPERIMENT_TRIAL = "ExperimentTrial", + EXPERIMENT_TRIAL_COMPONENT = "ExperimentTrialComponent", + FEATURE_GROUP = "FeatureGroup", + FEATURE_METADATA = "FeatureMetadata", + MODEL_PACKAGE = "ModelPackage", + MODEL_PACKAGE_GROUP = "ModelPackageGroup", + PIPELINE = "Pipeline", + PIPELINE_EXECUTION = "PipelineExecution", + PROJECT = "Project", + TRAINING_JOB = "TrainingJob", +} +/** + *Part of the SuggestionQuery
type. Specifies a hint for retrieving property
+ * names that begin with the specified text.
If the previous call to ListImages
didn't return the full set of
- * AppImageConfigs, the call returns a token for getting the next set of AppImageConfigs.
Text that begins a property's name.
*/ - NextToken?: string; + PropertyNameHint: string | undefined; +} +export namespace PropertyNameQuery { /** - *A filter that returns only AppImageConfigs whose name contains the specified string.
+ * @internal */ - NameContains?: string; + export const filterSensitiveLog = (obj: PropertyNameQuery): any => ({ + ...obj, + }); +} +/** + *Specified in the GetSearchSuggestions request. + * Limits the property names that are included in the response.
+ */ +export interface SuggestionQuery { /** - *A filter that returns only AppImageConfigs created on or before the specified time.
+ *Defines a property name hint. Only property + * names that begin with the specified hint are included in the response.
*/ - CreationTimeBefore?: Date; + PropertyNameQuery?: PropertyNameQuery; +} +export namespace SuggestionQuery { /** - *A filter that returns only AppImageConfigs created on or after the specified time.
+ * @internal */ - CreationTimeAfter?: Date; + export const filterSensitiveLog = (obj: SuggestionQuery): any => ({ + ...obj, + }); +} +export interface GetSearchSuggestionsRequest { /** - *A filter that returns only AppImageConfigs modified on or before the specified time.
+ *The name of the Amazon SageMaker resource to search for.
*/ - ModifiedTimeBefore?: Date; + Resource: ResourceType | string | undefined; /** - *A filter that returns only AppImageConfigs modified on or after the specified time.
+ *Limits the property names that are included in the response.
*/ - ModifiedTimeAfter?: Date; + SuggestionQuery?: SuggestionQuery; +} +export namespace GetSearchSuggestionsRequest { /** - *The property used to sort results. The default value is CreationTime
.
A property name returned from a GetSearchSuggestions
call that specifies
+ * a value in the PropertyNameQuery
field.
The sort order. The default value is Descending
.
A suggested property name based on what you entered in the search textbox in the Amazon SageMaker + * console.
*/ - SortOrder?: SortOrder | string; + PropertyName?: string; } -export namespace ListAppImageConfigsRequest { +export namespace PropertyNameSuggestion { /** * @internal */ - export const filterSensitiveLog = (obj: ListAppImageConfigsRequest): any => ({ + export const filterSensitiveLog = (obj: PropertyNameSuggestion): any => ({ ...obj, }); } -export interface ListAppImageConfigsResponse { - /** - *A token for getting the next set of AppImageConfigs, if there are any.
- */ - NextToken?: string; - +export interface GetSearchSuggestionsResponse { /** - *A list of AppImageConfigs and their properties.
+ *A list of property names for a Resource
that match a
+ * SuggestionQuery
.
If the previous response was truncated, you will receive this token. - * Use it in your next request to receive the next set of results.
- */ - NextToken?: string; - +/** + *Specifies configuration details for a Git repository when the repository is + * updated.
+ */ +export interface GitConfigForUpdate { /** - *Returns a list up to a specified limit.
+ *The Amazon Resource Name (ARN) of the Amazon Web Services Secrets Manager secret that contains the
+ * credentials used to access the git repository. The secret must have a staging label of
+ * AWSCURRENT
and must be in the following format:
+ * {"username": UserName, "password":
+ * Password}
+ *
The sort order for the results. The default is Ascending.
+ * @internal */ - SortOrder?: SortOrder | string; + export const filterSensitiveLog = (obj: GitConfigForUpdate): any => ({ + ...obj, + }); +} +/** + *Container for human task user interface information.
+ */ +export interface HumanTaskUiSummary { /** - *The parameter by which to sort the results. The default is CreationTime.
+ *The name of the human task user interface.
*/ - SortBy?: AppSortKey | string; + HumanTaskUiName: string | undefined; /** - *A parameter to search for the domain ID.
+ *The Amazon Resource Name (ARN) of the human task user interface.
*/ - DomainIdEquals?: string; + HumanTaskUiArn: string | undefined; /** - *A parameter to search by user profile name.
+ *A timestamp when SageMaker created the human task user interface.
*/ - UserProfileNameEquals?: string; + CreationTime: Date | undefined; } -export namespace ListAppsRequest { +export namespace HumanTaskUiSummary { /** * @internal */ - export const filterSensitiveLog = (obj: ListAppsRequest): any => ({ + export const filterSensitiveLog = (obj: HumanTaskUiSummary): any => ({ ...obj, }); } -export interface ListAppsResponse { - /** - *The list of apps.
- */ - Apps?: AppDetails[]; +export enum HyperParameterTuningJobSortByOptions { + CreationTime = "CreationTime", + Name = "Name", + Status = "Status", +} - /** - *If the previous response was truncated, you will receive this token. - * Use it in your next request to receive the next set of results.
+/** + *Provides summary information about a hyperparameter tuning job.
+ */ +export interface HyperParameterTuningJobSummary { + /** + *The name of the tuning job.
*/ - NextToken?: string; -} + HyperParameterTuningJobName: string | undefined; -export namespace ListAppsResponse { /** - * @internal + *The + * Amazon + * Resource Name (ARN) of the tuning job.
*/ - export const filterSensitiveLog = (obj: ListAppsResponse): any => ({ - ...obj, - }); -} - -export enum SortArtifactsBy { - CREATION_TIME = "CreationTime", -} + HyperParameterTuningJobArn: string | undefined; -export interface ListArtifactsRequest { /** - *A filter that returns only artifacts with the specified source URI.
+ *The status of the + * tuning + * job.
*/ - SourceUri?: string; + HyperParameterTuningJobStatus: HyperParameterTuningJobStatus | string | undefined; /** - *A filter that returns only artifacts of the specified type.
+ *Specifies the search strategy hyperparameter tuning uses to choose which + * hyperparameters to + * use + * for each iteration. Currently, the only valid value is + * Bayesian.
*/ - ArtifactType?: string; + Strategy: HyperParameterTuningJobStrategyType | string | undefined; /** - *A filter that returns only artifacts created on or after the specified time.
+ *The date and time that the tuning job was created.
*/ - CreatedAfter?: Date; + CreationTime: Date | undefined; /** - *A filter that returns only artifacts created on or before the specified time.
+ *The date and time that the tuning job ended.
*/ - CreatedBefore?: Date; + HyperParameterTuningEndTime?: Date; /** - *The property used to sort results. The default value is CreationTime
.
The date and time that the tuning job was + * modified.
*/ - SortBy?: SortArtifactsBy | string; + LastModifiedTime?: Date; /** - *The sort order. The default value is Descending
.
The TrainingJobStatusCounters object that specifies the numbers of + * training jobs, categorized by status, that this tuning job launched.
*/ - SortOrder?: SortOrder | string; + TrainingJobStatusCounters: TrainingJobStatusCounters | undefined; /** - *If the previous call to ListArtifacts
didn't return the full set of artifacts,
- * the call returns a token for getting the next set of artifacts.
The ObjectiveStatusCounters object that specifies the numbers of + * training jobs, categorized by objective metric status, that this tuning job + * launched.
*/ - NextToken?: string; + ObjectiveStatusCounters: ObjectiveStatusCounters | undefined; /** - *The maximum number of artifacts to return in the response. The default value is 10.
+ *The ResourceLimits object that specifies the maximum number of + * training jobs and parallel training jobs allowed for this tuning job.
*/ - MaxResults?: number; + ResourceLimits?: ResourceLimits; } -export namespace ListArtifactsRequest { +export namespace HyperParameterTuningJobSummary { /** * @internal */ - export const filterSensitiveLog = (obj: ListArtifactsRequest): any => ({ + export const filterSensitiveLog = (obj: HyperParameterTuningJobSummary): any => ({ ...obj, }); } -export interface ListArtifactsResponse { +/** + *A SageMaker image. A SageMaker image represents a set of container images that are derived from
+ * a common base container image. Each of these container images is represented by a SageMaker
+ * ImageVersion
.
A list of artifacts and their properties.
+ *When the image was created.
*/ - ArtifactSummaries?: ArtifactSummary[]; + CreationTime: Date | undefined; /** - *A token for getting the next set of artifacts, if there are any.
+ *The description of the image.
*/ - NextToken?: string; -} + Description?: string; -export namespace ListArtifactsResponse { /** - * @internal + *The name of the image as displayed.
*/ - export const filterSensitiveLog = (obj: ListArtifactsResponse): any => ({ - ...obj, - }); -} - -export enum SortAssociationsBy { - CREATION_TIME = "CreationTime", - DESTINATION_ARN = "DestinationArn", - DESTINATION_TYPE = "DestinationType", - SOURCE_ARN = "SourceArn", - SOURCE_TYPE = "SourceType", -} + DisplayName?: string; -export interface ListAssociationsRequest { /** - *A filter that returns only associations with the specified source ARN.
+ *When a create, update, or delete operation fails, the reason for the failure.
*/ - SourceArn?: string; + FailureReason?: string; /** - *A filter that returns only associations with the specified destination Amazon Resource Name (ARN).
+ *The Amazon Resource Name (ARN) of the image.
*/ - DestinationArn?: string; + ImageArn: string | undefined; /** - *A filter that returns only associations with the specified source type.
+ *The name of the image.
*/ - SourceType?: string; + ImageName: string | undefined; /** - *A filter that returns only associations with the specified destination type.
+ *The status of the image.
*/ - DestinationType?: string; + ImageStatus: ImageStatus | string | undefined; /** - *A filter that returns only associations of the specified type.
+ *When the image was last modified.
*/ - AssociationType?: AssociationEdgeType | string; + LastModifiedTime: Date | undefined; +} +export namespace Image { /** - *A filter that returns only associations created on or after the specified time.
+ * @internal */ - CreatedAfter?: Date; + export const filterSensitiveLog = (obj: Image): any => ({ + ...obj, + }); +} - /** - *A filter that returns only associations created on or before the specified time.
- */ - CreatedBefore?: Date; +export enum ImageSortBy { + CREATION_TIME = "CREATION_TIME", + IMAGE_NAME = "IMAGE_NAME", + LAST_MODIFIED_TIME = "LAST_MODIFIED_TIME", +} + +export enum ImageSortOrder { + ASCENDING = "ASCENDING", + DESCENDING = "DESCENDING", +} +/** + *A version of a SageMaker Image
. A version represents an existing container
+ * image.
The property used to sort results. The default value is CreationTime
.
When the version was created.
*/ - SortBy?: SortAssociationsBy | string; + CreationTime: Date | undefined; /** - *The sort order. The default value is Descending
.
When a create or delete operation fails, the reason for the failure.
*/ - SortOrder?: SortOrder | string; + FailureReason?: string; /** - *If the previous call to ListAssociations
didn't return the full set of associations,
- * the call returns a token for getting the next set of associations.
The Amazon Resource Name (ARN) of the image the version is based on.
*/ - NextToken?: string; + ImageArn: string | undefined; /** - *The maximum number of associations to return in the response. The default value is 10.
+ *The ARN of the version.
*/ - MaxResults?: number; -} + ImageVersionArn: string | undefined; -export namespace ListAssociationsRequest { /** - * @internal + *The status of the version.
*/ - export const filterSensitiveLog = (obj: ListAssociationsRequest): any => ({ - ...obj, - }); -} + ImageVersionStatus: ImageVersionStatus | string | undefined; -export interface ListAssociationsResponse { /** - *A list of associations and their properties.
+ *When the version was last modified.
*/ - AssociationSummaries?: AssociationSummary[]; + LastModifiedTime: Date | undefined; /** - *A token for getting the next set of associations, if there are any.
+ *The version number.
*/ - NextToken?: string; + Version: number | undefined; } -export namespace ListAssociationsResponse { +export namespace ImageVersion { /** * @internal */ - export const filterSensitiveLog = (obj: ListAssociationsResponse): any => ({ + export const filterSensitiveLog = (obj: ImageVersion): any => ({ ...obj, }); } -export interface ListAutoMLJobsRequest { +export enum ImageVersionSortBy { + CREATION_TIME = "CREATION_TIME", + LAST_MODIFIED_TIME = "LAST_MODIFIED_TIME", + VERSION = "VERSION", +} + +export enum ImageVersionSortOrder { + ASCENDING = "ASCENDING", + DESCENDING = "DESCENDING", +} + +/** + *A structure that contains a list of recommendation jobs.
+ */ +export interface InferenceRecommendationsJob { /** - *Request a list of jobs, using a filter for time.
+ *The name of the job.
*/ - CreationTimeAfter?: Date; + JobName: string | undefined; /** - *Request a list of jobs, using a filter for time.
+ *The job description.
*/ - CreationTimeBefore?: Date; + JobDescription: string | undefined; /** - *Request a list of jobs, using a filter for time.
+ *The recommendation job type.
*/ - LastModifiedTimeAfter?: Date; + JobType: RecommendationJobType | string | undefined; /** - *Request a list of jobs, using a filter for time.
+ *The Amazon Resource Name (ARN) of the recommendation job.
*/ - LastModifiedTimeBefore?: Date; + JobArn: string | undefined; /** - *Request a list of jobs, using a search filter for name.
+ *The status of the job.
*/ - NameContains?: string; + Status: RecommendationJobStatus | string | undefined; /** - *Request a list of jobs, using a filter for status.
+ *A timestamp that shows when the job was created.
*/ - StatusEquals?: AutoMLJobStatus | string; + CreationTime: Date | undefined; /** - *The sort order for the results. The default is Descending
.
A timestamp that shows when the job completed.
*/ - SortOrder?: AutoMLSortOrder | string; + CompletionTime?: Date; /** - *The parameter by which to sort the results. The default is Name
.
The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker + * to perform tasks on your behalf.
*/ - SortBy?: AutoMLSortBy | string; + RoleArn: string | undefined; /** - *Request a list of jobs up to a specified limit.
+ *A timestamp that shows when the job was last modified.
*/ - MaxResults?: number; + LastModifiedTime: Date | undefined; /** - *If the previous response was truncated, you receive this token. Use it in your next - * request to receive the next set of results.
+ *If the job fails, provides information why the job failed.
*/ - NextToken?: string; + FailureReason?: string; } -export namespace ListAutoMLJobsRequest { +export namespace InferenceRecommendationsJob { /** * @internal */ - export const filterSensitiveLog = (obj: ListAutoMLJobsRequest): any => ({ + export const filterSensitiveLog = (obj: InferenceRecommendationsJob): any => ({ ...obj, }); } -export interface ListAutoMLJobsResponse { +/** + *Provides counts for human-labeled tasks in the labeling job.
+ */ +export interface LabelCountersForWorkteam { + /** + *The total number of data objects labeled by a human worker.
+ */ + HumanLabeled?: number; + /** - *Returns a summary list of jobs.
+ *The total number of data objects that need to be labeled by a human worker.
*/ - AutoMLJobSummaries: AutoMLJobSummary[] | undefined; + PendingHuman?: number; /** - *If the previous response was truncated, you receive this token. Use it in your next - * request to receive the next set of results.
+ *The total number of tasks in the labeling job.
*/ - NextToken?: string; + Total?: number; } -export namespace ListAutoMLJobsResponse { +export namespace LabelCountersForWorkteam { /** * @internal */ - export const filterSensitiveLog = (obj: ListAutoMLJobsResponse): any => ({ + export const filterSensitiveLog = (obj: LabelCountersForWorkteam): any => ({ ...obj, }); } -export interface ListCandidatesForAutoMLJobRequest { - /** - *List the candidates created for the job by providing the job's name.
- */ - AutoMLJobName: string | undefined; - +/** + *Provides summary information for a work team.
+ */ +export interface LabelingJobForWorkteamSummary { /** - *List the candidates for the job and filter by status.
+ *The name of the labeling job that the work team is assigned to.
*/ - StatusEquals?: CandidateStatus | string; + LabelingJobName?: string; /** - *List the candidates for the job and filter by candidate name.
+ *A unique identifier for a labeling job. You can use this to refer to a specific + * labeling job.
*/ - CandidateNameEquals?: string; + JobReferenceCode: string | undefined; /** - *The sort order for the results. The default is Ascending
.
The Amazon Web Services account ID of the account used to start the labeling job.
*/ - SortOrder?: AutoMLSortOrder | string; + WorkRequesterAccountId: string | undefined; /** - *The parameter by which to sort the results. The default is
- * Descending
.
The date and time that the labeling job was created.
*/ - SortBy?: CandidateSortBy | string; + CreationTime: Date | undefined; /** - *List the job's candidates up to a specified limit.
+ *Provides information about the progress of a labeling job.
*/ - MaxResults?: number; + LabelCounters?: LabelCountersForWorkteam; /** - *If the previous response was truncated, you receive this token. Use it in your next - * request to receive the next set of results.
+ *The configured number of workers per data object.
*/ - NextToken?: string; + NumberOfHumanWorkersPerDataObject?: number; } -export namespace ListCandidatesForAutoMLJobRequest { +export namespace LabelingJobForWorkteamSummary { /** * @internal */ - export const filterSensitiveLog = (obj: ListCandidatesForAutoMLJobRequest): any => ({ + export const filterSensitiveLog = (obj: LabelingJobForWorkteamSummary): any => ({ ...obj, }); } -export interface ListCandidatesForAutoMLJobResponse { +/** + *Provides summary information about a labeling job.
+ */ +export interface LabelingJobSummary { /** - *Summaries about the AutoMLCandidates
.
The name of the labeling job.
*/ - Candidates: AutoMLCandidate[] | undefined; + LabelingJobName: string | undefined; /** - *If the previous response was truncated, you receive this token. Use it in your next - * request to receive the next set of results.
+ *The Amazon Resource Name (ARN) assigned to the labeling job when it was + * created.
*/ - NextToken?: string; -} + LabelingJobArn: string | undefined; -export namespace ListCandidatesForAutoMLJobResponse { /** - * @internal + *The date and time that the job was created (timestamp).
*/ - export const filterSensitiveLog = (obj: ListCandidatesForAutoMLJobResponse): any => ({ - ...obj, - }); -} + CreationTime: Date | undefined; -export interface ListCodeRepositoriesInput { /** - *A filter that returns only Git repositories that were created after the specified - * time.
+ *The date and time that the job was last modified (timestamp).
*/ - CreationTimeAfter?: Date; + LastModifiedTime: Date | undefined; /** - *A filter that returns only Git repositories that were created before the specified - * time.
+ *The current status of the labeling job.
*/ - CreationTimeBefore?: Date; + LabelingJobStatus: LabelingJobStatus | string | undefined; /** - *A filter that returns only Git repositories that were last modified after the - * specified time.
+ *Counts showing the progress of the labeling job.
*/ - LastModifiedTimeAfter?: Date; + LabelCounters: LabelCounters | undefined; /** - *A filter that returns only Git repositories that were last modified before the - * specified time.
+ *The Amazon Resource Name (ARN) of the work team assigned to the job.
*/ - LastModifiedTimeBefore?: Date; + WorkteamArn: string | undefined; /** - *The maximum number of Git repositories to return in the response.
+ *The Amazon Resource Name (ARN) of a Lambda function. The function is run before each + * data object is sent to a worker.
*/ - MaxResults?: number; + PreHumanTaskLambdaArn: string | undefined; /** - *A string in the Git repositories name. This filter returns only repositories whose - * name contains the specified string.
+ *The Amazon Resource Name (ARN) of the Lambda function used to consolidate the + * annotations from individual workers into a label for a data object. For more + * information, see Annotation + * Consolidation.
*/ - NameContains?: string; + AnnotationConsolidationLambdaArn?: string; /** - *If the result of a ListCodeRepositoriesOutput
request was truncated, the
- * response includes a NextToken
. To get the next set of Git repositories, use
- * the token in the next request.
If the LabelingJobStatus
field is Failed
, this field
+ * contains a description of the error.
The field to sort results by. The default is Name
.
The location of the output produced by the labeling job.
*/ - SortBy?: CodeRepositorySortBy | string; + LabelingJobOutput?: LabelingJobOutput; /** - *The sort order for results. The default is Ascending
.
Input configuration for the labeling job.
*/ - SortOrder?: CodeRepositorySortOrder | string; + InputConfig?: LabelingJobInputConfig; } -export namespace ListCodeRepositoriesInput { +export namespace LabelingJobSummary { /** * @internal */ - export const filterSensitiveLog = (obj: ListCodeRepositoriesInput): any => ({ + export const filterSensitiveLog = (obj: LabelingJobSummary): any => ({ ...obj, }); } -export interface ListCodeRepositoriesOutput { +/** + *Metadata for a Lambda step.
+ */ +export interface LambdaStepMetadata { /** - *Gets a list of summaries of the Git repositories. Each summary specifies the following - * values for the repository:
- *Name
- *Amazon Resource Name (ARN)
- *Creation time
- *Last modified time
- *Configuration information, including the URL location of the repository and - * the ARN of the Amazon Web Services Secrets Manager secret that contains the credentials used - * to access the repository.
- *The Amazon Resource Name (ARN) of the Lambda function that was run by this step execution.
*/ - CodeRepositorySummaryList: CodeRepositorySummary[] | undefined; + Arn?: string; /** - *If the result of a ListCodeRepositoriesOutput
request was truncated, the
- * response includes a NextToken
. To get the next set of Git repositories, use
- * the token in the next request.
A list of the output parameters of the Lambda step.
*/ - NextToken?: string; + OutputParameters?: OutputParameter[]; } -export namespace ListCodeRepositoriesOutput { +export namespace LambdaStepMetadata { /** * @internal */ - export const filterSensitiveLog = (obj: ListCodeRepositoriesOutput): any => ({ + export const filterSensitiveLog = (obj: LambdaStepMetadata): any => ({ ...obj, }); } -export enum ListCompilationJobsSortBy { - CREATION_TIME = "CreationTime", - NAME = "Name", - STATUS = "Status", -} +/** + *Lists a summary of the properties of a lineage group. A lineage group provides a group of shareable lineage entity + * resources.
+ */ +export interface LineageGroupSummary { + /** + *The Amazon Resource Name (ARN) of the lineage group resource.
+ */ + LineageGroupArn?: string; -export interface ListCompilationJobsRequest { /** - *If the result of the previous ListCompilationJobs
request was truncated,
- * the response includes a NextToken
. To retrieve the next set of model
- * compilation jobs, use the token in the next request.
The name or Amazon Resource Name (ARN) of the lineage group.
*/ - NextToken?: string; + LineageGroupName?: string; /** - *The maximum number of model compilation jobs to return in the response.
+ *The display name of the lineage group summary.
*/ - MaxResults?: number; + DisplayName?: string; /** - *A filter that returns the model compilation jobs that were created after a specified - * time.
+ *The creation time of the lineage group summary.
*/ - CreationTimeAfter?: Date; + CreationTime?: Date; /** - *A filter that returns the model compilation jobs that were created before a specified - * time.
+ *The last modified time of the lineage group summary.
*/ - CreationTimeBefore?: Date; + LastModifiedTime?: Date; +} +export namespace LineageGroupSummary { /** - *A filter that returns the model compilation jobs that were modified after a specified - * time.
+ * @internal */ - LastModifiedTimeAfter?: Date; + export const filterSensitiveLog = (obj: LineageGroupSummary): any => ({ + ...obj, + }); +} + +export enum LineageType { + ACTION = "Action", + ARTIFACT = "Artifact", + CONTEXT = "Context", + TRIAL_COMPONENT = "TrialComponent", +} + +export enum SortActionsBy { + CREATION_TIME = "CreationTime", + NAME = "Name", +} + +export enum SortOrder { + ASCENDING = "Ascending", + DESCENDING = "Descending", +} +export interface ListActionsRequest { /** - *A filter that returns the model compilation jobs that were modified before a specified - * time.
+ *A filter that returns only actions with the specified source URI.
*/ - LastModifiedTimeBefore?: Date; + SourceUri?: string; + + /** + *A filter that returns only actions of the specified type.
+ */ + ActionType?: string; + + /** + *A filter that returns only actions created on or after the specified time.
+ */ + CreatedAfter?: Date; + + /** + *A filter that returns only actions created on or before the specified time.
+ */ + CreatedBefore?: Date; /** - *A filter that returns the model compilation jobs whose name contains a specified - * string.
+ *The property used to sort results. The default value is CreationTime
.
A filter that retrieves model compilation jobs with a specific DescribeCompilationJobResponse$CompilationJobStatus status.
+ *The sort order. The default value is Descending
.
The field by which to sort results. The default is CreationTime
.
If the previous call to ListActions
didn't return the full set of actions,
+ * the call returns a token for getting the next set of actions.
The sort order for results. The default is Ascending
.
The maximum number of actions to return in the response. The default value is 10.
*/ - SortOrder?: SortOrder | string; + MaxResults?: number; } -export namespace ListCompilationJobsRequest { +export namespace ListActionsRequest { /** * @internal */ - export const filterSensitiveLog = (obj: ListCompilationJobsRequest): any => ({ + export const filterSensitiveLog = (obj: ListActionsRequest): any => ({ ...obj, }); } -export interface ListCompilationJobsResponse { +export interface ListActionsResponse { /** - *An array of CompilationJobSummary objects, each describing a model - * compilation job.
+ *A list of actions and their properties.
*/ - CompilationJobSummaries: CompilationJobSummary[] | undefined; + ActionSummaries?: ActionSummary[]; /** - *If the response is truncated, Amazon SageMaker returns this NextToken
. To retrieve
- * the next set of model compilation jobs, use this token in the next request.
A token for getting the next set of actions, if there are any.
*/ NextToken?: string; } -export namespace ListCompilationJobsResponse { +export namespace ListActionsResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ListCompilationJobsResponse): any => ({ + export const filterSensitiveLog = (obj: ListActionsResponse): any => ({ ...obj, }); } -export enum SortContextsBy { - CREATION_TIME = "CreationTime", - NAME = "Name", -} - -export interface ListContextsRequest { - /** - *A filter that returns only contexts with the specified source URI.
- */ - SourceUri?: string; - +export interface ListAlgorithmsInput { /** - *A filter that returns only contexts of the specified type.
+ *A filter that returns only algorithms created after the specified time + * (timestamp).
*/ - ContextType?: string; + CreationTimeAfter?: Date; /** - *A filter that returns only contexts created on or after the specified time.
+ *A filter that returns only algorithms created before the specified time + * (timestamp).
*/ - CreatedAfter?: Date; + CreationTimeBefore?: Date; /** - *A filter that returns only contexts created on or before the specified time.
+ *The maximum number of algorithms to return in the response.
*/ - CreatedBefore?: Date; + MaxResults?: number; /** - *The property used to sort results. The default value is CreationTime
.
A string in the algorithm name. This filter returns only algorithms whose name + * contains the specified string.
*/ - SortBy?: SortContextsBy | string; + NameContains?: string; /** - *The sort order. The default value is Descending
.
If the response to a previous ListAlgorithms
request was truncated, the
+ * response includes a NextToken
. To retrieve the next set of algorithms, use
+ * the token in the next request.
If the previous call to ListContexts
didn't return the full set of contexts,
- * the call returns a token for getting the next set of contexts.
The parameter by which to sort the results. The default is
+ * CreationTime
.
The maximum number of contexts to return in the response. The default value is 10.
+ *The sort order for the results. The default is Ascending
.
A list of contexts and their properties.
+ *>An array of AlgorithmSummary
objects, each of which lists an
+ * algorithm.
A token for getting the next set of contexts, if there are any.
+ *If the response is truncated, SageMaker returns this token. To retrieve the next set of + * algorithms, use it in the subsequent request.
*/ NextToken?: string; } -export namespace ListContextsResponse { +export namespace ListAlgorithmsOutput { /** * @internal */ - export const filterSensitiveLog = (obj: ListContextsResponse): any => ({ + export const filterSensitiveLog = (obj: ListAlgorithmsOutput): any => ({ ...obj, }); } -export enum MonitoringJobDefinitionSortKey { - CREATION_TIME = "CreationTime", - NAME = "Name", -} +export interface ListAppImageConfigsRequest { + /** + *The maximum number of AppImageConfigs to return in the response. The default value is + * 10.
+ */ + MaxResults?: number; -export interface ListDataQualityJobDefinitionsRequest { /** - *A filter that lists the data quality job definitions associated with the specified - * endpoint.
+ *If the previous call to ListImages
didn't return the full set of
+ * AppImageConfigs, the call returns a token for getting the next set of AppImageConfigs.
The field to sort results by. The default is CreationTime
.
A filter that returns only AppImageConfigs whose name contains the specified string.
*/ - SortBy?: MonitoringJobDefinitionSortKey | string; + NameContains?: string; /** - *The sort order for results. The default is Descending
.
A filter that returns only AppImageConfigs created on or before the specified time.
*/ - SortOrder?: SortOrder | string; + CreationTimeBefore?: Date; /** - *If the result of the previous ListDataQualityJobDefinitions
request was
- * truncated, the response includes a NextToken
. To retrieve the next set of
- * transform jobs, use the token in the next request.>
A filter that returns only AppImageConfigs created on or after the specified time.
*/ - NextToken?: string; + CreationTimeAfter?: Date; /** - *The maximum number of data quality monitoring job definitions to return in the - * response.
+ *A filter that returns only AppImageConfigs modified on or before the specified time.
*/ - MaxResults?: number; + ModifiedTimeBefore?: Date; /** - *A string in the data quality monitoring job definition name. This filter returns only - * data quality monitoring job definitions whose name contains the specified string.
+ *A filter that returns only AppImageConfigs modified on or after the specified time.
*/ - NameContains?: string; + ModifiedTimeAfter?: Date; /** - *A filter that returns only data quality monitoring job definitions created before the - * specified time.
+ *The property used to sort results. The default value is CreationTime
.
A filter that returns only data quality monitoring job definitions created after the - * specified time.
+ *The sort order. The default value is Descending
.
Summary information about a monitoring job.
- */ -export interface MonitoringJobDefinitionSummary { +export interface ListAppImageConfigsResponse { /** - *The name of the monitoring job.
+ *A token for getting the next set of AppImageConfigs, if there are any.
*/ - MonitoringJobDefinitionName: string | undefined; + NextToken?: string; /** - *The Amazon Resource Name (ARN) of the monitoring job.
+ *A list of AppImageConfigs and their properties.
*/ - MonitoringJobDefinitionArn: string | undefined; + AppImageConfigs?: AppImageConfigDetails[]; +} +export namespace ListAppImageConfigsResponse { /** - *The time that the monitoring job was created.
+ * @internal */ - CreationTime: Date | undefined; + export const filterSensitiveLog = (obj: ListAppImageConfigsResponse): any => ({ + ...obj, + }); +} +export interface ListAppsRequest { /** - *The name of the endpoint that the job monitors.
+ *If the previous response was truncated, you will receive this token. + * Use it in your next request to receive the next set of results.
*/ - EndpointName: string | undefined; + NextToken?: string; + + /** + *Returns a list up to a specified limit.
+ */ + MaxResults?: number; + + /** + *The sort order for the results. The default is Ascending.
+ */ + SortOrder?: SortOrder | string; + + /** + *The parameter by which to sort the results. The default is CreationTime.
+ */ + SortBy?: AppSortKey | string; + + /** + *A parameter to search for the domain ID.
+ */ + DomainIdEquals?: string; + + /** + *A parameter to search by user profile name.
+ */ + UserProfileNameEquals?: string; } -export namespace MonitoringJobDefinitionSummary { +export namespace ListAppsRequest { /** * @internal */ - export const filterSensitiveLog = (obj: MonitoringJobDefinitionSummary): any => ({ + export const filterSensitiveLog = (obj: ListAppsRequest): any => ({ ...obj, }); } -export interface ListDataQualityJobDefinitionsResponse { +export interface ListAppsResponse { /** - *A list of data quality monitoring job definitions.
+ *The list of apps.
*/ - JobDefinitionSummaries: MonitoringJobDefinitionSummary[] | undefined; + Apps?: AppDetails[]; /** - *If the result of the previous ListDataQualityJobDefinitions
request was
- * truncated, the response includes a NextToken
. To retrieve the next set of data
- * quality monitoring job definitions, use the token in the next request.
If the previous response was truncated, you will receive this token. + * Use it in your next request to receive the next set of results.
*/ NextToken?: string; } -export namespace ListDataQualityJobDefinitionsResponse { +export namespace ListAppsResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ListDataQualityJobDefinitionsResponse): any => ({ + export const filterSensitiveLog = (obj: ListAppsResponse): any => ({ ...obj, }); } -export enum ListDeviceFleetsSortBy { - CreationTime = "CREATION_TIME", - LastModifiedTime = "LAST_MODIFIED_TIME", - Name = "NAME", +export enum SortArtifactsBy { + CREATION_TIME = "CreationTime", } -export interface ListDeviceFleetsRequest { - /** - *The response from the last list when returning a list large enough to need tokening.
- */ - NextToken?: string; - +export interface ListArtifactsRequest { /** - *The maximum number of results to select.
+ *A filter that returns only artifacts with the specified source URI.
*/ - MaxResults?: number; + SourceUri?: string; /** - *Filter fleets where packaging job was created after specified time.
+ *A filter that returns only artifacts of the specified type.
*/ - CreationTimeAfter?: Date; + ArtifactType?: string; /** - *Filter fleets where the edge packaging job was created before specified time.
+ *A filter that returns only artifacts created on or after the specified time.
*/ - CreationTimeBefore?: Date; + CreatedAfter?: Date; /** - *Select fleets where the job was updated after X
+ *A filter that returns only artifacts created on or before the specified time.
*/ - LastModifiedTimeAfter?: Date; + CreatedBefore?: Date; /** - *Select fleets where the job was updated before X
+ *The property used to sort results. The default value is CreationTime
.
Filter for fleets containing this name in their fleet device name.
+ *The sort order. The default value is Descending
.
The column to sort by.
+ *If the previous call to ListArtifacts
didn't return the full set of artifacts,
+ * the call returns a token for getting the next set of artifacts.
What direction to sort in.
+ *The maximum number of artifacts to return in the response. The default value is 10.
*/ - SortOrder?: SortOrder | string; + MaxResults?: number; } -export namespace ListDeviceFleetsRequest { +export namespace ListArtifactsRequest { /** * @internal */ - export const filterSensitiveLog = (obj: ListDeviceFleetsRequest): any => ({ + export const filterSensitiveLog = (obj: ListArtifactsRequest): any => ({ ...obj, }); } -export interface ListDeviceFleetsResponse { +export interface ListArtifactsResponse { /** - *Summary of the device fleet.
+ *A list of artifacts and their properties.
*/ - DeviceFleetSummaries: DeviceFleetSummary[] | undefined; + ArtifactSummaries?: ArtifactSummary[]; /** - *The response from the last list when returning a list large enough to need tokening.
+ *A token for getting the next set of artifacts, if there are any.
*/ NextToken?: string; } -export namespace ListDeviceFleetsResponse { +export namespace ListArtifactsResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ListDeviceFleetsResponse): any => ({ + export const filterSensitiveLog = (obj: ListArtifactsResponse): any => ({ ...obj, }); } -export interface ListDevicesRequest { +export enum SortAssociationsBy { + CREATION_TIME = "CreationTime", + DESTINATION_ARN = "DestinationArn", + DESTINATION_TYPE = "DestinationType", + SOURCE_ARN = "SourceArn", + SOURCE_TYPE = "SourceType", +} + +export interface ListAssociationsRequest { /** - *The response from the last list when returning a list large enough to need tokening.
+ *A filter that returns only associations with the specified source ARN.
*/ - NextToken?: string; + SourceArn?: string; /** - *Maximum number of results to select.
+ *A filter that returns only associations with the specified destination Amazon Resource Name (ARN).
*/ - MaxResults?: number; + DestinationArn?: string; /** - *Select fleets where the job was updated after X
+ *A filter that returns only associations with the specified source type.
*/ - LatestHeartbeatAfter?: Date; + SourceType?: string; /** - *A filter that searches devices that contains this name in any of their models.
+ *A filter that returns only associations with the specified destination type.
*/ - ModelName?: string; + DestinationType?: string; /** - *Filter for fleets containing this name in their device fleet name.
+ *A filter that returns only associations of the specified type.
*/ - DeviceFleetName?: string; -} + AssociationType?: AssociationEdgeType | string; -export namespace ListDevicesRequest { /** - * @internal + *A filter that returns only associations created on or after the specified time.
*/ - export const filterSensitiveLog = (obj: ListDevicesRequest): any => ({ - ...obj, - }); -} + CreatedAfter?: Date; -export interface ListDevicesResponse { /** - *Summary of devices.
+ *A filter that returns only associations created on or before the specified time.
*/ - DeviceSummaries: DeviceSummary[] | undefined; + CreatedBefore?: Date; /** - *The response from the last list when returning a list large enough to need tokening.
+ *The property used to sort results. The default value is CreationTime
.
The sort order. The default value is Descending
.
If the previous response was truncated, you will receive this token. - * Use it in your next request to receive the next set of results.
+ *If the previous call to ListAssociations
didn't return the full set of associations,
+ * the call returns a token for getting the next set of associations.
Returns a list up to a specified limit.
+ *The maximum number of associations to return in the response. The default value is 10.
*/ MaxResults?: number; } -export namespace ListDomainsRequest { +export namespace ListAssociationsRequest { /** * @internal */ - export const filterSensitiveLog = (obj: ListDomainsRequest): any => ({ + export const filterSensitiveLog = (obj: ListAssociationsRequest): any => ({ ...obj, }); } -export interface ListDomainsResponse { +export interface ListAssociationsResponse { /** - *The list of domains.
+ *A list of associations and their properties.
*/ - Domains?: DomainDetails[]; + AssociationSummaries?: AssociationSummary[]; /** - *If the previous response was truncated, you will receive this token. - * Use it in your next request to receive the next set of results.
+ *A token for getting the next set of associations, if there are any.
*/ NextToken?: string; } -export namespace ListDomainsResponse { +export namespace ListAssociationsResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ListDomainsResponse): any => ({ + export const filterSensitiveLog = (obj: ListAssociationsResponse): any => ({ ...obj, }); } -export enum ListEdgePackagingJobsSortBy { - CreationTime = "CREATION_TIME", - EdgePackagingJobStatus = "STATUS", - LastModifiedTime = "LAST_MODIFIED_TIME", - ModelName = "MODEL_NAME", - Name = "NAME", -} - -export interface ListEdgePackagingJobsRequest { - /** - *The response from the last list when returning a list large enough to need tokening.
- */ - NextToken?: string; - - /** - *Maximum number of results to select.
- */ - MaxResults?: number; - +export interface ListAutoMLJobsRequest { /** - *Select jobs where the job was created after specified time.
+ *Request a list of jobs, using a filter for time.
*/ CreationTimeAfter?: Date; /** - *Select jobs where the job was created before specified time.
+ *Request a list of jobs, using a filter for time.
*/ CreationTimeBefore?: Date; /** - *Select jobs where the job was updated after specified time.
+ *Request a list of jobs, using a filter for time.
*/ LastModifiedTimeAfter?: Date; /** - *Select jobs where the job was updated before specified time.
+ *Request a list of jobs, using a filter for time.
*/ LastModifiedTimeBefore?: Date; /** - *Filter for jobs containing this name in their packaging job name.
+ *Request a list of jobs, using a search filter for name.
*/ NameContains?: string; /** - *Filter for jobs where the model name contains this string.
+ *Request a list of jobs, using a filter for status.
*/ - ModelNameContains?: string; + StatusEquals?: AutoMLJobStatus | string; /** - *The job status to filter for.
+ *The sort order for the results. The default is Descending
.
Use to specify what column to sort by.
+ *The parameter by which to sort the results. The default is Name
.
What direction to sort by.
+ *Request a list of jobs up to a specified limit.
*/ - SortOrder?: SortOrder | string; + MaxResults?: number; + + /** + *If the previous response was truncated, you receive this token. Use it in your next + * request to receive the next set of results.
+ */ + NextToken?: string; } -export namespace ListEdgePackagingJobsRequest { +export namespace ListAutoMLJobsRequest { /** * @internal */ - export const filterSensitiveLog = (obj: ListEdgePackagingJobsRequest): any => ({ + export const filterSensitiveLog = (obj: ListAutoMLJobsRequest): any => ({ ...obj, }); } -export interface ListEdgePackagingJobsResponse { +export interface ListAutoMLJobsResponse { /** - *Summaries of edge packaging jobs.
+ *Returns a summary list of jobs.
*/ - EdgePackagingJobSummaries: EdgePackagingJobSummary[] | undefined; + AutoMLJobSummaries: AutoMLJobSummary[] | undefined; /** - *Token to use when calling the next page of results.
+ *If the previous response was truncated, you receive this token. Use it in your next + * request to receive the next set of results.
*/ NextToken?: string; } -export namespace ListEdgePackagingJobsResponse { +export namespace ListAutoMLJobsResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ListEdgePackagingJobsResponse): any => ({ + export const filterSensitiveLog = (obj: ListAutoMLJobsResponse): any => ({ ...obj, }); } -export enum OrderKey { - Ascending = "Ascending", - Descending = "Descending", -} - -export interface ListEndpointConfigsInput { +export interface ListCandidatesForAutoMLJobRequest { /** - *The field to sort results by. The default is CreationTime
.
List the candidates created for the job by providing the job's name.
*/ - SortBy?: EndpointConfigSortKey | string; + AutoMLJobName: string | undefined; /** - *The sort order for results. The default is Descending
.
List the candidates for the job and filter by status.
*/ - SortOrder?: OrderKey | string; + StatusEquals?: CandidateStatus | string; /** - *If the result of the previous ListEndpointConfig
request was
- * truncated, the response includes a NextToken
. To retrieve the next set of
- * endpoint configurations, use the token in the next request.
List the candidates for the job and filter by candidate name.
*/ - NextToken?: string; + CandidateNameEquals?: string; /** - *The maximum number of training jobs to return in the response.
+ *The sort order for the results. The default is Ascending
.
A string in the endpoint configuration name. This filter returns only endpoint - * configurations whose name contains the specified string.
+ *The parameter by which to sort the results. The default is
+ * Descending
.
A filter that returns only endpoint configurations created before the specified - * time (timestamp).
+ *List the job's candidates up to a specified limit.
*/ - CreationTimeBefore?: Date; + MaxResults?: number; /** - *A filter that returns only endpoint configurations with a creation time greater - * than or equal to the specified time (timestamp).
+ *If the previous response was truncated, you receive this token. Use it in your next + * request to receive the next set of results.
*/ - CreationTimeAfter?: Date; + NextToken?: string; } -export namespace ListEndpointConfigsInput { +export namespace ListCandidatesForAutoMLJobRequest { /** * @internal */ - export const filterSensitiveLog = (obj: ListEndpointConfigsInput): any => ({ + export const filterSensitiveLog = (obj: ListCandidatesForAutoMLJobRequest): any => ({ ...obj, }); } -export interface ListEndpointConfigsOutput { +export interface ListCandidatesForAutoMLJobResponse { /** - *An array of endpoint configurations.
+ *Summaries about the AutoMLCandidates
.
If the response is truncated, SageMaker returns this token. To retrieve the next set of - * endpoint configurations, use it in the subsequent request
+ *If the previous response was truncated, you receive this token. Use it in your next + * request to receive the next set of results.
*/ NextToken?: string; } -export namespace ListEndpointConfigsOutput { +export namespace ListCandidatesForAutoMLJobResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ListEndpointConfigsOutput): any => ({ + export const filterSensitiveLog = (obj: ListCandidatesForAutoMLJobResponse): any => ({ ...obj, }); } -export interface ListEndpointsInput { - /** - *Sorts the list of results. The default is CreationTime
.
The sort order for results. The default is Descending
.
A filter that returns only Git repositories that were created after the specified + * time.
*/ - SortOrder?: OrderKey | string; + CreationTimeAfter?: Date; /** - *If the result of a ListEndpoints
request was truncated, the response
- * includes a NextToken
. To retrieve the next set of endpoints, use the token
- * in the next request.
A filter that returns only Git repositories that were created before the specified + * time.
*/ - NextToken?: string; + CreationTimeBefore?: Date; /** - *The maximum number of endpoints to return in the response. This value defaults to - * 10.
+ *A filter that returns only Git repositories that were last modified after the + * specified time.
*/ - MaxResults?: number; + LastModifiedTimeAfter?: Date; /** - *A string in endpoint names. This filter returns only endpoints whose name contains - * the specified string.
+ *A filter that returns only Git repositories that were last modified before the + * specified time.
*/ - NameContains?: string; + LastModifiedTimeBefore?: Date; /** - *A filter that returns only endpoints that were created before the specified time - * (timestamp).
+ *The maximum number of Git repositories to return in the response.
*/ - CreationTimeBefore?: Date; + MaxResults?: number; /** - *A filter that returns only endpoints with a creation time greater than or equal to - * the specified time (timestamp).
+ *A string in the Git repositories name. This filter returns only repositories whose + * name contains the specified string.
*/ - CreationTimeAfter?: Date; + NameContains?: string; /** - *A filter that returns only endpoints that were modified before the specified - * timestamp.
+ *If the result of a ListCodeRepositoriesOutput
request was truncated, the
+ * response includes a NextToken
. To get the next set of Git repositories, use
+ * the token in the next request.
A filter that returns only endpoints that were modified after the specified - * timestamp.
+ *The field to sort results by. The default is Name
.
A filter that returns only endpoints with the specified status.
+ *The sort order for results. The default is Ascending
.
An array or endpoint objects.
+ *Gets a list of summaries of the Git repositories. Each summary specifies the following + * values for the repository:
+ *Name
+ *Amazon Resource Name (ARN)
+ *Creation time
+ *Last modified time
+ *Configuration information, including the URL location of the repository and + * the ARN of the Amazon Web Services Secrets Manager secret that contains the credentials used + * to access the repository.
+ *If the response is truncated, SageMaker returns this token. To retrieve the next set of - * training jobs, use it in the subsequent request.
+ *If the result of a ListCodeRepositoriesOutput
request was truncated, the
+ * response includes a NextToken
. To get the next set of Git repositories, use
+ * the token in the next request.
A filter that returns only experiments created after the specified time.
+ *If the result of the previous ListCompilationJobs
request was truncated,
+ * the response includes a NextToken
. To retrieve the next set of model
+ * compilation jobs, use the token in the next request.
A filter that returns only experiments created before the specified time.
+ *The maximum number of model compilation jobs to return in the response.
*/ - CreatedBefore?: Date; + MaxResults?: number; /** - *The property used to sort results. The default value is CreationTime
.
A filter that returns the model compilation jobs that were created after a specified + * time.
*/ - SortBy?: SortExperimentsBy | string; + CreationTimeAfter?: Date; /** - *The sort order. The default value is Descending
.
A filter that returns the model compilation jobs that were created before a specified + * time.
*/ - SortOrder?: SortOrder | string; + CreationTimeBefore?: Date; /** - *If the previous call to ListExperiments
didn't return the full set of
- * experiments, the call returns a token for getting the next set of experiments.
A filter that returns the model compilation jobs that were modified after a specified + * time.
*/ - NextToken?: string; + LastModifiedTimeAfter?: Date; /** - *The maximum number of experiments to return in the response. The default value is - * 10.
+ *A filter that returns the model compilation jobs that were modified before a specified + * time.
*/ - MaxResults?: number; + LastModifiedTimeBefore?: Date; + + /** + *A filter that returns the model compilation jobs whose name contains a specified + * string.
+ */ + NameContains?: string; + + /** + *A filter that retrieves model compilation jobs with a specific DescribeCompilationJobResponse$CompilationJobStatus status.
+ */ + StatusEquals?: CompilationJobStatus | string; + + /** + *The field by which to sort results. The default is CreationTime
.
The sort order for results. The default is Ascending
.
A list of the summaries of your experiments.
+ *An array of CompilationJobSummary objects, each describing a model + * compilation job.
*/ - ExperimentSummaries?: ExperimentSummary[]; + CompilationJobSummaries: CompilationJobSummary[] | undefined; /** - *A token for getting the next set of experiments, if there are any.
+ *If the response is truncated, Amazon SageMaker returns this NextToken
. To retrieve
+ * the next set of model compilation jobs, use this token in the next request.
A string that partially matches one or more FeatureGroup
s names. Filters
- * FeatureGroup
s by name.
A FeatureGroup
status. Filters by FeatureGroup
status.
A filter that returns only contexts with the specified source URI.
*/ - FeatureGroupStatusEquals?: FeatureGroupStatus | string; + SourceUri?: string; /** - *An OfflineStore
status. Filters by OfflineStore
status.
A filter that returns only contexts of the specified type.
*/ - OfflineStoreStatusEquals?: OfflineStoreStatusValue | string; + ContextType?: string; /** - *Use this parameter to search for FeatureGroups
s created after a specific
- * date and time.
A filter that returns only contexts created on or after the specified time.
*/ - CreationTimeAfter?: Date; + CreatedAfter?: Date; /** - *Use this parameter to search for FeatureGroups
s created before a specific
- * date and time.
A filter that returns only contexts created on or before the specified time.
*/ - CreationTimeBefore?: Date; + CreatedBefore?: Date; /** - *The order in which feature groups are listed.
+ *The property used to sort results. The default value is CreationTime
.
The value on which the feature group list is sorted.
+ *The sort order. The default value is Descending
.
The maximum number of results returned by ListFeatureGroups
.
If the previous call to ListContexts
didn't return the full set of contexts,
+ * the call returns a token for getting the next set of contexts.
A token to resume pagination of ListFeatureGroups
results.
The maximum number of contexts to return in the response. The default value is 10.
*/ - NextToken?: string; + MaxResults?: number; } -export namespace ListFeatureGroupsRequest { +export namespace ListContextsRequest { /** * @internal */ - export const filterSensitiveLog = (obj: ListFeatureGroupsRequest): any => ({ + export const filterSensitiveLog = (obj: ListContextsRequest): any => ({ ...obj, }); } -export interface ListFeatureGroupsResponse { +export interface ListContextsResponse { /** - *A summary of feature groups.
+ *A list of contexts and their properties.
*/ - FeatureGroupSummaries: FeatureGroupSummary[] | undefined; + ContextSummaries?: ContextSummary[]; /** - *A token to resume pagination of ListFeatureGroups
results.
A token for getting the next set of contexts, if there are any.
*/ - NextToken: string | undefined; + NextToken?: string; } -export namespace ListFeatureGroupsResponse { +export namespace ListContextsResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ListFeatureGroupsResponse): any => ({ + export const filterSensitiveLog = (obj: ListContextsResponse): any => ({ ...obj, }); } -export interface ListFlowDefinitionsRequest { +export enum MonitoringJobDefinitionSortKey { + CREATION_TIME = "CreationTime", + NAME = "Name", +} + +export interface ListDataQualityJobDefinitionsRequest { /** - *A filter that returns only flow definitions with a creation time greater than or equal to the specified timestamp.
+ *A filter that lists the data quality job definitions associated with the specified + * endpoint.
*/ - CreationTimeAfter?: Date; + EndpointName?: string; /** - *A filter that returns only flow definitions that were created before the specified timestamp.
+ *The field to sort results by. The default is CreationTime
.
An optional value that specifies whether you want the results sorted in Ascending
or Descending
order.
The sort order for results. The default is Descending
.
A token to resume pagination.
+ *If the result of the previous ListDataQualityJobDefinitions
request was
+ * truncated, the response includes a NextToken
. To retrieve the next set of
+ * transform jobs, use the token in the next request.>
The total number of items to return. If the total number of available items is more than the value specified in MaxResults
, then a NextToken
will be provided in the output that you can use to resume pagination.
The maximum number of data quality monitoring job definitions to return in the + * response.
*/ MaxResults?: number; -} -export namespace ListFlowDefinitionsRequest { /** - * @internal + *A string in the data quality monitoring job definition name. This filter returns only + * data quality monitoring job definitions whose name contains the specified string.
*/ - export const filterSensitiveLog = (obj: ListFlowDefinitionsRequest): any => ({ - ...obj, - }); -} + NameContains?: string; -export interface ListFlowDefinitionsResponse { /** - *An array of objects describing the flow definitions.
+ *A filter that returns only data quality monitoring job definitions created before the + * specified time.
*/ - FlowDefinitionSummaries: FlowDefinitionSummary[] | undefined; + CreationTimeBefore?: Date; /** - *A token to resume pagination.
+ *A filter that returns only data quality monitoring job definitions created after the + * specified time.
*/ - NextToken?: string; + CreationTimeAfter?: Date; } -export namespace ListFlowDefinitionsResponse { +export namespace ListDataQualityJobDefinitionsRequest { /** * @internal */ - export const filterSensitiveLog = (obj: ListFlowDefinitionsResponse): any => ({ + export const filterSensitiveLog = (obj: ListDataQualityJobDefinitionsRequest): any => ({ ...obj, }); } -export interface ListHumanTaskUisRequest { - /** - *A filter that returns only human task user interfaces with a creation time greater than or equal to the specified timestamp.
- */ - CreationTimeAfter?: Date; - +/** + *Summary information about a monitoring job.
+ */ +export interface MonitoringJobDefinitionSummary { /** - *A filter that returns only human task user interfaces that were created before the specified timestamp.
+ *The name of the monitoring job.
*/ - CreationTimeBefore?: Date; + MonitoringJobDefinitionName: string | undefined; /** - *An optional value that specifies whether you want the results sorted in Ascending
or Descending
order.
The Amazon Resource Name (ARN) of the monitoring job.
*/ - SortOrder?: SortOrder | string; + MonitoringJobDefinitionArn: string | undefined; /** - *A token to resume pagination.
+ *The time that the monitoring job was created.
*/ - NextToken?: string; + CreationTime: Date | undefined; /** - *The total number of items to return. If the total number of available items is more than the value specified in MaxResults
, then a NextToken
will be provided in the output that you can use to resume pagination.
The name of the endpoint that the job monitors.
*/ - MaxResults?: number; + EndpointName: string | undefined; } -export namespace ListHumanTaskUisRequest { +export namespace MonitoringJobDefinitionSummary { /** * @internal */ - export const filterSensitiveLog = (obj: ListHumanTaskUisRequest): any => ({ + export const filterSensitiveLog = (obj: MonitoringJobDefinitionSummary): any => ({ ...obj, }); } -export interface ListHumanTaskUisResponse { +export interface ListDataQualityJobDefinitionsResponse { /** - *An array of objects describing the human task user interfaces.
+ *A list of data quality monitoring job definitions.
*/ - HumanTaskUiSummaries: HumanTaskUiSummary[] | undefined; + JobDefinitionSummaries: MonitoringJobDefinitionSummary[] | undefined; /** - *A token to resume pagination.
+ *If the result of the previous ListDataQualityJobDefinitions
request was
+ * truncated, the response includes a NextToken
. To retrieve the next set of data
+ * quality monitoring job definitions, use the token in the next request.
If the result of the previous ListHyperParameterTuningJobs
request was
- * truncated, the response includes a NextToken
. To retrieve the next set of
- * tuning jobs, use the token in the next request.
The response from the last list when returning a list large enough to need tokening.
*/ NextToken?: string; /** - *The - * maximum number of tuning jobs to return. The default value is - * 10.
+ *The maximum number of results to select.
*/ MaxResults?: number; /** - *The field to sort results by. The default is Name
.
The sort order for results. The default is Ascending
.
Filter fleets where packaging job was created after specified time.
*/ - SortOrder?: SortOrder | string; + CreationTimeAfter?: Date; /** - *A string in the tuning job name. This filter returns only tuning jobs whose name - * contains the specified string.
+ *Filter fleets where the edge packaging job was created before specified time.
*/ - NameContains?: string; + CreationTimeBefore?: Date; /** - *A filter that returns only tuning jobs that were created after the specified - * time.
+ *Select fleets where the job was updated after X
*/ - CreationTimeAfter?: Date; + LastModifiedTimeAfter?: Date; /** - *A filter that returns only tuning jobs that were created before the specified - * time.
+ *Select fleets where the job was updated before X
*/ - CreationTimeBefore?: Date; + LastModifiedTimeBefore?: Date; /** - *A filter that returns only tuning jobs that were modified after the specified - * time.
+ *Filter for fleets containing this name in their fleet device name.
*/ - LastModifiedTimeAfter?: Date; + NameContains?: string; /** - *A filter that returns only tuning jobs that were modified before the specified - * time.
+ *The column to sort by.
*/ - LastModifiedTimeBefore?: Date; + SortBy?: ListDeviceFleetsSortBy | string; /** - *A filter that returns only tuning jobs with the specified status.
+ *What direction to sort in.
*/ - StatusEquals?: HyperParameterTuningJobStatus | string; + SortOrder?: SortOrder | string; } -export namespace ListHyperParameterTuningJobsRequest { +export namespace ListDeviceFleetsRequest { /** * @internal */ - export const filterSensitiveLog = (obj: ListHyperParameterTuningJobsRequest): any => ({ + export const filterSensitiveLog = (obj: ListDeviceFleetsRequest): any => ({ ...obj, }); } -export interface ListHyperParameterTuningJobsResponse { +export interface ListDeviceFleetsResponse { /** - *A list of HyperParameterTuningJobSummary objects that
- * describe
- * the tuning jobs that the ListHyperParameterTuningJobs
- * request returned.
Summary of the device fleet.
*/ - HyperParameterTuningJobSummaries: HyperParameterTuningJobSummary[] | undefined; + DeviceFleetSummaries: DeviceFleetSummary[] | undefined; /** - *If the result of this ListHyperParameterTuningJobs
request was truncated,
- * the response includes a NextToken
. To retrieve the next set of tuning jobs,
- * use the token in the next request.
The response from the last list when returning a list large enough to need tokening.
*/ NextToken?: string; } -export namespace ListHyperParameterTuningJobsResponse { +export namespace ListDeviceFleetsResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ListHyperParameterTuningJobsResponse): any => ({ + export const filterSensitiveLog = (obj: ListDeviceFleetsResponse): any => ({ ...obj, }); } -export interface ListImagesRequest { +export interface ListDevicesRequest { /** - *A filter that returns only images created on or after the specified time.
+ *The response from the last list when returning a list large enough to need tokening.
*/ - CreationTimeAfter?: Date; + NextToken?: string; /** - *A filter that returns only images created on or before the specified time.
+ *Maximum number of results to select.
*/ - CreationTimeBefore?: Date; + MaxResults?: number; /** - *A filter that returns only images modified on or after the specified time.
+ *Select fleets where the job was updated after X
*/ - LastModifiedTimeAfter?: Date; + LatestHeartbeatAfter?: Date; /** - *A filter that returns only images modified on or before the specified time.
+ *A filter that searches devices that contains this name in any of their models.
*/ - LastModifiedTimeBefore?: Date; + ModelName?: string; /** - *The maximum number of images to return in the response. The default value is 10.
+ *Filter for fleets containing this name in their device fleet name.
*/ - MaxResults?: number; + DeviceFleetName?: string; +} +export namespace ListDevicesRequest { /** - *A filter that returns only images whose name contains the specified string.
+ * @internal */ - NameContains?: string; + export const filterSensitiveLog = (obj: ListDevicesRequest): any => ({ + ...obj, + }); +} +export interface ListDevicesResponse { /** - *If the previous call to ListImages
didn't return the full set of images,
- * the call returns a token for getting the next set of images.
Summary of devices.
+ */ + DeviceSummaries: DeviceSummary[] | undefined; + + /** + *The response from the last list when returning a list large enough to need tokening.
*/ NextToken?: string; +} +export namespace ListDevicesResponse { /** - *The property used to sort results. The default value is CREATION_TIME
.
The sort order. The default value is DESCENDING
.
If the previous response was truncated, you will receive this token. + * Use it in your next request to receive the next set of results.
*/ - SortOrder?: ImageSortOrder | string; + NextToken?: string; + + /** + *Returns a list up to a specified limit.
+ */ + MaxResults?: number; } -export namespace ListImagesRequest { +export namespace ListDomainsRequest { /** * @internal */ - export const filterSensitiveLog = (obj: ListImagesRequest): any => ({ + export const filterSensitiveLog = (obj: ListDomainsRequest): any => ({ ...obj, }); } -export interface ListImagesResponse { +export interface ListDomainsResponse { /** - *A list of images and their properties.
+ *The list of domains.
*/ - Images?: Image[]; + Domains?: DomainDetails[]; /** - *A token for getting the next set of images, if there are any.
+ *If the previous response was truncated, you will receive this token. + * Use it in your next request to receive the next set of results.
*/ NextToken?: string; } -export namespace ListImagesResponse { +export namespace ListDomainsResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ListImagesResponse): any => ({ + export const filterSensitiveLog = (obj: ListDomainsResponse): any => ({ ...obj, }); } -export interface ListImageVersionsRequest { +export enum ListEdgeDeploymentPlansSortBy { + CreationTime = "CREATION_TIME", + DeviceFleetName = "DEVICE_FLEET_NAME", + LastModifiedTime = "LAST_MODIFIED_TIME", + Name = "NAME", +} + +export interface ListEdgeDeploymentPlansRequest { /** - *A filter that returns only versions created on or after the specified time.
+ *The response from the last list when returning a list large enough to need tokening.
*/ - CreationTimeAfter?: Date; + NextToken?: string; /** - *A filter that returns only versions created on or before the specified time.
+ *The maximum number of results to select (50 by default).
*/ - CreationTimeBefore?: Date; + MaxResults?: number; /** - *The name of the image to list the versions of.
+ *Selects edge deployment plans created after this time.
*/ - ImageName: string | undefined; + CreationTimeAfter?: Date; /** - *A filter that returns only versions modified on or after the specified time.
+ *Selects edge deployment plans created before this time.
+ */ + CreationTimeBefore?: Date; + + /** + *Selects edge deployment plans that were last updated after this time.
*/ LastModifiedTimeAfter?: Date; /** - *A filter that returns only versions modified on or before the specified time.
+ *Selects edge deployment plans that were last updated before this time.
*/ LastModifiedTimeBefore?: Date; /** - *The maximum number of versions to return in the response. The default value is 10.
+ *Selects edge deployment plans with names containing this name.
*/ - MaxResults?: number; + NameContains?: string; /** - *If the previous call to ListImageVersions
didn't return the full set of
- * versions, the call returns a token for getting the next set of versions.
Selects edge deployment plans with a device fleet name containing this name.
*/ - NextToken?: string; + DeviceFleetNameContains?: string; /** - *The property used to sort results. The default value is CREATION_TIME
.
The column by which to sort the edge deployment plans. Can be one of NAME
, DEVICEFLEETNAME
, CREATIONTIME
, LASTMODIFIEDTIME
.
The sort order. The default value is DESCENDING
.
The direction of the sorting (ascending or descending).
*/ - SortOrder?: ImageVersionSortOrder | string; + SortOrder?: SortOrder | string; } -export namespace ListImageVersionsRequest { +export namespace ListEdgeDeploymentPlansRequest { /** * @internal */ - export const filterSensitiveLog = (obj: ListImageVersionsRequest): any => ({ + export const filterSensitiveLog = (obj: ListEdgeDeploymentPlansRequest): any => ({ ...obj, }); } -export interface ListImageVersionsResponse { +export interface ListEdgeDeploymentPlansResponse { /** - *A list of versions and their properties.
+ *List of summaries of edge deployment plans.
*/ - ImageVersions?: ImageVersion[]; + EdgeDeploymentPlanSummaries: EdgeDeploymentPlanSummary[] | undefined; /** - *A token for getting the next set of versions, if there are any.
+ *The token to use when calling the next page of results.
*/ NextToken?: string; } -export namespace ListImageVersionsResponse { +export namespace ListEdgeDeploymentPlansResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ListImageVersionsResponse): any => ({ + export const filterSensitiveLog = (obj: ListEdgeDeploymentPlansResponse): any => ({ ...obj, }); } -export enum ListInferenceRecommendationsJobsSortBy { - CREATION_TIME = "CreationTime", - NAME = "Name", - STATUS = "Status", +export enum ListEdgePackagingJobsSortBy { + CreationTime = "CREATION_TIME", + EdgePackagingJobStatus = "STATUS", + LastModifiedTime = "LAST_MODIFIED_TIME", + ModelName = "MODEL_NAME", + Name = "NAME", } -export interface ListInferenceRecommendationsJobsRequest { +export interface ListEdgePackagingJobsRequest { /** - *A filter that returns only jobs created after the specified time (timestamp).
+ *The response from the last list when returning a list large enough to need tokening.
+ */ + NextToken?: string; + + /** + *Maximum number of results to select.
+ */ + MaxResults?: number; + + /** + *Select jobs where the job was created after specified time.
*/ CreationTimeAfter?: Date; /** - *A filter that returns only jobs created before the specified time (timestamp).
+ *Select jobs where the job was created before specified time.
*/ CreationTimeBefore?: Date; /** - *A filter that returns only jobs that were last modified after the specified time (timestamp).
+ *Select jobs where the job was updated after specified time.
*/ LastModifiedTimeAfter?: Date; /** - *A filter that returns only jobs that were last modified before the specified time (timestamp).
+ *Select jobs where the job was updated before specified time.
*/ LastModifiedTimeBefore?: Date; /** - *A string in the job name. This filter returns only recommendations whose name contains the specified string.
+ *Filter for jobs containing this name in their packaging job name.
*/ NameContains?: string; /** - *A filter that retrieves only inference recommendations jobs with a specific status.
- */ - StatusEquals?: RecommendationJobStatus | string; - - /** - *The parameter by which to sort the results.
+ *Filter for jobs where the model name contains this string.
*/ - SortBy?: ListInferenceRecommendationsJobsSortBy | string; + ModelNameContains?: string; /** - *The sort order for the results.
+ *The job status to filter for.
*/ - SortOrder?: SortOrder | string; + StatusEquals?: EdgePackagingJobStatus | string; /** - *If the response to a previous ListInferenceRecommendationsJobsRequest
request
- * was truncated, the response includes a NextToken
. To retrieve the next set
- * of recommendations, use the token in the next request.
Use to specify what column to sort by.
*/ - NextToken?: string; + SortBy?: ListEdgePackagingJobsSortBy | string; /** - *The maximum number of recommendations to return in the response.
+ *What direction to sort by.
*/ - MaxResults?: number; + SortOrder?: SortOrder | string; } -export namespace ListInferenceRecommendationsJobsRequest { +export namespace ListEdgePackagingJobsRequest { /** * @internal */ - export const filterSensitiveLog = (obj: ListInferenceRecommendationsJobsRequest): any => ({ + export const filterSensitiveLog = (obj: ListEdgePackagingJobsRequest): any => ({ ...obj, }); } -export interface ListInferenceRecommendationsJobsResponse { +export interface ListEdgePackagingJobsResponse { /** - *The recommendations created from the Amazon SageMaker Inference Recommender job.
+ *Summaries of edge packaging jobs.
*/ - InferenceRecommendationsJobs: InferenceRecommendationsJob[] | undefined; + EdgePackagingJobSummaries: EdgePackagingJobSummary[] | undefined; /** - *A token for getting the next set of recommendations, if there are any.
+ *Token to use when calling the next page of results.
*/ NextToken?: string; } -export namespace ListInferenceRecommendationsJobsResponse { +export namespace ListEdgePackagingJobsResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ListInferenceRecommendationsJobsResponse): any => ({ + export const filterSensitiveLog = (obj: ListEdgePackagingJobsResponse): any => ({ ...obj, }); } -export enum SortBy { - CREATION_TIME = "CreationTime", - NAME = "Name", - STATUS = "Status", +export enum OrderKey { + Ascending = "Ascending", + Descending = "Descending", } -export interface ListLabelingJobsRequest { - /** - *A filter that returns only labeling jobs created after the specified time - * (timestamp).
- */ - CreationTimeAfter?: Date; - +export interface ListEndpointConfigsInput { /** - *A filter that returns only labeling jobs created before the specified time - * (timestamp).
+ *The field to sort results by. The default is CreationTime
.
A filter that returns only labeling jobs modified after the specified time - * (timestamp).
+ *The sort order for results. The default is Descending
.
A filter that returns only labeling jobs modified before the specified time - * (timestamp).
+ *If the result of the previous ListEndpointConfig
request was
+ * truncated, the response includes a NextToken
. To retrieve the next set of
+ * endpoint configurations, use the token in the next request.
The maximum number of labeling jobs to return in each page of the response.
+ *The maximum number of training jobs to return in the response.
*/ MaxResults?: number; /** - *If the result of the previous ListLabelingJobs
request was truncated, the
- * response includes a NextToken
. To retrieve the next set of labeling jobs,
- * use the token in the next request.
A string in the labeling job name. This filter returns only labeling jobs whose name - * contains the specified string.
+ *A string in the endpoint configuration name. This filter returns only endpoint + * configurations whose name contains the specified string.
*/ NameContains?: string; /** - *The field to sort results by. The default is CreationTime
.
The sort order for results. The default is Ascending
.
A filter that returns only endpoint configurations created before the specified + * time (timestamp).
*/ - SortOrder?: SortOrder | string; + CreationTimeBefore?: Date; /** - *A filter that retrieves only labeling jobs with a specific status.
+ *A filter that returns only endpoint configurations with a creation time greater + * than or equal to the specified time (timestamp).
*/ - StatusEquals?: LabelingJobStatus | string; + CreationTimeAfter?: Date; } -export namespace ListLabelingJobsRequest { +export namespace ListEndpointConfigsInput { /** * @internal */ - export const filterSensitiveLog = (obj: ListLabelingJobsRequest): any => ({ + export const filterSensitiveLog = (obj: ListEndpointConfigsInput): any => ({ ...obj, }); } -export interface ListLabelingJobsResponse { +export interface ListEndpointConfigsOutput { /** - *An array of LabelingJobSummary
objects, each describing a labeling
- * job.
An array of endpoint configurations.
*/ - LabelingJobSummaryList?: LabelingJobSummary[]; + EndpointConfigs: EndpointConfigSummary[] | undefined; /** - *If the response is truncated, SageMaker returns this token. To retrieve the next set of - * labeling jobs, use it in the subsequent request.
+ *If the response is truncated, SageMaker returns this token. To retrieve the next set of + * endpoint configurations, use it in the subsequent request
*/ NextToken?: string; } -export namespace ListLabelingJobsResponse { +export namespace ListEndpointConfigsOutput { /** * @internal */ - export const filterSensitiveLog = (obj: ListLabelingJobsResponse): any => ({ + export const filterSensitiveLog = (obj: ListEndpointConfigsOutput): any => ({ ...obj, }); } -export enum ListLabelingJobsForWorkteamSortByOptions { - CREATION_TIME = "CreationTime", -} - -export interface ListLabelingJobsForWorkteamRequest { +export interface ListEndpointsInput { /** - *The Amazon Resource Name (ARN) of the work team for which you want to see labeling - * jobs for.
+ *Sorts the list of results. The default is CreationTime
.
The maximum number of labeling jobs to return in each page of the response.
+ *The sort order for results. The default is Descending
.
If the result of the previous ListLabelingJobsForWorkteam
request was
- * truncated, the response includes a NextToken
. To retrieve the next set of
- * labeling jobs, use the token in the next request.
If the result of a ListEndpoints
request was truncated, the response
+ * includes a NextToken
. To retrieve the next set of endpoints, use the token
+ * in the next request.
A filter that returns only labeling jobs created after the specified time - * (timestamp).
+ *The maximum number of endpoints to return in the response. This value defaults to + * 10.
*/ - CreationTimeAfter?: Date; + MaxResults?: number; /** - *A filter that returns only labeling jobs created before the specified time + *
A string in endpoint names. This filter returns only endpoints whose name contains + * the specified string.
+ */ + NameContains?: string; + + /** + *A filter that returns only endpoints that were created before the specified time * (timestamp).
*/ CreationTimeBefore?: Date; /** - *A filter the limits jobs to only the ones whose job reference code contains the - * specified string.
+ *A filter that returns only endpoints with a creation time greater than or equal to + * the specified time (timestamp).
*/ - JobReferenceCodeContains?: string; + CreationTimeAfter?: Date; /** - *The field to sort results by. The default is CreationTime
.
A filter that returns only endpoints that were modified before the specified + * timestamp.
*/ - SortBy?: ListLabelingJobsForWorkteamSortByOptions | string; + LastModifiedTimeBefore?: Date; /** - *The sort order for results. The default is Ascending
.
A filter that returns only endpoints that were modified after the specified + * timestamp.
*/ - SortOrder?: SortOrder | string; + LastModifiedTimeAfter?: Date; + + /** + *A filter that returns only endpoints with the specified status.
+ */ + StatusEquals?: EndpointStatus | string; } -export namespace ListLabelingJobsForWorkteamRequest { +export namespace ListEndpointsInput { /** * @internal */ - export const filterSensitiveLog = (obj: ListLabelingJobsForWorkteamRequest): any => ({ + export const filterSensitiveLog = (obj: ListEndpointsInput): any => ({ ...obj, }); } -export interface ListLabelingJobsForWorkteamResponse { +export interface ListEndpointsOutput { /** - *An array of LabelingJobSummary
objects, each describing a labeling
- * job.
An array or endpoint objects.
*/ - LabelingJobSummaryList: LabelingJobForWorkteamSummary[] | undefined; + Endpoints: EndpointSummary[] | undefined; /** - *If the response is truncated, SageMaker returns this token. To retrieve the next set of - * labeling jobs, use it in the subsequent request.
+ *If the response is truncated, SageMaker returns this token. To retrieve the next set of + * training jobs, use it in the subsequent request.
*/ NextToken?: string; } -export namespace ListLabelingJobsForWorkteamResponse { +export namespace ListEndpointsOutput { /** * @internal */ - export const filterSensitiveLog = (obj: ListLabelingJobsForWorkteamResponse): any => ({ + export const filterSensitiveLog = (obj: ListEndpointsOutput): any => ({ ...obj, }); } -export enum SortLineageGroupsBy { +export enum SortExperimentsBy { CREATION_TIME = "CreationTime", NAME = "Name", } -export interface ListLineageGroupsRequest { +export interface ListExperimentsRequest { /** - *A timestamp to filter against lineage groups created after a certain point in time.
+ *A filter that returns only experiments created after the specified time.
*/ CreatedAfter?: Date; /** - *A timestamp to filter against lineage groups created before a certain point in time.
+ *A filter that returns only experiments created before the specified time.
*/ CreatedBefore?: Date; /** - *The parameter by which to sort the results. The default is
- * CreationTime
.
The property used to sort results. The default value is CreationTime
.
The sort order for the results. The default is Ascending
.
The sort order. The default value is Descending
.
If the response is truncated, SageMaker returns this token. To retrieve the next set of - * algorithms, use it in the subsequent request.
+ *If the previous call to ListExperiments
didn't return the full set of
+ * experiments, the call returns a token for getting the next set of experiments.
The maximum number of endpoints to return in the response. This value defaults to - * 10.
+ *The maximum number of experiments to return in the response. The default value is + * 10.
*/ MaxResults?: number; } -export namespace ListLineageGroupsRequest { +export namespace ListExperimentsRequest { /** * @internal */ - export const filterSensitiveLog = (obj: ListLineageGroupsRequest): any => ({ + export const filterSensitiveLog = (obj: ListExperimentsRequest): any => ({ ...obj, }); } -export interface ListLineageGroupsResponse { +export interface ListExperimentsResponse { /** - *A list of lineage groups and their properties.
+ *A list of the summaries of your experiments.
*/ - LineageGroupSummaries?: LineageGroupSummary[]; + ExperimentSummaries?: ExperimentSummary[]; /** - *If the response is truncated, SageMaker returns this token. To retrieve the next set of - * algorithms, use it in the subsequent request.
+ *A token for getting the next set of experiments, if there are any.
*/ NextToken?: string; } -export namespace ListLineageGroupsResponse { +export namespace ListExperimentsResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ListLineageGroupsResponse): any => ({ + export const filterSensitiveLog = (obj: ListExperimentsResponse): any => ({ ...obj, }); } -export interface ListModelBiasJobDefinitionsRequest { +export interface ListFeatureGroupsRequest { /** - *Name of the endpoint to monitor for model bias.
+ *A string that partially matches one or more FeatureGroup
s names. Filters
+ * FeatureGroup
s by name.
Whether to sort results by the Name
or CreationTime
field. The
- * default is CreationTime
.
A FeatureGroup
status. Filters by FeatureGroup
status.
Whether to sort the results in Ascending
or Descending
order.
- * The default is Descending
.
An OfflineStore
status. Filters by OfflineStore
status.
The token returned if the response is truncated. To retrieve the next set of job - * executions, use it in the next request.
+ *Use this parameter to search for FeatureGroups
s created after a specific
+ * date and time.
The maximum number of model bias jobs to return in the response. The default value is - * 10.
+ *Use this parameter to search for FeatureGroups
s created before a specific
+ * date and time.
Filter for model bias jobs whose name contains a specified string.
+ *The order in which feature groups are listed.
*/ - NameContains?: string; + SortOrder?: FeatureGroupSortOrder | string; /** - *A filter that returns only model bias jobs created before a specified time.
+ *The value on which the feature group list is sorted.
*/ - CreationTimeBefore?: Date; + SortBy?: FeatureGroupSortBy | string; /** - *A filter that returns only model bias jobs created after a specified time.
+ *The maximum number of results returned by ListFeatureGroups
.
A token to resume pagination of ListFeatureGroups
results.
A JSON array in which each element is a summary for a model bias jobs.
+ *A summary of feature groups.
*/ - JobDefinitionSummaries: MonitoringJobDefinitionSummary[] | undefined; + FeatureGroupSummaries: FeatureGroupSummary[] | undefined; /** - *If the response is truncated, Amazon SageMaker returns this token. To retrieve the next set of jobs, - * use it in the subsequent request.
+ *A token to resume pagination of ListFeatureGroups
results.
Name of the endpoint to monitor for model explainability.
+ *A filter that returns only flow definitions with a creation time greater than or equal to the specified timestamp.
*/ - EndpointName?: string; + CreationTimeAfter?: Date; /** - *Whether to sort results by the Name
or CreationTime
field. The
- * default is CreationTime
.
A filter that returns only flow definitions that were created before the specified timestamp.
*/ - SortBy?: MonitoringJobDefinitionSortKey | string; + CreationTimeBefore?: Date; /** - *Whether to sort the results in Ascending
or Descending
order.
- * The default is Descending
.
An optional value that specifies whether you want the results sorted in Ascending
or Descending
order.
The token returned if the response is truncated. To retrieve the next set of job - * executions, use it in the next request.
+ *A token to resume pagination.
*/ NextToken?: string; /** - *The maximum number of jobs to return in the response. The default value is 10.
+ *The total number of items to return. If the total number of available items is more than the value specified in MaxResults
, then a NextToken
will be provided in the output that you can use to resume pagination.
Filter for model explainability jobs whose name contains a specified string.
+ * @internal */ - NameContains?: string; + export const filterSensitiveLog = (obj: ListFlowDefinitionsRequest): any => ({ + ...obj, + }); +} +export interface ListFlowDefinitionsResponse { /** - *A filter that returns only model explainability jobs created before a specified - * time.
+ *An array of objects describing the flow definitions.
*/ - CreationTimeBefore?: Date; + FlowDefinitionSummaries: FlowDefinitionSummary[] | undefined; /** - *A filter that returns only model explainability jobs created after a specified - * time.
+ *A token to resume pagination.
*/ - CreationTimeAfter?: Date; + NextToken?: string; } -export namespace ListModelExplainabilityJobDefinitionsRequest { +export namespace ListFlowDefinitionsResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ListModelExplainabilityJobDefinitionsRequest): any => ({ + export const filterSensitiveLog = (obj: ListFlowDefinitionsResponse): any => ({ ...obj, }); } -export interface ListModelExplainabilityJobDefinitionsResponse { +export interface ListHumanTaskUisRequest { /** - *A JSON array in which each element is a summary for a explainability bias jobs.
+ *A filter that returns only human task user interfaces with a creation time greater than or equal to the specified timestamp.
*/ - JobDefinitionSummaries: MonitoringJobDefinitionSummary[] | undefined; + CreationTimeAfter?: Date; /** - *If the response is truncated, Amazon SageMaker returns this token. To retrieve the next set of jobs, - * use it in the subsequent request.
+ *A filter that returns only human task user interfaces that were created before the specified timestamp.
*/ - NextToken?: string; -} + CreationTimeBefore?: Date; -export namespace ListModelExplainabilityJobDefinitionsResponse { /** - * @internal + *An optional value that specifies whether you want the results sorted in Ascending
or Descending
order.
Part of the search expression. You can specify the name and value - * (domain, task, framework, framework version, task, and model).
- */ -export interface ModelMetadataFilter { /** - *The name of the of the model to filter by.
+ *A token to resume pagination.
*/ - Name: ModelMetadataFilterType | string | undefined; + NextToken?: string; /** - *The value to filter the model metadata.
+ *The total number of items to return. If the total number of available items is more than the value specified in MaxResults
, then a NextToken
will be provided in the output that you can use to resume pagination.
One or more filters that searches for the specified resource or resources in - * a search. All resource objects that satisfy the expression's condition are - * included in the search results
- */ -export interface ModelMetadataSearchExpression { +export interface ListHumanTaskUisResponse { /** - *A list of filter objects.
+ *An array of objects describing the human task user interfaces.
*/ - Filters?: ModelMetadataFilter[]; + HumanTaskUiSummaries: HumanTaskUiSummary[] | undefined; + + /** + *A token to resume pagination.
+ */ + NextToken?: string; } -export namespace ModelMetadataSearchExpression { +export namespace ListHumanTaskUisResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ModelMetadataSearchExpression): any => ({ + export const filterSensitiveLog = (obj: ListHumanTaskUisResponse): any => ({ ...obj, }); } -export interface ListModelMetadataRequest { +export interface ListHyperParameterTuningJobsRequest { /** - *One or more filters that searches for the specified resource or resources - * in a search. All resource objects that satisfy the expression's condition are - * included in the search results. Specify the Framework, FrameworkVersion, Domain - * or Task to filter supported. Filter names and values are case-sensitive.
+ *If the result of the previous ListHyperParameterTuningJobs
request was
+ * truncated, the response includes a NextToken
. To retrieve the next set of
+ * tuning jobs, use the token in the next request.
If the response to a previous ListModelMetadataResponse
request was truncated,
- * the response includes a NextToken. To retrieve the next set of model metadata,
- * use the token in the next request.
The + * maximum number of tuning jobs to return. The default value is + * 10.
*/ - NextToken?: string; + MaxResults?: number; /** - *The maximum number of models to return in the response.
+ *The field to sort results by. The default is Name
.
The sort order for results. The default is Ascending
.
A summary of the model metadata.
- */ -export interface ModelMetadataSummary { /** - *The machine learning domain of the model.
+ *A string in the tuning job name. This filter returns only tuning jobs whose name + * contains the specified string.
*/ - Domain: string | undefined; + NameContains?: string; /** - *The machine learning framework of the model.
+ *A filter that returns only tuning jobs that were created after the specified + * time.
*/ - Framework: string | undefined; + CreationTimeAfter?: Date; /** - *The machine learning task of the model.
+ *A filter that returns only tuning jobs that were created before the specified + * time.
*/ - Task: string | undefined; + CreationTimeBefore?: Date; /** - *The name of the model.
+ *A filter that returns only tuning jobs that were modified after the specified + * time.
*/ - Model: string | undefined; + LastModifiedTimeAfter?: Date; /** - *The framework version of the model.
+ *A filter that returns only tuning jobs that were modified before the specified + * time.
*/ - FrameworkVersion: string | undefined; + LastModifiedTimeBefore?: Date; + + /** + *A filter that returns only tuning jobs with the specified status.
+ */ + StatusEquals?: HyperParameterTuningJobStatus | string; } -export namespace ModelMetadataSummary { +export namespace ListHyperParameterTuningJobsRequest { /** * @internal */ - export const filterSensitiveLog = (obj: ModelMetadataSummary): any => ({ + export const filterSensitiveLog = (obj: ListHyperParameterTuningJobsRequest): any => ({ ...obj, }); } -export interface ListModelMetadataResponse { +export interface ListHyperParameterTuningJobsResponse { /** - *A structure that holds model metadata.
+ *A list of HyperParameterTuningJobSummary objects that
+ * describe
+ * the tuning jobs that the ListHyperParameterTuningJobs
+ * request returned.
A token for getting the next set of recommendations, if there are any.
+ *If the result of this ListHyperParameterTuningJobs
request was truncated,
+ * the response includes a NextToken
. To retrieve the next set of tuning jobs,
+ * use the token in the next request.
A filter that returns only model groups created after the specified time.
+ *A filter that returns only images created on or after the specified time.
*/ CreationTimeAfter?: Date; /** - *A filter that returns only model groups created before the specified time.
+ *A filter that returns only images created on or before the specified time.
*/ CreationTimeBefore?: Date; /** - *The maximum number of results to return in the response.
+ *A filter that returns only images modified on or after the specified time.
+ */ + LastModifiedTimeAfter?: Date; + + /** + *A filter that returns only images modified on or before the specified time.
+ */ + LastModifiedTimeBefore?: Date; + + /** + *The maximum number of images to return in the response. The default value is 10.
*/ MaxResults?: number; /** - *A string in the model group name. This filter returns only model groups whose name - * contains the specified string.
+ *A filter that returns only images whose name contains the specified string.
*/ NameContains?: string; /** - *If the result of the previous ListModelPackageGroups
request was
- * truncated, the response includes a NextToken
. To retrieve the next set of
- * model groups, use the token in the next request.
If the previous call to ListImages
didn't return the full set of images,
+ * the call returns a token for getting the next set of images.
The field to sort results by. The default is CreationTime
.
The property used to sort results. The default value is CREATION_TIME
.
The sort order for results. The default is Ascending
.
The sort order. The default value is DESCENDING
.
Summary information about a model group.
- */ -export interface ModelPackageGroupSummary { - /** - *The name of the model group.
- */ - ModelPackageGroupName: string | undefined; - - /** - *The Amazon Resource Name (ARN) of the model group.
- */ - ModelPackageGroupArn: string | undefined; - - /** - *A description of the model group.
- */ - ModelPackageGroupDescription?: string; - +export interface ListImagesResponse { /** - *The time that the model group was created.
+ *A list of images and their properties.
*/ - CreationTime: Date | undefined; + Images?: Image[]; /** - *The status of the model group.
+ *A token for getting the next set of images, if there are any.
*/ - ModelPackageGroupStatus: ModelPackageGroupStatus | string | undefined; + NextToken?: string; } -export namespace ModelPackageGroupSummary { +export namespace ListImagesResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ModelPackageGroupSummary): any => ({ + export const filterSensitiveLog = (obj: ListImagesResponse): any => ({ ...obj, }); } -export interface ListModelPackageGroupsOutput { +export interface ListImageVersionsRequest { /** - *A list of summaries of the model groups in your Amazon Web Services account.
+ *A filter that returns only versions created on or after the specified time.
*/ - ModelPackageGroupSummaryList: ModelPackageGroupSummary[] | undefined; + CreationTimeAfter?: Date; /** - *If the response is truncated, SageMaker returns this token. To retrieve the next set - * of model groups, use it in the subsequent request.
+ *A filter that returns only versions created on or before the specified time.
*/ - NextToken?: string; -} + CreationTimeBefore?: Date; -export namespace ListModelPackageGroupsOutput { /** - * @internal + *The name of the image to list the versions of.
*/ - export const filterSensitiveLog = (obj: ListModelPackageGroupsOutput): any => ({ - ...obj, - }); -} - -export enum ModelPackageType { - BOTH = "Both", - UNVERSIONED = "Unversioned", - VERSIONED = "Versioned", -} - -export enum ModelPackageSortBy { - CREATION_TIME = "CreationTime", - NAME = "Name", -} + ImageName: string | undefined; -export interface ListModelPackagesInput { /** - *A filter that returns only model packages created after the specified time - * (timestamp).
+ *A filter that returns only versions modified on or after the specified time.
*/ - CreationTimeAfter?: Date; + LastModifiedTimeAfter?: Date; /** - *A filter that returns only model packages created before the specified time - * (timestamp).
+ *A filter that returns only versions modified on or before the specified time.
*/ - CreationTimeBefore?: Date; + LastModifiedTimeBefore?: Date; /** - *The maximum number of model packages to return in the response.
+ *The maximum number of versions to return in the response. The default value is 10.
*/ MaxResults?: number; /** - *A string in the model package name. This filter returns only model packages whose name - * contains the specified string.
- */ - NameContains?: string; - - /** - *A filter that returns only the model packages with the specified approval - * status.
+ *If the previous call to ListImageVersions
didn't return the full set of
+ * versions, the call returns a token for getting the next set of versions.
A filter that returns only model versions that belong to the specified model group.
+ *The property used to sort results. The default value is CREATION_TIME
.
A filter that returns only the model packages of the specified type. This can be one - * of the following values.
- *
- * UNVERSIONED
- List only unversioined models.
- * This is the default value if no ModelPackageType
is specified.
- * VERSIONED
- List only versioned models.
- * BOTH
- List both versioned and unversioned models.
The sort order. The default value is DESCENDING
.
If the response to a previous ListModelPackages
request was truncated,
- * the response includes a NextToken
. To retrieve the next set of model
- * packages, use the token in the next request.
The parameter by which to sort the results. The default is
- * CreationTime
.
A list of versions and their properties.
*/ - SortBy?: ModelPackageSortBy | string; + ImageVersions?: ImageVersion[]; /** - *The sort order for the results. The default is Ascending
.
A token for getting the next set of versions, if there are any.
*/ - SortOrder?: SortOrder | string; + NextToken?: string; } -export namespace ListModelPackagesInput { +export namespace ListImageVersionsResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ListModelPackagesInput): any => ({ + export const filterSensitiveLog = (obj: ListImageVersionsResponse): any => ({ ...obj, }); } -/** - *Provides summary information about a model package.
- */ -export interface ModelPackageSummary { +export enum ListInferenceRecommendationsJobsSortBy { + CREATION_TIME = "CreationTime", + NAME = "Name", + STATUS = "Status", +} + +export interface ListInferenceRecommendationsJobsRequest { /** - *The name of the model package.
+ *A filter that returns only jobs created after the specified time (timestamp).
*/ - ModelPackageName: string | undefined; + CreationTimeAfter?: Date; /** - *If the model package is a versioned model, the model group that the versioned model - * belongs to.
+ *A filter that returns only jobs created before the specified time (timestamp).
*/ - ModelPackageGroupName?: string; + CreationTimeBefore?: Date; /** - *If the model package is a versioned model, the version of the model.
+ *A filter that returns only jobs that were last modified after the specified time (timestamp).
*/ - ModelPackageVersion?: number; + LastModifiedTimeAfter?: Date; /** - *The Amazon Resource Name (ARN) of the model package.
+ *A filter that returns only jobs that were last modified before the specified time (timestamp).
*/ - ModelPackageArn: string | undefined; + LastModifiedTimeBefore?: Date; /** - *A brief description of the model package.
+ *A string in the job name. This filter returns only recommendations whose name contains the specified string.
*/ - ModelPackageDescription?: string; + NameContains?: string; /** - *A timestamp that shows when the model package was created.
+ *A filter that retrieves only inference recommendations jobs with a specific status.
*/ - CreationTime: Date | undefined; + StatusEquals?: RecommendationJobStatus | string; /** - *The overall status of the model package.
+ *The parameter by which to sort the results.
*/ - ModelPackageStatus: ModelPackageStatus | string | undefined; + SortBy?: ListInferenceRecommendationsJobsSortBy | string; /** - *The approval status of the model. This can be one of the following values.
- *
- * APPROVED
- The model is approved
- * REJECTED
- The model is rejected.
- * PENDING_MANUAL_APPROVAL
- The model is waiting for manual
- * approval.
The sort order for the results.
+ */ + SortOrder?: SortOrder | string; + + /** + *If the response to a previous ListInferenceRecommendationsJobsRequest
request
+ * was truncated, the response includes a NextToken
. To retrieve the next set
+ * of recommendations, use the token in the next request.
The maximum number of recommendations to return in the response.
+ */ + MaxResults?: number; } -export namespace ModelPackageSummary { +export namespace ListInferenceRecommendationsJobsRequest { /** * @internal */ - export const filterSensitiveLog = (obj: ModelPackageSummary): any => ({ + export const filterSensitiveLog = (obj: ListInferenceRecommendationsJobsRequest): any => ({ ...obj, }); } -export interface ListModelPackagesOutput { +export interface ListInferenceRecommendationsJobsResponse { /** - *An array of ModelPackageSummary
objects, each of which lists a model
- * package.
The recommendations created from the Amazon SageMaker Inference Recommender job.
*/ - ModelPackageSummaryList: ModelPackageSummary[] | undefined; + InferenceRecommendationsJobs: InferenceRecommendationsJob[] | undefined; /** - *If the response is truncated, SageMaker returns this token. To retrieve the next set of - * model packages, use it in the subsequent request.
+ *A token for getting the next set of recommendations, if there are any.
*/ NextToken?: string; } -export namespace ListModelPackagesOutput { +export namespace ListInferenceRecommendationsJobsResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ListModelPackagesOutput): any => ({ + export const filterSensitiveLog = (obj: ListInferenceRecommendationsJobsResponse): any => ({ ...obj, }); } -export interface ListModelQualityJobDefinitionsRequest { +export enum SortBy { + CREATION_TIME = "CreationTime", + NAME = "Name", + STATUS = "Status", +} + +export interface ListLabelingJobsRequest { /** - *A filter that returns only model quality monitoring job definitions that are associated - * with the specified endpoint.
+ *A filter that returns only labeling jobs created after the specified time + * (timestamp).
*/ - EndpointName?: string; + CreationTimeAfter?: Date; /** - *The field to sort results by. The default is CreationTime
.
A filter that returns only labeling jobs created before the specified time + * (timestamp).
*/ - SortBy?: MonitoringJobDefinitionSortKey | string; + CreationTimeBefore?: Date; /** - *The sort order for results. The default is Descending
.
A filter that returns only labeling jobs modified after the specified time + * (timestamp).
*/ - SortOrder?: SortOrder | string; + LastModifiedTimeAfter?: Date; /** - *If the result of the previous ListModelQualityJobDefinitions
request was
- * truncated, the response includes a NextToken
. To retrieve the next set of
- * model quality monitoring job definitions, use the token in the next request.
A filter that returns only labeling jobs modified before the specified time + * (timestamp).
*/ - NextToken?: string; + LastModifiedTimeBefore?: Date; /** - *The maximum number of results to return in a call to
- * ListModelQualityJobDefinitions
.
The maximum number of labeling jobs to return in each page of the response.
*/ MaxResults?: number; /** - *A string in the transform job name. This filter returns only model quality monitoring - * job definitions whose name contains the specified string.
+ *If the result of the previous ListLabelingJobs
request was truncated, the
+ * response includes a NextToken
. To retrieve the next set of labeling jobs,
+ * use the token in the next request.
A string in the labeling job name. This filter returns only labeling jobs whose name + * contains the specified string.
*/ NameContains?: string; /** - *A filter that returns only model quality monitoring job definitions created before the - * specified time.
+ *The field to sort results by. The default is CreationTime
.
A filter that returns only model quality monitoring job definitions created after the - * specified time.
+ *The sort order for results. The default is Ascending
.
A filter that retrieves only labeling jobs with a specific status.
+ */ + StatusEquals?: LabelingJobStatus | string; } -export namespace ListModelQualityJobDefinitionsRequest { +export namespace ListLabelingJobsRequest { /** * @internal */ - export const filterSensitiveLog = (obj: ListModelQualityJobDefinitionsRequest): any => ({ + export const filterSensitiveLog = (obj: ListLabelingJobsRequest): any => ({ ...obj, }); } -export interface ListModelQualityJobDefinitionsResponse { +export interface ListLabelingJobsResponse { /** - *A list of summaries of model quality monitoring job definitions.
+ *An array of LabelingJobSummary
objects, each describing a labeling
+ * job.
If the response is truncated, Amazon SageMaker returns this token. To retrieve the next set of model - * quality monitoring job definitions, use it in the next request.
+ *If the response is truncated, SageMaker returns this token. To retrieve the next set of + * labeling jobs, use it in the subsequent request.
*/ NextToken?: string; } -export namespace ListModelQualityJobDefinitionsResponse { +export namespace ListLabelingJobsResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ListModelQualityJobDefinitionsResponse): any => ({ + export const filterSensitiveLog = (obj: ListLabelingJobsResponse): any => ({ ...obj, }); } -export enum ModelSortKey { - CreationTime = "CreationTime", - Name = "Name", +export enum ListLabelingJobsForWorkteamSortByOptions { + CREATION_TIME = "CreationTime", } -export interface ListModelsInput { +export interface ListLabelingJobsForWorkteamRequest { /** - *Sorts the list of results. The default is CreationTime
.
The Amazon Resource Name (ARN) of the work team for which you want to see labeling + * jobs for.
*/ - SortBy?: ModelSortKey | string; + WorkteamArn: string | undefined; /** - *The sort order for results. The default is Descending
.
The maximum number of labeling jobs to return in each page of the response.
*/ - SortOrder?: OrderKey | string; + MaxResults?: number; /** - *If the response to a previous ListModels
request was truncated, the
- * response includes a NextToken
. To retrieve the next set of models, use the
- * token in the next request.
If the result of the previous ListLabelingJobsForWorkteam
request was
+ * truncated, the response includes a NextToken
. To retrieve the next set of
+ * labeling jobs, use the token in the next request.
The maximum number of models to return in the response.
+ *A filter that returns only labeling jobs created after the specified time + * (timestamp).
*/ - MaxResults?: number; + CreationTimeAfter?: Date; /** - *A string in the model name. This filter returns only models whose - * name contains the specified string.
+ *A filter that returns only labeling jobs created before the specified time + * (timestamp).
*/ - NameContains?: string; + CreationTimeBefore?: Date; /** - *A filter that returns only models created before the specified time - * (timestamp).
+ *A filter the limits jobs to only the ones whose job reference code contains the + * specified string.
*/ - CreationTimeBefore?: Date; + JobReferenceCodeContains?: string; /** - *A filter that returns only models with a creation time greater than or equal to the - * specified time (timestamp).
+ *The field to sort results by. The default is CreationTime
.
The sort order for results. The default is Ascending
.
Provides summary information about a model.
- */ -export interface ModelSummary { - /** - *The name of the model that you want a summary for.
- */ - ModelName: string | undefined; - +export interface ListLabelingJobsForWorkteamResponse { /** - *The Amazon Resource Name (ARN) of the model.
+ *An array of LabelingJobSummary
objects, each describing a labeling
+ * job.
A timestamp that indicates when the model was created.
+ *If the response is truncated, SageMaker returns this token. To retrieve the next set of + * labeling jobs, use it in the subsequent request.
*/ - CreationTime: Date | undefined; + NextToken?: string; } -export namespace ModelSummary { +export namespace ListLabelingJobsForWorkteamResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ModelSummary): any => ({ + export const filterSensitiveLog = (obj: ListLabelingJobsForWorkteamResponse): any => ({ ...obj, }); } -export interface ListModelsOutput { +export enum SortLineageGroupsBy { + CREATION_TIME = "CreationTime", + NAME = "Name", +} + +export interface ListLineageGroupsRequest { /** - *An array of ModelSummary
objects, each of which lists a
- * model.
A timestamp to filter against lineage groups created after a certain point in time.
*/ - Models: ModelSummary[] | undefined; + CreatedAfter?: Date; /** - *If the response is truncated, SageMaker returns this token. To retrieve the next set of - * models, use it in the subsequent request.
+ *A timestamp to filter against lineage groups created before a certain point in time.
+ */ + CreatedBefore?: Date; + + /** + *The parameter by which to sort the results. The default is
+ * CreationTime
.
The sort order for the results. The default is Ascending
.
If the response is truncated, SageMaker returns this token. To retrieve the next set of + * algorithms, use it in the subsequent request.
*/ NextToken?: string; + + /** + *The maximum number of endpoints to return in the response. This value defaults to + * 10.
+ */ + MaxResults?: number; } -export namespace ListModelsOutput { +export namespace ListLineageGroupsRequest { /** * @internal */ - export const filterSensitiveLog = (obj: ListModelsOutput): any => ({ + export const filterSensitiveLog = (obj: ListLineageGroupsRequest): any => ({ ...obj, }); } -export enum MonitoringExecutionSortKey { - CREATION_TIME = "CreationTime", - SCHEDULED_TIME = "ScheduledTime", - STATUS = "Status", +export interface ListLineageGroupsResponse { + /** + *A list of lineage groups and their properties.
+ */ + LineageGroupSummaries?: LineageGroupSummary[]; + + /** + *If the response is truncated, SageMaker returns this token. To retrieve the next set of + * algorithms, use it in the subsequent request.
+ */ + NextToken?: string; } -export interface ListMonitoringExecutionsRequest { +export namespace ListLineageGroupsResponse { /** - *Name of a specific schedule to fetch jobs for.
+ * @internal */ - MonitoringScheduleName?: string; + export const filterSensitiveLog = (obj: ListLineageGroupsResponse): any => ({ + ...obj, + }); +} +export interface ListModelBiasJobDefinitionsRequest { /** - *Name of a specific endpoint to fetch jobs for.
+ *Name of the endpoint to monitor for model bias.
*/ EndpointName?: string; /** - *Whether to sort results by Status
, CreationTime
,
- * ScheduledTime
field. The default is CreationTime
.
Whether to sort results by the Name
or CreationTime
field. The
+ * default is CreationTime
.
Whether to sort the results in Ascending
or Descending
order.
@@ -9499,106 +9501,69 @@ export interface ListMonitoringExecutionsRequest {
NextToken?: string;
/**
- *
The maximum number of jobs to return in the response. The default value is 10.
+ *The maximum number of model bias jobs to return in the response. The default value is + * 10.
*/ MaxResults?: number; /** - *Filter for jobs scheduled before a specified time.
- */ - ScheduledTimeBefore?: Date; - - /** - *Filter for jobs scheduled after a specified time.
+ *Filter for model bias jobs whose name contains a specified string.
*/ - ScheduledTimeAfter?: Date; + NameContains?: string; /** - *A filter that returns only jobs created before a specified time.
+ *A filter that returns only model bias jobs created before a specified time.
*/ CreationTimeBefore?: Date; /** - *A filter that returns only jobs created after a specified time.
+ *A filter that returns only model bias jobs created after a specified time.
*/ CreationTimeAfter?: Date; - - /** - *A filter that returns only jobs modified after a specified time.
- */ - LastModifiedTimeBefore?: Date; - - /** - *A filter that returns only jobs modified before a specified time.
- */ - LastModifiedTimeAfter?: Date; - - /** - *A filter that retrieves only jobs with a specific status.
- */ - StatusEquals?: ExecutionStatus | string; - - /** - *Gets a list of the monitoring job runs of the specified monitoring job - * definitions.
- */ - MonitoringJobDefinitionName?: string; - - /** - *A filter that returns only the monitoring job runs of the specified monitoring - * type.
- */ - MonitoringTypeEquals?: MonitoringType | string; } -export namespace ListMonitoringExecutionsRequest { +export namespace ListModelBiasJobDefinitionsRequest { /** * @internal */ - export const filterSensitiveLog = (obj: ListMonitoringExecutionsRequest): any => ({ + export const filterSensitiveLog = (obj: ListModelBiasJobDefinitionsRequest): any => ({ ...obj, }); } -export interface ListMonitoringExecutionsResponse { +export interface ListModelBiasJobDefinitionsResponse { /** - *A JSON array in which each element is a summary for a monitoring execution.
+ *A JSON array in which each element is a summary for a model bias jobs.
*/ - MonitoringExecutionSummaries: MonitoringExecutionSummary[] | undefined; + JobDefinitionSummaries: MonitoringJobDefinitionSummary[] | undefined; /** *If the response is truncated, Amazon SageMaker returns this token. To retrieve the next set of jobs, - * use it in the subsequent reques
+ * use it in the subsequent request. */ NextToken?: string; } -export namespace ListMonitoringExecutionsResponse { +export namespace ListModelBiasJobDefinitionsResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ListMonitoringExecutionsResponse): any => ({ + export const filterSensitiveLog = (obj: ListModelBiasJobDefinitionsResponse): any => ({ ...obj, }); } -export enum MonitoringScheduleSortKey { - CREATION_TIME = "CreationTime", - NAME = "Name", - STATUS = "Status", -} - -export interface ListMonitoringSchedulesRequest { +export interface ListModelExplainabilityJobDefinitionsRequest { /** - *Name of a specific endpoint to fetch schedules for.
+ *Name of the endpoint to monitor for model explainability.
*/ EndpointName?: string; /** - *Whether to sort results by Status
, CreationTime
,
- * ScheduledTime
field. The default is CreationTime
.
Whether to sort results by the Name
or CreationTime
field. The
+ * default is CreationTime
.
Whether to sort the results in Ascending
or Descending
order.
@@ -9618,1391 +9583,1427 @@ export interface ListMonitoringSchedulesRequest {
MaxResults?: number;
/**
- *
Filter for monitoring schedules whose name contains a specified string.
+ *Filter for model explainability jobs whose name contains a specified string.
*/ NameContains?: string; /** - *A filter that returns only monitoring schedules created before a specified time.
+ *A filter that returns only model explainability jobs created before a specified + * time.
*/ CreationTimeBefore?: Date; /** - *A filter that returns only monitoring schedules created after a specified time.
+ *A filter that returns only model explainability jobs created after a specified + * time.
*/ CreationTimeAfter?: Date; +} +export namespace ListModelExplainabilityJobDefinitionsRequest { /** - *A filter that returns only monitoring schedules modified before a specified time.
+ * @internal */ - LastModifiedTimeBefore?: Date; + export const filterSensitiveLog = (obj: ListModelExplainabilityJobDefinitionsRequest): any => ({ + ...obj, + }); +} +export interface ListModelExplainabilityJobDefinitionsResponse { /** - *A filter that returns only monitoring schedules modified after a specified time.
+ *A JSON array in which each element is a summary for a explainability bias jobs.
*/ - LastModifiedTimeAfter?: Date; + JobDefinitionSummaries: MonitoringJobDefinitionSummary[] | undefined; /** - *A filter that returns only monitoring schedules modified before a specified time.
+ *If the response is truncated, Amazon SageMaker returns this token. To retrieve the next set of jobs, + * use it in the subsequent request.
*/ - StatusEquals?: ScheduleStatus | string; + NextToken?: string; +} +export namespace ListModelExplainabilityJobDefinitionsResponse { /** - *Gets a list of the monitoring schedules for the specified monitoring job - * definition.
+ * @internal */ - MonitoringJobDefinitionName?: string; + export const filterSensitiveLog = (obj: ListModelExplainabilityJobDefinitionsResponse): any => ({ + ...obj, + }); +} + +export enum ModelMetadataFilterType { + DOMAIN = "Domain", + FRAMEWORK = "Framework", + FRAMEWORKVERSION = "FrameworkVersion", + TASK = "Task", +} +/** + *Part of the search expression. You can specify the name and value + * (domain, task, framework, framework version, task, and model).
+ */ +export interface ModelMetadataFilter { /** - *A filter that returns only the monitoring schedules for the specified monitoring - * type.
+ *The name of the of the model to filter by.
*/ - MonitoringTypeEquals?: MonitoringType | string; + Name: ModelMetadataFilterType | string | undefined; + + /** + *The value to filter the model metadata.
+ */ + Value: string | undefined; } -export namespace ListMonitoringSchedulesRequest { +export namespace ModelMetadataFilter { /** * @internal */ - export const filterSensitiveLog = (obj: ListMonitoringSchedulesRequest): any => ({ + export const filterSensitiveLog = (obj: ModelMetadataFilter): any => ({ ...obj, }); } /** - *Summarizes the monitoring schedule.
+ *One or more filters that searches for the specified resource or resources in + * a search. All resource objects that satisfy the expression's condition are + * included in the search results
*/ -export interface MonitoringScheduleSummary { +export interface ModelMetadataSearchExpression { /** - *The name of the monitoring schedule.
+ *A list of filter objects.
*/ - MonitoringScheduleName: string | undefined; + Filters?: ModelMetadataFilter[]; +} +export namespace ModelMetadataSearchExpression { /** - *The Amazon Resource Name (ARN) of the monitoring schedule.
+ * @internal */ - MonitoringScheduleArn: string | undefined; + export const filterSensitiveLog = (obj: ModelMetadataSearchExpression): any => ({ + ...obj, + }); +} +export interface ListModelMetadataRequest { /** - *The creation time of the monitoring schedule.
+ *One or more filters that searches for the specified resource or resources + * in a search. All resource objects that satisfy the expression's condition are + * included in the search results. Specify the Framework, FrameworkVersion, Domain + * or Task to filter supported. Filter names and values are case-sensitive.
*/ - CreationTime: Date | undefined; + SearchExpression?: ModelMetadataSearchExpression; /** - *The last time the monitoring schedule was modified.
+ *If the response to a previous ListModelMetadataResponse
request was truncated,
+ * the response includes a NextToken. To retrieve the next set of model metadata,
+ * use the token in the next request.
The status of the monitoring schedule.
+ *The maximum number of models to return in the response.
*/ - MonitoringScheduleStatus: ScheduleStatus | string | undefined; + MaxResults?: number; +} +export namespace ListModelMetadataRequest { /** - *The name of the endpoint using the monitoring schedule.
+ * @internal */ - EndpointName?: string; + export const filterSensitiveLog = (obj: ListModelMetadataRequest): any => ({ + ...obj, + }); +} +/** + *A summary of the model metadata.
+ */ +export interface ModelMetadataSummary { /** - *The name of the monitoring job definition that the schedule is for.
+ *The machine learning domain of the model.
*/ - MonitoringJobDefinitionName?: string; + Domain: string | undefined; /** - *The type of the monitoring job definition that the schedule is for.
+ *The machine learning framework of the model.
*/ - MonitoringType?: MonitoringType | string; + Framework: string | undefined; + + /** + *The machine learning task of the model.
+ */ + Task: string | undefined; + + /** + *The name of the model.
+ */ + Model: string | undefined; + + /** + *The framework version of the model.
+ */ + FrameworkVersion: string | undefined; } -export namespace MonitoringScheduleSummary { +export namespace ModelMetadataSummary { /** * @internal */ - export const filterSensitiveLog = (obj: MonitoringScheduleSummary): any => ({ + export const filterSensitiveLog = (obj: ModelMetadataSummary): any => ({ ...obj, }); } -export interface ListMonitoringSchedulesResponse { +export interface ListModelMetadataResponse { /** - *A JSON array in which each element is a summary for a monitoring schedule.
+ *A structure that holds model metadata.
*/ - MonitoringScheduleSummaries: MonitoringScheduleSummary[] | undefined; + ModelMetadataSummaries: ModelMetadataSummary[] | undefined; /** - *If the response is truncated, Amazon SageMaker returns this token. To retrieve the next set of jobs, - * use it in the subsequent request.
+ *A token for getting the next set of recommendations, if there are any.
*/ NextToken?: string; } -export namespace ListMonitoringSchedulesResponse { +export namespace ListModelMetadataResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ListMonitoringSchedulesResponse): any => ({ + export const filterSensitiveLog = (obj: ListModelMetadataResponse): any => ({ ...obj, }); } -export enum NotebookInstanceLifecycleConfigSortKey { +export enum ModelPackageGroupSortBy { CREATION_TIME = "CreationTime", - LAST_MODIFIED_TIME = "LastModifiedTime", NAME = "Name", } -export enum NotebookInstanceLifecycleConfigSortOrder { - ASCENDING = "Ascending", - DESCENDING = "Descending", -} - -export interface ListNotebookInstanceLifecycleConfigsInput { - /** - *If the result of a ListNotebookInstanceLifecycleConfigs
request was
- * truncated, the response includes a NextToken
. To get the next set of
- * lifecycle configurations, use the token in the next request.
The maximum number of lifecycle configurations to return in the response.
- */ - MaxResults?: number; - +export interface ListModelPackageGroupsInput { /** - *Sorts the list of results. The default is CreationTime
.
A filter that returns only model groups created after the specified time.
*/ - SortBy?: NotebookInstanceLifecycleConfigSortKey | string; + CreationTimeAfter?: Date; /** - *The sort order for results.
+ *A filter that returns only model groups created before the specified time.
*/ - SortOrder?: NotebookInstanceLifecycleConfigSortOrder | string; + CreationTimeBefore?: Date; /** - *A string in the lifecycle configuration name. This filter returns only lifecycle - * configurations whose name contains the specified string.
+ *The maximum number of results to return in the response.
*/ - NameContains?: string; + MaxResults?: number; /** - *A filter that returns only lifecycle configurations that were created before the - * specified time (timestamp).
+ *A string in the model group name. This filter returns only model groups whose name + * contains the specified string.
*/ - CreationTimeBefore?: Date; + NameContains?: string; /** - *A filter that returns only lifecycle configurations that were created after the - * specified time (timestamp).
+ *If the result of the previous ListModelPackageGroups
request was
+ * truncated, the response includes a NextToken
. To retrieve the next set of
+ * model groups, use the token in the next request.
A filter that returns only lifecycle configurations that were modified before the - * specified time (timestamp).
+ *The field to sort results by. The default is CreationTime
.
A filter that returns only lifecycle configurations that were modified after the - * specified time (timestamp).
+ *The sort order for results. The default is Ascending
.
Provides a summary of a notebook instance lifecycle configuration.
+ *Summary information about a model group.
*/ -export interface NotebookInstanceLifecycleConfigSummary { +export interface ModelPackageGroupSummary { /** - *The name of the lifecycle configuration.
+ *The name of the model group.
*/ - NotebookInstanceLifecycleConfigName: string | undefined; + ModelPackageGroupName: string | undefined; /** - *The Amazon Resource Name (ARN) of the lifecycle configuration.
+ *The Amazon Resource Name (ARN) of the model group.
*/ - NotebookInstanceLifecycleConfigArn: string | undefined; + ModelPackageGroupArn: string | undefined; /** - *A timestamp that tells when the lifecycle configuration was created.
+ *A description of the model group.
*/ - CreationTime?: Date; + ModelPackageGroupDescription?: string; /** - *A timestamp that tells when the lifecycle configuration was last modified.
+ *The time that the model group was created.
*/ - LastModifiedTime?: Date; + CreationTime: Date | undefined; + + /** + *The status of the model group.
+ */ + ModelPackageGroupStatus: ModelPackageGroupStatus | string | undefined; } -export namespace NotebookInstanceLifecycleConfigSummary { +export namespace ModelPackageGroupSummary { /** * @internal */ - export const filterSensitiveLog = (obj: NotebookInstanceLifecycleConfigSummary): any => ({ + export const filterSensitiveLog = (obj: ModelPackageGroupSummary): any => ({ ...obj, }); } -export interface ListNotebookInstanceLifecycleConfigsOutput { +export interface ListModelPackageGroupsOutput { /** - *If the response is truncated, SageMaker returns this token. To get the next set of - * lifecycle configurations, use it in the next request.
+ *A list of summaries of the model groups in your Amazon Web Services account.
*/ - NextToken?: string; + ModelPackageGroupSummaryList: ModelPackageGroupSummary[] | undefined; /** - *An array of NotebookInstanceLifecycleConfiguration
objects, each listing
- * a lifecycle configuration.
If the response is truncated, SageMaker returns this token. To retrieve the next set + * of model groups, use it in the subsequent request.
*/ - NotebookInstanceLifecycleConfigs?: NotebookInstanceLifecycleConfigSummary[]; + NextToken?: string; } -export namespace ListNotebookInstanceLifecycleConfigsOutput { +export namespace ListModelPackageGroupsOutput { /** * @internal */ - export const filterSensitiveLog = (obj: ListNotebookInstanceLifecycleConfigsOutput): any => ({ + export const filterSensitiveLog = (obj: ListModelPackageGroupsOutput): any => ({ ...obj, }); } -export enum NotebookInstanceSortKey { +export enum ModelPackageType { + BOTH = "Both", + UNVERSIONED = "Unversioned", + VERSIONED = "Versioned", +} + +export enum ModelPackageSortBy { CREATION_TIME = "CreationTime", NAME = "Name", - STATUS = "Status", } -export enum NotebookInstanceSortOrder { - ASCENDING = "Ascending", - DESCENDING = "Descending", -} +export interface ListModelPackagesInput { + /** + *A filter that returns only model packages created after the specified time + * (timestamp).
+ */ + CreationTimeAfter?: Date; -export interface ListNotebookInstancesInput { /** - * If the previous call to the ListNotebookInstances
is truncated, the
- * response includes a NextToken
. You can use this token in your subsequent
- * ListNotebookInstances
request to fetch the next set of notebook
- * instances.
You might specify a filter or a sort order in your request. When response is - * truncated, you must use the same values for the filer and sort order in the next - * request.
- *A filter that returns only model packages created before the specified time + * (timestamp).
*/ - NextToken?: string; + CreationTimeBefore?: Date; /** - *The maximum number of notebook instances to return.
+ *The maximum number of model packages to return in the response.
*/ MaxResults?: number; /** - *The field to sort results by. The default is Name
.
A string in the model package name. This filter returns only model packages whose name + * contains the specified string.
*/ - SortBy?: NotebookInstanceSortKey | string; + NameContains?: string; /** - *The sort order for results.
+ *A filter that returns only the model packages with the specified approval + * status.
*/ - SortOrder?: NotebookInstanceSortOrder | string; + ModelApprovalStatus?: ModelApprovalStatus | string; /** - *A string in the notebook instances' name. This filter returns only notebook - * instances whose name contains the specified string.
+ *A filter that returns only model versions that belong to the specified model group.
*/ - NameContains?: string; + ModelPackageGroupName?: string; /** - *A filter that returns only notebook instances that were created before the - * specified time (timestamp).
+ *A filter that returns only the model packages of the specified type. This can be one + * of the following values.
+ *
+ * UNVERSIONED
- List only unversioined models.
+ * This is the default value if no ModelPackageType
is specified.
+ * VERSIONED
- List only versioned models.
+ * BOTH
- List both versioned and unversioned models.
A filter that returns only notebook instances that were created after the specified - * time (timestamp).
+ *If the response to a previous ListModelPackages
request was truncated,
+ * the response includes a NextToken
. To retrieve the next set of model
+ * packages, use the token in the next request.
A filter that returns only notebook instances that were modified before the - * specified time (timestamp).
+ *The parameter by which to sort the results. The default is
+ * CreationTime
.
A filter that returns only notebook instances that were modified after the - * specified time (timestamp).
+ *The sort order for the results. The default is Ascending
.
A filter that returns only notebook instances with the specified status.
+ * @internal */ - StatusEquals?: NotebookInstanceStatus | string; + export const filterSensitiveLog = (obj: ListModelPackagesInput): any => ({ + ...obj, + }); +} +/** + *Provides summary information about a model package.
+ */ +export interface ModelPackageSummary { /** - *A string in the name of a notebook instances lifecycle configuration associated with - * this notebook instance. This filter returns only notebook instances associated with a - * lifecycle configuration with a name that contains the specified string.
+ *The name of the model package.
*/ - NotebookInstanceLifecycleConfigNameContains?: string; + ModelPackageName: string | undefined; /** - *A string in the name or URL of a Git repository associated with this notebook - * instance. This filter returns only notebook instances associated with a git repository - * with a name that contains the specified string.
+ *If the model package is a versioned model, the model group that the versioned model + * belongs to.
+ */ + ModelPackageGroupName?: string; + + /** + *If the model package is a versioned model, the version of the model.
+ */ + ModelPackageVersion?: number; + + /** + *The Amazon Resource Name (ARN) of the model package.
+ */ + ModelPackageArn: string | undefined; + + /** + *A brief description of the model package.
+ */ + ModelPackageDescription?: string; + + /** + *A timestamp that shows when the model package was created.
+ */ + CreationTime: Date | undefined; + + /** + *The overall status of the model package.
+ */ + ModelPackageStatus: ModelPackageStatus | string | undefined; + + /** + *The approval status of the model. This can be one of the following values.
+ *
+ * APPROVED
- The model is approved
+ * REJECTED
- The model is rejected.
+ * PENDING_MANUAL_APPROVAL
- The model is waiting for manual
+ * approval.
An array of ModelPackageSummary
objects, each of which lists a model
+ * package.
A filter that returns only notebook instances with associated with the specified git - * repository.
+ *If the response is truncated, SageMaker returns this token. To retrieve the next set of + * model packages, use it in the subsequent request.
*/ - AdditionalCodeRepositoryEquals?: string; + NextToken?: string; } -export namespace ListNotebookInstancesInput { +export namespace ListModelPackagesOutput { /** * @internal */ - export const filterSensitiveLog = (obj: ListNotebookInstancesInput): any => ({ + export const filterSensitiveLog = (obj: ListModelPackagesOutput): any => ({ ...obj, }); } -/** - *Provides summary information for an SageMaker notebook instance.
- */ -export interface NotebookInstanceSummary { - /** - *The name of the notebook instance that you want a summary for.
- */ - NotebookInstanceName: string | undefined; - - /** - *The Amazon Resource Name (ARN) of the notebook instance.
- */ - NotebookInstanceArn: string | undefined; - +export interface ListModelQualityJobDefinitionsRequest { /** - *The status of the notebook instance.
+ *A filter that returns only model quality monitoring job definitions that are associated + * with the specified endpoint.
*/ - NotebookInstanceStatus?: NotebookInstanceStatus | string; + EndpointName?: string; /** - *The URL that you use to connect to the Jupyter notebook running in your notebook - * instance.
+ *The field to sort results by. The default is CreationTime
.
The type of ML compute instance that the notebook instance is running on.
+ *The sort order for results. The default is Descending
.
A timestamp that shows when the notebook instance was created.
+ *If the result of the previous ListModelQualityJobDefinitions
request was
+ * truncated, the response includes a NextToken
. To retrieve the next set of
+ * model quality monitoring job definitions, use the token in the next request.
A timestamp that shows when the notebook instance was last modified.
+ *The maximum number of results to return in a call to
+ * ListModelQualityJobDefinitions
.
The name of a notebook instance lifecycle configuration associated with this notebook - * instance.
- *For information about notebook instance lifestyle configurations, see Step - * 2.1: (Optional) Customize a Notebook Instance.
+ *A string in the transform job name. This filter returns only model quality monitoring + * job definitions whose name contains the specified string.
*/ - NotebookInstanceLifecycleConfigName?: string; + NameContains?: string; /** - *The Git repository associated with the notebook instance as its default code - * repository. This can be either the name of a Git repository stored as a resource in your - * account, or the URL of a Git repository in Amazon Web Services CodeCommit or in any - * other Git repository. When you open a notebook instance, it opens in the directory that - * contains this repository. For more information, see Associating Git Repositories with SageMaker - * Notebook Instances.
+ *A filter that returns only model quality monitoring job definitions created before the + * specified time.
*/ - DefaultCodeRepository?: string; + CreationTimeBefore?: Date; /** - *An array of up to three Git repositories associated with the notebook instance. These - * can be either the names of Git repositories stored as resources in your account, or the - * URL of Git repositories in Amazon Web Services CodeCommit or in any - * other Git repository. These repositories are cloned at the same level as the default - * repository of your notebook instance. For more information, see Associating Git - * Repositories with SageMaker Notebook Instances.
+ *A filter that returns only model quality monitoring job definitions created after the + * specified time.
*/ - AdditionalCodeRepositories?: string[]; + CreationTimeAfter?: Date; } -export namespace NotebookInstanceSummary { +export namespace ListModelQualityJobDefinitionsRequest { /** * @internal */ - export const filterSensitiveLog = (obj: NotebookInstanceSummary): any => ({ + export const filterSensitiveLog = (obj: ListModelQualityJobDefinitionsRequest): any => ({ ...obj, }); } -export interface ListNotebookInstancesOutput { +export interface ListModelQualityJobDefinitionsResponse { /** - *If the response to the previous ListNotebookInstances
request was
- * truncated, SageMaker returns this token. To retrieve the next set of notebook instances, use
- * the token in the next request.
A list of summaries of model quality monitoring job definitions.
*/ - NextToken?: string; + JobDefinitionSummaries: MonitoringJobDefinitionSummary[] | undefined; /** - *An array of NotebookInstanceSummary
objects, one for each notebook
- * instance.
If the response is truncated, Amazon SageMaker returns this token. To retrieve the next set of model + * quality monitoring job definitions, use it in the next request.
*/ - NotebookInstances?: NotebookInstanceSummary[]; + NextToken?: string; } -export namespace ListNotebookInstancesOutput { +export namespace ListModelQualityJobDefinitionsResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ListNotebookInstancesOutput): any => ({ + export const filterSensitiveLog = (obj: ListModelQualityJobDefinitionsResponse): any => ({ ...obj, }); } -export enum SortPipelineExecutionsBy { - CREATION_TIME = "CreationTime", - PIPELINE_EXECUTION_ARN = "PipelineExecutionArn", +export enum ModelSortKey { + CreationTime = "CreationTime", + Name = "Name", } -export interface ListPipelineExecutionsRequest { +export interface ListModelsInput { /** - *The name of the pipeline.
+ *Sorts the list of results. The default is CreationTime
.
A filter that returns the pipeline executions that were created after a specified - * time.
+ *The sort order for results. The default is Descending
.
A filter that returns the pipeline executions that were created before a specified - * time.
+ *If the response to a previous ListModels
request was truncated, the
+ * response includes a NextToken
. To retrieve the next set of models, use the
+ * token in the next request.
The field by which to sort results. The default is CreatedTime
.
The maximum number of models to return in the response.
*/ - SortBy?: SortPipelineExecutionsBy | string; + MaxResults?: number; /** - *The sort order for results.
+ *A string in the model name. This filter returns only models whose + * name contains the specified string.
*/ - SortOrder?: SortOrder | string; + NameContains?: string; /** - *If the result of the previous ListPipelineExecutions
request was truncated,
- * the response includes a NextToken
. To retrieve the next set of pipeline executions, use the token in the next request.
A filter that returns only models created before the specified time + * (timestamp).
*/ - NextToken?: string; + CreationTimeBefore?: Date; /** - *The maximum number of pipeline executions to return in the response.
+ *A filter that returns only models with a creation time greater than or equal to the + * specified time (timestamp).
*/ - MaxResults?: number; + CreationTimeAfter?: Date; } -export namespace ListPipelineExecutionsRequest { +export namespace ListModelsInput { /** * @internal */ - export const filterSensitiveLog = (obj: ListPipelineExecutionsRequest): any => ({ + export const filterSensitiveLog = (obj: ListModelsInput): any => ({ ...obj, }); } /** - *A pipeline execution summary.
+ *Provides summary information about a model.
*/ -export interface PipelineExecutionSummary { +export interface ModelSummary { /** - *The Amazon Resource Name (ARN) of the pipeline execution.
+ *The name of the model that you want a summary for.
*/ - PipelineExecutionArn?: string; + ModelName: string | undefined; /** - *The start time of the pipeline execution.
+ *The Amazon Resource Name (ARN) of the model.
*/ - StartTime?: Date; + ModelArn: string | undefined; /** - *The status of the pipeline execution.
+ *A timestamp that indicates when the model was created.
*/ - PipelineExecutionStatus?: PipelineExecutionStatus | string; + CreationTime: Date | undefined; +} +export namespace ModelSummary { /** - *The description of the pipeline execution.
+ * @internal */ - PipelineExecutionDescription?: string; + export const filterSensitiveLog = (obj: ModelSummary): any => ({ + ...obj, + }); +} +export interface ListModelsOutput { /** - *The display name of the pipeline execution.
+ *An array of ModelSummary
objects, each of which lists a
+ * model.
A message generated by SageMaker Pipelines describing why the pipeline execution failed.
+ *If the response is truncated, SageMaker returns this token. To retrieve the next set of + * models, use it in the subsequent request.
*/ - PipelineExecutionFailureReason?: string; + NextToken?: string; } -export namespace PipelineExecutionSummary { +export namespace ListModelsOutput { /** * @internal */ - export const filterSensitiveLog = (obj: PipelineExecutionSummary): any => ({ + export const filterSensitiveLog = (obj: ListModelsOutput): any => ({ ...obj, }); } -export interface ListPipelineExecutionsResponse { +export enum MonitoringExecutionSortKey { + CREATION_TIME = "CreationTime", + SCHEDULED_TIME = "ScheduledTime", + STATUS = "Status", +} + +export interface ListMonitoringExecutionsRequest { /** - *Contains a sorted list of pipeline execution summary objects matching the specified - * filters. Each run summary includes the Amazon Resource Name (ARN) of the pipeline execution, the run date, - * and the status. This list can be empty.
+ *Name of a specific schedule to fetch jobs for.
*/ - PipelineExecutionSummaries?: PipelineExecutionSummary[]; + MonitoringScheduleName?: string; /** - *If the result of the previous ListPipelineExecutions
request was truncated,
- * the response includes a NextToken
. To retrieve the next set of pipeline executions, use the token in the next request.
Name of a specific endpoint to fetch jobs for.
*/ - NextToken?: string; -} + EndpointName?: string; -export namespace ListPipelineExecutionsResponse { /** - * @internal + *Whether to sort results by Status
, CreationTime
,
+ * ScheduledTime
field. The default is CreationTime
.
The Amazon Resource Name (ARN) of the pipeline execution.
+ *Whether to sort the results in Ascending
or Descending
order.
+ * The default is Descending
.
If the result of the previous ListPipelineExecutionSteps
request was truncated,
- * the response includes a NextToken
. To retrieve the next set of pipeline execution steps, use the token in the next request.
The token returned if the response is truncated. To retrieve the next set of job + * executions, use it in the next request.
*/ NextToken?: string; /** - *The maximum number of pipeline execution steps to return in the response.
+ *The maximum number of jobs to return in the response. The default value is 10.
*/ MaxResults?: number; /** - *The field by which to sort results. The default is CreatedTime
.
Filter for jobs scheduled before a specified time.
*/ - SortOrder?: SortOrder | string; -} + ScheduledTimeBefore?: Date; -export namespace ListPipelineExecutionStepsRequest { /** - * @internal + *Filter for jobs scheduled after a specified time.
*/ - export const filterSensitiveLog = (obj: ListPipelineExecutionStepsRequest): any => ({ - ...obj, - }); -} + ScheduledTimeAfter?: Date; + + /** + *A filter that returns only jobs created before a specified time.
+ */ + CreationTimeBefore?: Date; + + /** + *A filter that returns only jobs created after a specified time.
+ */ + CreationTimeAfter?: Date; + + /** + *A filter that returns only jobs modified after a specified time.
+ */ + LastModifiedTimeBefore?: Date; + + /** + *A filter that returns only jobs modified before a specified time.
+ */ + LastModifiedTimeAfter?: Date; -/** - *Metadata for Model steps.
- */ -export interface ModelStepMetadata { /** - *The Amazon Resource Name (ARN) of the created model.
+ *A filter that retrieves only jobs with a specific status.
*/ - Arn?: string; -} + StatusEquals?: ExecutionStatus | string; -export namespace ModelStepMetadata { /** - * @internal + *Gets a list of the monitoring job runs of the specified monitoring job + * definitions.
*/ - export const filterSensitiveLog = (obj: ModelStepMetadata): any => ({ - ...obj, - }); -} + MonitoringJobDefinitionName?: string; -/** - *Metadata for a processing job step.
- */ -export interface ProcessingJobStepMetadata { /** - *The Amazon Resource Name (ARN) of the processing job.
+ *A filter that returns only the monitoring job runs of the specified monitoring + * type.
*/ - Arn?: string; + MonitoringTypeEquals?: MonitoringType | string; } -export namespace ProcessingJobStepMetadata { +export namespace ListMonitoringExecutionsRequest { /** * @internal */ - export const filterSensitiveLog = (obj: ProcessingJobStepMetadata): any => ({ + export const filterSensitiveLog = (obj: ListMonitoringExecutionsRequest): any => ({ ...obj, }); } -/** - *Container for the metadata for a Quality check step. For more information, see - * the topic on QualityCheck step in the Amazon SageMaker Developer Guide. - *
- */ -export interface QualityCheckStepMetadata { - /** - *The type of the Quality check step.
- */ - CheckType?: string; - +export interface ListMonitoringExecutionsResponse { /** - *The Amazon S3 URI of the baseline statistics file used for the drift check.
+ *A JSON array in which each element is a summary for a monitoring execution.
*/ - BaselineUsedForDriftCheckStatistics?: string; + MonitoringExecutionSummaries: MonitoringExecutionSummary[] | undefined; /** - *The Amazon S3 URI of the baseline constraints file used for the drift check.
+ *If the response is truncated, Amazon SageMaker returns this token. To retrieve the next set of jobs, + * use it in the subsequent reques
*/ - BaselineUsedForDriftCheckConstraints?: string; + NextToken?: string; +} +export namespace ListMonitoringExecutionsResponse { /** - *The Amazon S3 URI of the newly calculated baseline statistics file.
+ * @internal */ - CalculatedBaselineStatistics?: string; + export const filterSensitiveLog = (obj: ListMonitoringExecutionsResponse): any => ({ + ...obj, + }); +} - /** - *The Amazon S3 URI of the newly calculated baseline constraints file.
- */ - CalculatedBaselineConstraints?: string; +export enum MonitoringScheduleSortKey { + CREATION_TIME = "CreationTime", + NAME = "Name", + STATUS = "Status", +} +export interface ListMonitoringSchedulesRequest { /** - *The model package group name.
+ *Name of a specific endpoint to fetch schedules for.
*/ - ModelPackageGroupName?: string; + EndpointName?: string; /** - *The Amazon S3 URI of violation report if violations are detected.
+ *Whether to sort results by Status
, CreationTime
,
+ * ScheduledTime
field. The default is CreationTime
.
The Amazon Resource Name (ARN) of the Quality check processing job that was run by this step execution.
+ *Whether to sort the results in Ascending
or Descending
order.
+ * The default is Descending
.
This flag indicates if the drift check against the previous baseline will be skipped or not.
- * If it is set to False
, the previous baseline of the configured check type must be available.
The token returned if the response is truncated. To retrieve the next set of job + * executions, use it in the next request.
*/ - SkipCheck?: boolean; + NextToken?: string; /** - *This flag indicates if a newly calculated baseline can be accessed through step properties
- * BaselineUsedForDriftCheckConstraints
and BaselineUsedForDriftCheckStatistics
.
- * If it is set to False
, the previous baseline of the configured check type must also be available.
- * These can be accessed through the BaselineUsedForDriftCheckConstraints
and
- * BaselineUsedForDriftCheckStatistics
properties.
The maximum number of jobs to return in the response. The default value is 10.
*/ - RegisterNewBaseline?: boolean; -} + MaxResults?: number; -export namespace QualityCheckStepMetadata { /** - * @internal + *Filter for monitoring schedules whose name contains a specified string.
*/ - export const filterSensitiveLog = (obj: QualityCheckStepMetadata): any => ({ - ...obj, - }); -} + NameContains?: string; -/** - *Metadata for a register model job step.
- */ -export interface RegisterModelStepMetadata { /** - *The Amazon Resource Name (ARN) of the model package.
+ *A filter that returns only monitoring schedules created before a specified time.
*/ - Arn?: string; -} + CreationTimeBefore?: Date; -export namespace RegisterModelStepMetadata { /** - * @internal + *A filter that returns only monitoring schedules created after a specified time.
*/ - export const filterSensitiveLog = (obj: RegisterModelStepMetadata): any => ({ - ...obj, - }); -} + CreationTimeAfter?: Date; -/** - *Metadata for a training job step.
- */ -export interface TrainingJobStepMetadata { /** - *The Amazon Resource Name (ARN) of the training job that was run by this step execution.
+ *A filter that returns only monitoring schedules modified before a specified time.
*/ - Arn?: string; -} + LastModifiedTimeBefore?: Date; -export namespace TrainingJobStepMetadata { /** - * @internal + *A filter that returns only monitoring schedules modified after a specified time.
*/ - export const filterSensitiveLog = (obj: TrainingJobStepMetadata): any => ({ - ...obj, - }); -} + LastModifiedTimeAfter?: Date; -/** - *Metadata for a transform job step.
- */ -export interface TransformJobStepMetadata { /** - *The Amazon Resource Name (ARN) of the transform job that was run by this step execution.
 + *

A filter that returns only monitoring schedules with the specified status.

*/ - Arn?: string; -} + StatusEquals?: ScheduleStatus | string; -export namespace TransformJobStepMetadata { /** - * @internal + *Gets a list of the monitoring schedules for the specified monitoring job + * definition.
*/ - export const filterSensitiveLog = (obj: TransformJobStepMetadata): any => ({ - ...obj, - }); -} + MonitoringJobDefinitionName?: string; -/** - *Metadata for a tuning step.
- */ -export interface TuningJobStepMetaData { /** - *The Amazon Resource Name (ARN) of the tuning job that was run by this step execution.
+ *A filter that returns only the monitoring schedules for the specified monitoring + * type.
*/ - Arn?: string; + MonitoringTypeEquals?: MonitoringType | string; } -export namespace TuningJobStepMetaData { +export namespace ListMonitoringSchedulesRequest { /** * @internal */ - export const filterSensitiveLog = (obj: TuningJobStepMetaData): any => ({ + export const filterSensitiveLog = (obj: ListMonitoringSchedulesRequest): any => ({ ...obj, }); } /** - *Metadata for a step execution.
+ *Summarizes the monitoring schedule.
*/ -export interface PipelineExecutionStepMetadata { - /** - *The Amazon Resource Name (ARN) of the training job that was run by this step execution.
- */ - TrainingJob?: TrainingJobStepMetadata; - - /** - *The Amazon Resource Name (ARN) of the processing job that was run by this step execution.
- */ - ProcessingJob?: ProcessingJobStepMetadata; - +export interface MonitoringScheduleSummary { /** - *The Amazon Resource Name (ARN) of the transform job that was run by this step execution.
+ *The name of the monitoring schedule.
*/ - TransformJob?: TransformJobStepMetadata; + MonitoringScheduleName: string | undefined; /** - *The Amazon Resource Name (ARN) of the tuning job that was run by this step execution.
+ *The Amazon Resource Name (ARN) of the monitoring schedule.
*/ - TuningJob?: TuningJobStepMetaData; + MonitoringScheduleArn: string | undefined; /** - *The Amazon Resource Name (ARN) of the model that was created by this step execution.
+ *The creation time of the monitoring schedule.
*/ - Model?: ModelStepMetadata; + CreationTime: Date | undefined; /** - *The Amazon Resource Name (ARN) of the model package the model was registered to by this step execution.
+ *The last time the monitoring schedule was modified.
*/ - RegisterModel?: RegisterModelStepMetadata; + LastModifiedTime: Date | undefined; /** - *The outcome of the condition evaluation that was run by this step execution.
+ *The status of the monitoring schedule.
*/ - Condition?: ConditionStepMetadata; + MonitoringScheduleStatus: ScheduleStatus | string | undefined; /** - *The URL of the Amazon SQS queue used by this step execution, the pipeline generated token, - * and a list of output parameters.
+ *The name of the endpoint using the monitoring schedule.
*/ - Callback?: CallbackStepMetadata; + EndpointName?: string; /** - *The Amazon Resource Name (ARN) of the Lambda function that was run by this step execution and a list of - * output parameters.
+ *The name of the monitoring job definition that the schedule is for.
*/ - Lambda?: LambdaStepMetadata; + MonitoringJobDefinitionName?: string; /** - *The configurations and outcomes of the check step execution. This includes:
- *The type of the check conducted,
- *The Amazon S3 URIs of baseline constraints and statistics files to be used for the drift check.
- *The Amazon S3 URIs of newly calculated baseline constraints and statistics.
- *The model package group name provided.
- *The Amazon S3 URI of the violation report if violations detected.
- *The Amazon Resource Name (ARN) of check processing job initiated by the step execution.
- *The boolean flags indicating if the drift check is skipped.
- *If step property BaselineUsedForDriftCheck
is set the same as
- * CalculatedBaseline
.
The type of the monitoring job definition that the schedule is for.
*/ - QualityCheck?: QualityCheckStepMetadata; + MonitoringType?: MonitoringType | string; +} +export namespace MonitoringScheduleSummary { /** - *Container for the metadata for a Clarify check step. The configurations - * and outcomes of the check step execution. This includes:
- *The type of the check conducted,
- *The Amazon S3 URIs of baseline constraints and statistics files to be used for the drift check.
- *The Amazon S3 URIs of newly calculated baseline constraints and statistics.
- *The model package group name provided.
- *The Amazon S3 URI of the violation report if violations detected.
- *The Amazon Resource Name (ARN) of check processing job initiated by the step execution.
- *The boolean flags indicating if the drift check is skipped.
- *If step property BaselineUsedForDriftCheck
is set the same as
- * CalculatedBaseline
.
The configurations and outcomes of an EMR step execution.
+ *A JSON array in which each element is a summary for a monitoring schedule.
*/ - EMR?: EMRStepMetadata; + MonitoringScheduleSummaries: MonitoringScheduleSummary[] | undefined; /** - *The configurations and outcomes of a Fail step execution.
+ *If the response is truncated, Amazon SageMaker returns this token. To retrieve the next set of jobs, + * use it in the subsequent request.
*/ - Fail?: FailStepMetadata; + NextToken?: string; } -export namespace PipelineExecutionStepMetadata { +export namespace ListMonitoringSchedulesResponse { /** * @internal */ - export const filterSensitiveLog = (obj: PipelineExecutionStepMetadata): any => ({ + export const filterSensitiveLog = (obj: ListMonitoringSchedulesResponse): any => ({ ...obj, }); } -export enum StepStatus { - EXECUTING = "Executing", - FAILED = "Failed", - STARTING = "Starting", - STOPPED = "Stopped", - STOPPING = "Stopping", - SUCCEEDED = "Succeeded", +export enum NotebookInstanceLifecycleConfigSortKey { + CREATION_TIME = "CreationTime", + LAST_MODIFIED_TIME = "LastModifiedTime", + NAME = "Name", } -/** - *An execution of a step in a pipeline.
- */ -export interface PipelineExecutionStep { +export enum NotebookInstanceLifecycleConfigSortOrder { + ASCENDING = "Ascending", + DESCENDING = "Descending", +} + +export interface ListNotebookInstanceLifecycleConfigsInput { /** - *The name of the step that is executed.
+ *If the result of a ListNotebookInstanceLifecycleConfigs
request was
+ * truncated, the response includes a NextToken
. To get the next set of
+ * lifecycle configurations, use the token in the next request.
The display name of the step.
+ *The maximum number of lifecycle configurations to return in the response.
*/ - StepDisplayName?: string; + MaxResults?: number; /** - *The description of the step.
+ *Sorts the list of results. The default is CreationTime
.
The time that the step started executing.
+ *The sort order for results.
*/ - StartTime?: Date; + SortOrder?: NotebookInstanceLifecycleConfigSortOrder | string; /** - *The time that the step stopped executing.
+ *A string in the lifecycle configuration name. This filter returns only lifecycle + * configurations whose name contains the specified string.
*/ - EndTime?: Date; + NameContains?: string; /** - *The status of the step execution.
+ *A filter that returns only lifecycle configurations that were created before the + * specified time (timestamp).
*/ - StepStatus?: StepStatus | string; + CreationTimeBefore?: Date; /** - *If this pipeline execution step was cached, details on the cache hit.
+ *A filter that returns only lifecycle configurations that were created after the + * specified time (timestamp).
*/ - CacheHitResult?: CacheHitResult; + CreationTimeAfter?: Date; /** - *The current attempt of the execution step. For more information, see Retry Policy for SageMaker Pipelines steps.
+ *A filter that returns only lifecycle configurations that were modified before the + * specified time (timestamp).
*/ - AttemptCount?: number; + LastModifiedTimeBefore?: Date; /** - *The reason why the step failed execution. This is only returned if the step failed its execution.
+ *A filter that returns only lifecycle configurations that were modified after the + * specified time (timestamp).
*/ - FailureReason?: string; + LastModifiedTimeAfter?: Date; +} +export namespace ListNotebookInstanceLifecycleConfigsInput { /** - *Metadata for the step execution.
+ * @internal */ - Metadata?: PipelineExecutionStepMetadata; + export const filterSensitiveLog = (obj: ListNotebookInstanceLifecycleConfigsInput): any => ({ + ...obj, + }); } -export namespace PipelineExecutionStep { +/** + *Provides a summary of a notebook instance lifecycle configuration.
+ */ +export interface NotebookInstanceLifecycleConfigSummary { + /** + *The name of the lifecycle configuration.
+ */ + NotebookInstanceLifecycleConfigName: string | undefined; + /** - * @internal + *The Amazon Resource Name (ARN) of the lifecycle configuration.
*/ - export const filterSensitiveLog = (obj: PipelineExecutionStep): any => ({ - ...obj, - }); -} + NotebookInstanceLifecycleConfigArn: string | undefined; -export interface ListPipelineExecutionStepsResponse { /** - *A list of PipeLineExecutionStep
objects. Each
- * PipeLineExecutionStep
consists of StepName, StartTime, EndTime, StepStatus,
- * and Metadata. Metadata is an object with properties for each job that contains relevant
- * information about the job created by the step.
A timestamp that tells when the lifecycle configuration was created.
*/ - PipelineExecutionSteps?: PipelineExecutionStep[]; + CreationTime?: Date; /** - *If the result of the previous ListPipelineExecutionSteps
request was truncated,
- * the response includes a NextToken
. To retrieve the next set of pipeline execution steps, use the token in the next request.
A timestamp that tells when the lifecycle configuration was last modified.
*/ - NextToken?: string; + LastModifiedTime?: Date; } -export namespace ListPipelineExecutionStepsResponse { +export namespace NotebookInstanceLifecycleConfigSummary { /** * @internal */ - export const filterSensitiveLog = (obj: ListPipelineExecutionStepsResponse): any => ({ + export const filterSensitiveLog = (obj: NotebookInstanceLifecycleConfigSummary): any => ({ ...obj, }); } -export interface ListPipelineParametersForExecutionRequest { - /** - *The Amazon Resource Name (ARN) of the pipeline execution.
- */ - PipelineExecutionArn: string | undefined; - +export interface ListNotebookInstanceLifecycleConfigsOutput { /** - *If the result of the previous ListPipelineParametersForExecution
request was truncated,
- * the response includes a NextToken
. To retrieve the next set of parameters, use the token in the next request.
If the response is truncated, SageMaker returns this token. To get the next set of + * lifecycle configurations, use it in the next request.
*/ NextToken?: string; /** - *The maximum number of parameters to return in the response.
+ *An array of NotebookInstanceLifecycleConfiguration
objects, each listing
+ * a lifecycle configuration.
Assigns a value to a named Pipeline parameter.
- */ -export interface Parameter { +export enum NotebookInstanceSortKey { + CREATION_TIME = "CreationTime", + NAME = "Name", + STATUS = "Status", +} + +export enum NotebookInstanceSortOrder { + ASCENDING = "Ascending", + DESCENDING = "Descending", +} + +export interface ListNotebookInstancesInput { /** - *The name of the parameter to assign a value to. This - * parameter name must match a named parameter in the - * pipeline definition.
+ * If the previous call to the ListNotebookInstances
is truncated, the
+ * response includes a NextToken
. You can use this token in your subsequent
+ * ListNotebookInstances
request to fetch the next set of notebook
+ * instances.
 *

You might specify a filter or a sort order in your request. When response is + * truncated, you must use the same values for the filter and sort order in the next + * request.

+ *The literal value for the parameter.
+ *The maximum number of notebook instances to return.
*/ - Value: string | undefined; -} + MaxResults?: number; -export namespace Parameter { /** - * @internal + *The field to sort results by. The default is Name
.
Contains a list of pipeline parameters. This list can be empty.
+ *The sort order for results.
*/ - PipelineParameters?: Parameter[]; + SortOrder?: NotebookInstanceSortOrder | string; /** - *If the result of the previous ListPipelineParametersForExecution
request was truncated,
- * the response includes a NextToken
. To retrieve the next set of parameters, use the token in the next request.
A string in the notebook instances' name. This filter returns only notebook + * instances whose name contains the specified string.
*/ - NextToken?: string; -} + NameContains?: string; -export namespace ListPipelineParametersForExecutionResponse { /** - * @internal + *A filter that returns only notebook instances that were created before the + * specified time (timestamp).
*/ - export const filterSensitiveLog = (obj: ListPipelineParametersForExecutionResponse): any => ({ - ...obj, - }); -} - -export enum SortPipelinesBy { - CREATION_TIME = "CreationTime", - NAME = "Name", -} + CreationTimeBefore?: Date; -export interface ListPipelinesRequest { /** - *The prefix of the pipeline name.
+ *A filter that returns only notebook instances that were created after the specified + * time (timestamp).
*/ - PipelineNamePrefix?: string; + CreationTimeAfter?: Date; /** - *A filter that returns the pipelines that were created after a specified - * time.
+ *A filter that returns only notebook instances that were modified before the + * specified time (timestamp).
*/ - CreatedAfter?: Date; + LastModifiedTimeBefore?: Date; /** - *A filter that returns the pipelines that were created before a specified - * time.
+ *A filter that returns only notebook instances that were modified after the + * specified time (timestamp).
*/ - CreatedBefore?: Date; + LastModifiedTimeAfter?: Date; /** - *The field by which to sort results. The default is CreatedTime
.
A filter that returns only notebook instances with the specified status.
*/ - SortBy?: SortPipelinesBy | string; + StatusEquals?: NotebookInstanceStatus | string; /** - *The sort order for results.
+ *A string in the name of a notebook instances lifecycle configuration associated with + * this notebook instance. This filter returns only notebook instances associated with a + * lifecycle configuration with a name that contains the specified string.
*/ - SortOrder?: SortOrder | string; + NotebookInstanceLifecycleConfigNameContains?: string; /** - *If the result of the previous ListPipelines
request was truncated,
- * the response includes a NextToken
. To retrieve the next set of pipelines, use the token in the next request.
A string in the name or URL of a Git repository associated with this notebook + * instance. This filter returns only notebook instances associated with a git repository + * with a name that contains the specified string.
*/ - NextToken?: string; + DefaultCodeRepositoryContains?: string; /** - *The maximum number of pipelines to return in the response.
+ *A filter that returns only notebook instances with associated with the specified git + * repository.
*/ - MaxResults?: number; + AdditionalCodeRepositoryEquals?: string; } -export namespace ListPipelinesRequest { +export namespace ListNotebookInstancesInput { /** * @internal */ - export const filterSensitiveLog = (obj: ListPipelinesRequest): any => ({ + export const filterSensitiveLog = (obj: ListNotebookInstancesInput): any => ({ ...obj, }); } /** - *A summary of a pipeline.
+ *Provides summary information for an SageMaker notebook instance.
*/ -export interface PipelineSummary { +export interface NotebookInstanceSummary { /** - *The Amazon Resource Name (ARN) of the pipeline.
+ *The name of the notebook instance that you want a summary for.
*/ - PipelineArn?: string; + NotebookInstanceName: string | undefined; /** - *The name of the pipeline.
+ *The Amazon Resource Name (ARN) of the notebook instance.
*/ - PipelineName?: string; + NotebookInstanceArn: string | undefined; /** - *The display name of the pipeline.
+ *The status of the notebook instance.
*/ - PipelineDisplayName?: string; + NotebookInstanceStatus?: NotebookInstanceStatus | string; /** - *The description of the pipeline.
+ *The URL that you use to connect to the Jupyter notebook running in your notebook + * instance.
*/ - PipelineDescription?: string; + Url?: string; /** - *The Amazon Resource Name (ARN) that the pipeline used to execute.
+ *The type of ML compute instance that the notebook instance is running on.
*/ - RoleArn?: string; + InstanceType?: _InstanceType | string; /** - *The creation time of the pipeline.
+ *A timestamp that shows when the notebook instance was created.
*/ CreationTime?: Date; /** - *The time that the pipeline was last modified.
+ *A timestamp that shows when the notebook instance was last modified.
*/ LastModifiedTime?: Date; /** - *The last time that a pipeline execution began.
- */ - LastExecutionTime?: Date; -} - -export namespace PipelineSummary { - /** - * @internal + *The name of a notebook instance lifecycle configuration associated with this notebook + * instance.
+ *For information about notebook instance lifestyle configurations, see Step + * 2.1: (Optional) Customize a Notebook Instance.
*/ - export const filterSensitiveLog = (obj: PipelineSummary): any => ({ - ...obj, - }); -} + NotebookInstanceLifecycleConfigName?: string; -export interface ListPipelinesResponse { /** - *Contains a sorted list of PipelineSummary
objects matching the specified
- * filters. Each PipelineSummary
consists of PipelineArn, PipelineName,
- * ExperimentName, PipelineDescription, CreationTime, LastModifiedTime, LastRunTime, and
- * RoleArn. This list can be empty.
The Git repository associated with the notebook instance as its default code + * repository. This can be either the name of a Git repository stored as a resource in your + * account, or the URL of a Git repository in Amazon Web Services CodeCommit or in any + * other Git repository. When you open a notebook instance, it opens in the directory that + * contains this repository. For more information, see Associating Git Repositories with SageMaker + * Notebook Instances.
*/ - PipelineSummaries?: PipelineSummary[]; + DefaultCodeRepository?: string; /** - *If the result of the previous ListPipelines
request was truncated,
- * the response includes a NextToken
. To retrieve the next set of pipelines, use the token in the next request.
An array of up to three Git repositories associated with the notebook instance. These + * can be either the names of Git repositories stored as resources in your account, or the + * URL of Git repositories in Amazon Web Services CodeCommit or in any + * other Git repository. These repositories are cloned at the same level as the default + * repository of your notebook instance. For more information, see Associating Git + * Repositories with SageMaker Notebook Instances.
*/ - NextToken?: string; + AdditionalCodeRepositories?: string[]; } -export namespace ListPipelinesResponse { +export namespace NotebookInstanceSummary { /** * @internal */ - export const filterSensitiveLog = (obj: ListPipelinesResponse): any => ({ + export const filterSensitiveLog = (obj: NotebookInstanceSummary): any => ({ ...obj, }); } -export interface ListProcessingJobsRequest { +export interface ListNotebookInstancesOutput { /** - *A filter that returns only processing jobs created after the specified time.
+ *If the response to the previous ListNotebookInstances
request was
+ * truncated, SageMaker returns this token. To retrieve the next set of notebook instances, use
+ * the token in the next request.
A filter that returns only processing jobs created after the specified time.
+ *An array of NotebookInstanceSummary
objects, one for each notebook
+ * instance.
A filter that returns only processing jobs modified after the specified time.
+ * @internal */ - LastModifiedTimeAfter?: Date; + export const filterSensitiveLog = (obj: ListNotebookInstancesOutput): any => ({ + ...obj, + }); +} + +export enum SortPipelineExecutionsBy { + CREATION_TIME = "CreationTime", + PIPELINE_EXECUTION_ARN = "PipelineExecutionArn", +} +export interface ListPipelineExecutionsRequest { /** - *A filter that returns only processing jobs modified before the specified time.
+ *The name of the pipeline.
*/ - LastModifiedTimeBefore?: Date; + PipelineName: string | undefined; /** - *A string in the processing job name. This filter returns only processing jobs whose - * name contains the specified string.
+ *A filter that returns the pipeline executions that were created after a specified + * time.
*/ - NameContains?: string; + CreatedAfter?: Date; /** - *A filter that retrieves only processing jobs with a specific status.
+ *A filter that returns the pipeline executions that were created before a specified + * time.
*/ - StatusEquals?: ProcessingJobStatus | string; + CreatedBefore?: Date; /** - *The field to sort results by. The default is CreationTime
.
The field by which to sort results. The default is CreatedTime
.
The sort order for results. The default is Ascending
.
The sort order for results.
*/ SortOrder?: SortOrder | string; /** - *If the result of the previous ListProcessingJobs
request was truncated,
- * the response includes a NextToken
. To retrieve the next set of processing
- * jobs, use the token in the next request.
If the result of the previous ListPipelineExecutions
request was truncated,
+ * the response includes a NextToken
. To retrieve the next set of pipeline executions, use the token in the next request.
The maximum number of processing jobs to return in the response.
+ *The maximum number of pipeline executions to return in the response.
*/ MaxResults?: number; } -export namespace ListProcessingJobsRequest { +export namespace ListPipelineExecutionsRequest { /** * @internal */ - export const filterSensitiveLog = (obj: ListProcessingJobsRequest): any => ({ + export const filterSensitiveLog = (obj: ListPipelineExecutionsRequest): any => ({ ...obj, }); } /** - *Summary of information about a processing job.
+ *A pipeline execution summary.
*/ -export interface ProcessingJobSummary { - /** - *The name of the processing job.
- */ - ProcessingJobName: string | undefined; - - /** - *The Amazon Resource Name (ARN) of the processing job..
- */ - ProcessingJobArn: string | undefined; - +export interface PipelineExecutionSummary { /** - *The time at which the processing job was created.
+ *The Amazon Resource Name (ARN) of the pipeline execution.
*/ - CreationTime: Date | undefined; + PipelineExecutionArn?: string; /** - *The time at which the processing job completed.
+ *The start time of the pipeline execution.
*/ - ProcessingEndTime?: Date; + StartTime?: Date; /** - *A timestamp that indicates the last time the processing job was modified.
+ *The status of the pipeline execution.
*/ - LastModifiedTime?: Date; + PipelineExecutionStatus?: PipelineExecutionStatus | string; /** - *The status of the processing job.
+ *The description of the pipeline execution.
*/ - ProcessingJobStatus: ProcessingJobStatus | string | undefined; + PipelineExecutionDescription?: string; /** - *A string, up to one KB in size, that contains the reason a processing job failed, if - * it failed.
+ *The display name of the pipeline execution.
*/ - FailureReason?: string; + PipelineExecutionDisplayName?: string; /** - *An optional string, up to one KB in size, that contains metadata from the processing - * container when the processing job exits.
+ *A message generated by SageMaker Pipelines describing why the pipeline execution failed.
*/ - ExitMessage?: string; + PipelineExecutionFailureReason?: string; } -export namespace ProcessingJobSummary { +export namespace PipelineExecutionSummary { /** * @internal */ - export const filterSensitiveLog = (obj: ProcessingJobSummary): any => ({ + export const filterSensitiveLog = (obj: PipelineExecutionSummary): any => ({ ...obj, }); } -export interface ListProcessingJobsResponse { +export interface ListPipelineExecutionsResponse { /** - *An array of ProcessingJobSummary
objects, each listing a processing
- * job.
Contains a sorted list of pipeline execution summary objects matching the specified + * filters. Each run summary includes the Amazon Resource Name (ARN) of the pipeline execution, the run date, + * and the status. This list can be empty.
*/ - ProcessingJobSummaries: ProcessingJobSummary[] | undefined; + PipelineExecutionSummaries?: PipelineExecutionSummary[]; /** - *If the response is truncated, Amazon SageMaker returns this token. To retrieve the next set of - * processing jobs, use it in the subsequent request.
+ *If the result of the previous ListPipelineExecutions
request was truncated,
+ * the response includes a NextToken
. To retrieve the next set of pipeline executions, use the token in the next request.
The Amazon Resource Name (ARN) of the pipeline execution.
+ */ + PipelineExecutionArn?: string; + + /** + *If the result of the previous ListPipelineExecutionSteps
request was truncated,
+ * the response includes a NextToken
. To retrieve the next set of pipeline execution steps, use the token in the next request.
The maximum number of pipeline execution steps to return in the response.
+ */ + MaxResults?: number; + + /** + *The field by which to sort results. The default is CreatedTime
.
Metadata for Model steps.
+ */ +export interface ModelStepMetadata { + /** + *The Amazon Resource Name (ARN) of the created model.
+ */ + Arn?: string; +} + +export namespace ModelStepMetadata { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ModelStepMetadata): any => ({ + ...obj, + }); +} + +/** + *Metadata for a processing job step.
+ */ +export interface ProcessingJobStepMetadata { + /** + *The Amazon Resource Name (ARN) of the processing job.
+ */ + Arn?: string; +} + +export namespace ProcessingJobStepMetadata { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ProcessingJobStepMetadata): any => ({ + ...obj, + }); +} + +/** + *Container for the metadata for a Quality check step. For more information, see + * the topic on QualityCheck step in the Amazon SageMaker Developer Guide. + *
+ */ +export interface QualityCheckStepMetadata { + /** + *The type of the Quality check step.
+ */ + CheckType?: string; + + /** + *The Amazon S3 URI of the baseline statistics file used for the drift check.
+ */ + BaselineUsedForDriftCheckStatistics?: string; + + /** + *The Amazon S3 URI of the baseline constraints file used for the drift check.
+ */ + BaselineUsedForDriftCheckConstraints?: string; + + /** + *The Amazon S3 URI of the newly calculated baseline statistics file.
+ */ + CalculatedBaselineStatistics?: string; + + /** + *The Amazon S3 URI of the newly calculated baseline constraints file.
+ */ + CalculatedBaselineConstraints?: string; + + /** + *The model package group name.
+ */ + ModelPackageGroupName?: string; + + /** + *The Amazon S3 URI of violation report if violations are detected.
+ */ + ViolationReport?: string; + + /** + *The Amazon Resource Name (ARN) of the Quality check processing job that was run by this step execution.
+ */ + CheckJobArn?: string; + + /** + *This flag indicates if the drift check against the previous baseline will be skipped or not.
+ * If it is set to False
, the previous baseline of the configured check type must be available.
This flag indicates if a newly calculated baseline can be accessed through step properties
+ * BaselineUsedForDriftCheckConstraints
and BaselineUsedForDriftCheckStatistics
.
+ * If it is set to False
, the previous baseline of the configured check type must also be available.
+ * These can be accessed through the BaselineUsedForDriftCheckConstraints
and
+ * BaselineUsedForDriftCheckStatistics
properties.
Metadata for a register model job step.
+ */ +export interface RegisterModelStepMetadata { + /** + *The Amazon Resource Name (ARN) of the model package.
+ */ + Arn?: string; +} + +export namespace RegisterModelStepMetadata { + /** + * @internal + */ + export const filterSensitiveLog = (obj: RegisterModelStepMetadata): any => ({ + ...obj, + }); +} + +/** + *Metadata for a training job step.
+ */ +export interface TrainingJobStepMetadata { + /** + *The Amazon Resource Name (ARN) of the training job that was run by this step execution.
+ */ + Arn?: string; +} + +export namespace TrainingJobStepMetadata { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TrainingJobStepMetadata): any => ({ + ...obj, + }); +} + +/** + *Metadata for a transform job step.
+ */ +export interface TransformJobStepMetadata { + /** + *The Amazon Resource Name (ARN) of the transform job that was run by this step execution.
+ */ + Arn?: string; +} + +export namespace TransformJobStepMetadata { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TransformJobStepMetadata): any => ({ + ...obj, + }); +} + +/** + *Metadata for a tuning step.
+ */ +export interface TuningJobStepMetaData { + /** + *The Amazon Resource Name (ARN) of the tuning job that was run by this step execution.
+ */ + Arn?: string; +} + +export namespace TuningJobStepMetaData { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TuningJobStepMetaData): any => ({ + ...obj, + }); +} + +/** + *Metadata for a step execution.
+ */ +export interface PipelineExecutionStepMetadata { + /** + *The Amazon Resource Name (ARN) of the training job that was run by this step execution.
+ */ + TrainingJob?: TrainingJobStepMetadata; + + /** + *The Amazon Resource Name (ARN) of the processing job that was run by this step execution.
+ */ + ProcessingJob?: ProcessingJobStepMetadata; + + /** + *The Amazon Resource Name (ARN) of the transform job that was run by this step execution.
+ */ + TransformJob?: TransformJobStepMetadata; + + /** + *The Amazon Resource Name (ARN) of the tuning job that was run by this step execution.
+ */ + TuningJob?: TuningJobStepMetaData; + + /** + *The Amazon Resource Name (ARN) of the model that was created by this step execution.
+ */ + Model?: ModelStepMetadata; + + /** + *The Amazon Resource Name (ARN) of the model package the model was registered to by this step execution.
+ */ + RegisterModel?: RegisterModelStepMetadata; + + /** + *The outcome of the condition evaluation that was run by this step execution.
+ */ + Condition?: ConditionStepMetadata; + + /** + *The URL of the Amazon SQS queue used by this step execution, the pipeline generated token, + * and a list of output parameters.
+ */ + Callback?: CallbackStepMetadata; + + /** + *The Amazon Resource Name (ARN) of the Lambda function that was run by this step execution and a list of + * output parameters.
+ */ + Lambda?: LambdaStepMetadata; + + /** + *The configurations and outcomes of the check step execution. This includes:
+ *The type of the check conducted,
+ *The Amazon S3 URIs of baseline constraints and statistics files to be used for the drift check.
+ *The Amazon S3 URIs of newly calculated baseline constraints and statistics.
+ *The model package group name provided.
+ *The Amazon S3 URI of the violation report if violations detected.
+ *The Amazon Resource Name (ARN) of check processing job initiated by the step execution.
+ *The boolean flags indicating if the drift check is skipped.
+ *If step property BaselineUsedForDriftCheck
is set the same as
+ * CalculatedBaseline
.
Container for the metadata for a Clarify check step. The configurations + * and outcomes of the check step execution. This includes:
+ *The type of the check conducted,
+ *The Amazon S3 URIs of baseline constraints and statistics files to be used for the drift check.
+ *The Amazon S3 URIs of newly calculated baseline constraints and statistics.
+ *The model package group name provided.
+ *The Amazon S3 URI of the violation report if violations detected.
+ *The Amazon Resource Name (ARN) of check processing job initiated by the step execution.
+ *The boolean flags indicating if the drift check is skipped.
+ *If step property BaselineUsedForDriftCheck
is set the same as
+ * CalculatedBaseline
.
The configurations and outcomes of an EMR step execution.
+ */ + EMR?: EMRStepMetadata; + + /** + *The configurations and outcomes of a Fail step execution.
+ */ + Fail?: FailStepMetadata; +} + +export namespace PipelineExecutionStepMetadata { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PipelineExecutionStepMetadata): any => ({ + ...obj, + }); +} + +export enum StepStatus { + EXECUTING = "Executing", + FAILED = "Failed", + STARTING = "Starting", + STOPPED = "Stopped", + STOPPING = "Stopping", + SUCCEEDED = "Succeeded", +} + +/** + *An execution of a step in a pipeline.
+ */ +export interface PipelineExecutionStep { + /** + *The name of the step that is executed.
+ */ + StepName?: string; + + /** + *The display name of the step.
+ */ + StepDisplayName?: string; + + /** + *The description of the step.
+ */ + StepDescription?: string; + + /** + *The time that the step started executing.
+ */ + StartTime?: Date; + + /** + *The time that the step stopped executing.
+ */ + EndTime?: Date; + + /** + *The status of the step execution.
+ */ + StepStatus?: StepStatus | string; + + /** + *If this pipeline execution step was cached, details on the cache hit.
+ */ + CacheHitResult?: CacheHitResult; + + /** + *The current attempt of the execution step. For more information, see Retry Policy for SageMaker Pipelines steps.
+ */ + AttemptCount?: number; + + /** + *The reason why the step failed execution. This is only returned if the step failed its execution.
+ */ + FailureReason?: string; + + /** + *Metadata for the step execution.
+ */ + Metadata?: PipelineExecutionStepMetadata; +} + +export namespace PipelineExecutionStep { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PipelineExecutionStep): any => ({ + ...obj, + }); +} + +export interface ListPipelineExecutionStepsResponse { + /** + *A list of PipeLineExecutionStep
objects. Each
+ * PipeLineExecutionStep
consists of StepName, StartTime, EndTime, StepStatus,
+ * and Metadata. Metadata is an object with properties for each job that contains relevant
+ * information about the job created by the step.
If the result of the previous ListPipelineExecutionSteps
request was truncated,
+ * the response includes a NextToken
. To retrieve the next set of pipeline execution steps, use the token in the next request.
The Amazon Resource Name (ARN) of the pipeline execution.
+ */ + PipelineExecutionArn: string | undefined; + + /** + *If the result of the previous ListPipelineParametersForExecution
request was truncated,
+ * the response includes a NextToken
. To retrieve the next set of parameters, use the token in the next request.
The maximum number of parameters to return in the response.
+ */ + MaxResults?: number; +} + +export namespace ListPipelineParametersForExecutionRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListPipelineParametersForExecutionRequest): any => ({ + ...obj, + }); +} + +/** + *Assigns a value to a named Pipeline parameter.
+ */ +export interface Parameter { + /** + *The name of the parameter to assign a value to. This + * parameter name must match a named parameter in the + * pipeline definition.
+ */ + Name: string | undefined; + + /** + *The literal value for the parameter.
+ */ + Value: string | undefined; +} + +export namespace Parameter { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Parameter): any => ({ + ...obj, + }); +} + +export interface ListPipelineParametersForExecutionResponse { + /** + *Contains a list of pipeline parameters. This list can be empty.
+ */ + PipelineParameters?: Parameter[]; + + /** + *If the result of the previous ListPipelineParametersForExecution
request was truncated,
+ * the response includes a NextToken
. To retrieve the next set of parameters, use the token in the next request.
The prefix of the pipeline name.
+ */ + PipelineNamePrefix?: string; + + /** + *A filter that returns the pipelines that were created after a specified + * time.
+ */ + CreatedAfter?: Date; + + /** + *A filter that returns the pipelines that were created before a specified + * time.
+ */ + CreatedBefore?: Date; + + /** + *The field by which to sort results. The default is CreatedTime
.
The sort order for results.
+ */ + SortOrder?: SortOrder | string; + + /** + *If the result of the previous ListPipelines
request was truncated,
+ * the response includes a NextToken
. To retrieve the next set of pipelines, use the token in the next request.
The maximum number of pipelines to return in the response.
+ */ + MaxResults?: number; +} + +export namespace ListPipelinesRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListPipelinesRequest): any => ({ + ...obj, + }); +} + +/** + *A summary of a pipeline.
+ */ +export interface PipelineSummary { + /** + *The Amazon Resource Name (ARN) of the pipeline.
+ */ + PipelineArn?: string; + + /** + *The name of the pipeline.
+ */ + PipelineName?: string; + + /** + *The display name of the pipeline.
+ */ + PipelineDisplayName?: string; + + /** + *The description of the pipeline.
+ */ + PipelineDescription?: string; + + /** + *The Amazon Resource Name (ARN) that the pipeline used to execute.
+ */ + RoleArn?: string; + + /** + *The creation time of the pipeline.
+ */ + CreationTime?: Date; + + /** + *The time that the pipeline was last modified.
+ */ + LastModifiedTime?: Date; + + /** + *The last time that a pipeline execution began.
+ */ + LastExecutionTime?: Date; +} + +export namespace PipelineSummary { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PipelineSummary): any => ({ + ...obj, + }); +} + +export interface ListPipelinesResponse { + /** + *Contains a sorted list of PipelineSummary
objects matching the specified
+ * filters. Each PipelineSummary
consists of PipelineArn, PipelineName,
+ * ExperimentName, PipelineDescription, CreationTime, LastModifiedTime, LastRunTime, and
+ * RoleArn. This list can be empty.
If the result of the previous ListPipelines
request was truncated,
+ * the response includes a NextToken
. To retrieve the next set of pipelines, use the token in the next request.
A filter that returns only processing jobs created after the specified time.
+ */ + CreationTimeAfter?: Date; + + /** + *A filter that returns only processing jobs created before the specified time.
+ */ + CreationTimeBefore?: Date; + + /** + *A filter that returns only processing jobs modified after the specified time.
+ */ + LastModifiedTimeAfter?: Date; + + /** + *A filter that returns only processing jobs modified before the specified time.
+ */ + LastModifiedTimeBefore?: Date; + + /** + *A string in the processing job name. This filter returns only processing jobs whose + * name contains the specified string.
+ */ + NameContains?: string; + + /** + *A filter that retrieves only processing jobs with a specific status.
+ */ + StatusEquals?: ProcessingJobStatus | string; + + /** + *The field to sort results by. The default is CreationTime
.
The sort order for results. The default is Ascending
.
If the result of the previous ListProcessingJobs
request was truncated,
+ * the response includes a NextToken
. To retrieve the next set of processing
+ * jobs, use the token in the next request.
The maximum number of processing jobs to return in the response.
+ */ + MaxResults?: number; +} + +export namespace ListProcessingJobsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListProcessingJobsRequest): any => ({ + ...obj, + }); +} + +/** + *Summary of information about a processing job.
+ */ +export interface ProcessingJobSummary { + /** + *The name of the processing job.
+ */ + ProcessingJobName: string | undefined; + + /** + *The Amazon Resource Name (ARN) of the processing job.
+ */ + ProcessingJobArn: string | undefined; + + /** + *The time at which the processing job was created.
+ */ + CreationTime: Date | undefined; + + /** + *The time at which the processing job completed.
+ */ + ProcessingEndTime?: Date; + + /** + *A timestamp that indicates the last time the processing job was modified.
+ */ + LastModifiedTime?: Date; + + /** + *The status of the processing job.
+ */ + ProcessingJobStatus: ProcessingJobStatus | string | undefined; + + /** + *A string, up to one KB in size, that contains the reason a processing job failed, if + * it failed.
+ */ + FailureReason?: string; + + /** + *An optional string, up to one KB in size, that contains metadata from the processing + * container when the processing job exits.
+ */ + ExitMessage?: string; +} + +export namespace ProcessingJobSummary { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ProcessingJobSummary): any => ({ + ...obj, + }); +} + +export interface ListProcessingJobsResponse { + /** + *An array of ProcessingJobSummary
objects, each listing a processing
+ * job.
If the response is truncated, Amazon SageMaker returns this token. To retrieve the next set of + * processing jobs, use it in the subsequent request.
+ */ + NextToken?: string; +} + +export namespace ListProcessingJobsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListProcessingJobsResponse): any => ({ + ...obj, + }); +} + export enum ProjectSortBy { CREATION_TIME = "CreationTime", NAME = "Name", @@ -237,6 +1046,63 @@ export namespace ListProjectsOutput { }); } +export interface ListStageDevicesRequest { + /** + *The response from the last list when returning a list large enough to need tokening.
+ */ + NextToken?: string; + + /** + *The maximum number of requests to select.
+ */ + MaxResults?: number; + + /** + *The name of the edge deployment plan.
+ */ + EdgeDeploymentPlanName: string | undefined; + + /** + *Toggle for excluding devices deployed in other stages.
+ */ + ExcludeDevicesDeployedInOtherStage?: boolean; + + /** + *The name of the stage in the deployment.
+ */ + StageName: string | undefined; +} + +export namespace ListStageDevicesRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListStageDevicesRequest): any => ({ + ...obj, + }); +} + +export interface ListStageDevicesResponse { + /** + *List of summaries of devices allocated to the stage.
+ */ + DeviceDeploymentSummaries: DeviceDeploymentSummary[] | undefined; + + /** + *The token to use when calling the next page of results.
+ */ + NextToken?: string; +} + +export namespace ListStageDevicesResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListStageDevicesResponse): any => ({ + ...obj, + }); +} + export enum StudioLifecycleConfigSortKey { CreationTime = "CreationTime", LastModifiedTime = "LastModifiedTime", @@ -2230,7 +3096,7 @@ export interface QueryLineageRequest { /** *A list of resource Amazon Resource Name (ARN) that represent the starting point for your lineage query.
*/ - StartArns: string[] | undefined; + StartArns?: string[]; /** *Associations between lineage entities have a direction. This parameter determines the direction from the @@ -3563,6 +4429,27 @@ export namespace SendPipelineExecutionStepSuccessResponse { }); } +export interface StartEdgeDeploymentStageRequest { + /** + *
The name of the edge deployment plan to start.
+ */ + EdgeDeploymentPlanName: string | undefined; + + /** + *The name of the stage to start.
+ */ + StageName: string | undefined; +} + +export namespace StartEdgeDeploymentStageRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: StartEdgeDeploymentStageRequest): any => ({ + ...obj, + }); +} + export interface StartMonitoringScheduleRequest { /** *The name of the schedule to start.
@@ -3686,6 +4573,27 @@ export namespace StopCompilationJobRequest { }); } +export interface StopEdgeDeploymentStageRequest { + /** + *The name of the edge deployment plan to stop.
+ */ + EdgeDeploymentPlanName: string | undefined; + + /** + *The name of the stage to stop.
+ */ + StageName: string | undefined; +} + +export namespace StopEdgeDeploymentStageRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: StopEdgeDeploymentStageRequest): any => ({ + ...obj, + }); +} + export interface StopEdgePackagingJobRequest { /** *The name of the edge packaging job.
diff --git a/clients/client-sagemaker/src/pagination/ListEdgeDeploymentPlansPaginator.ts b/clients/client-sagemaker/src/pagination/ListEdgeDeploymentPlansPaginator.ts new file mode 100644 index 0000000000000..6a4c3e0efb5bf --- /dev/null +++ b/clients/client-sagemaker/src/pagination/ListEdgeDeploymentPlansPaginator.ts @@ -0,0 +1,61 @@ +// smithy-typescript generated code +import { Paginator } from "@aws-sdk/types"; + +import { + ListEdgeDeploymentPlansCommand, + ListEdgeDeploymentPlansCommandInput, + ListEdgeDeploymentPlansCommandOutput, +} from "../commands/ListEdgeDeploymentPlansCommand"; +import { SageMaker } from "../SageMaker"; +import { SageMakerClient } from "../SageMakerClient"; +import { SageMakerPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: SageMakerClient, + input: ListEdgeDeploymentPlansCommandInput, + ...args: any +): PromiseAmazon Web Services Single Sign On (SSO) is a cloud SSO service that makes it easy to centrally manage SSO -access to multiple Amazon Web Services accounts and business applications. This guide provides information on -SSO operations which could be used for access management of Amazon Web Services accounts. For information about -Amazon Web Services SSO features, see the Amazon Web Services Single Sign-On User Guide.
-Many operations in the SSO APIs rely on identifiers for users and groups, known as -principals. For more information about how to work with principals and principal IDs in Amazon Web Services SSO, -see the Amazon Web Services SSO Identity Store API -Reference.
+ ## Installing @@ -33,16 +26,19 @@ using your favorite package manager: The AWS SDK is modulized by clients and commands. To send a request, you only need to import the `SSOAdminClient` and -the commands you need, for example `AttachManagedPolicyToPermissionSetCommand`: +the commands you need, for example `AttachCustomerManagedPolicyReferenceToPermissionSetCommand`: ```js // ES5 example -const { SSOAdminClient, AttachManagedPolicyToPermissionSetCommand } = require("@aws-sdk/client-sso-admin"); +const { + SSOAdminClient, + AttachCustomerManagedPolicyReferenceToPermissionSetCommand, +} = require("@aws-sdk/client-sso-admin"); ``` ```ts // ES6+ example -import { SSOAdminClient, AttachManagedPolicyToPermissionSetCommand } from "@aws-sdk/client-sso-admin"; +import { SSOAdminClient, AttachCustomerManagedPolicyReferenceToPermissionSetCommand } from "@aws-sdk/client-sso-admin"; ``` ### Usage @@ -61,7 +57,7 @@ const client = new SSOAdminClient({ region: "REGION" }); const params = { /** input parameters */ }; -const command = new AttachManagedPolicyToPermissionSetCommand(params); +const command = new AttachCustomerManagedPolicyReferenceToPermissionSetCommand(params); ``` #### Async/await @@ -140,7 +136,7 @@ const client = new AWS.SSOAdmin({ region: "REGION" }); // async/await. try { - const data = await client.attachManagedPolicyToPermissionSet(params); + const data = await client.attachCustomerManagedPolicyReferenceToPermissionSet(params); // process data. } catch (error) { // error handling. @@ -148,7 +144,7 @@ try { // Promises. client - .attachManagedPolicyToPermissionSet(params) + .attachCustomerManagedPolicyReferenceToPermissionSet(params) .then((data) => { // process data. }) @@ -157,7 +153,7 @@ client }); // callbacks. -client.attachManagedPolicyToPermissionSet(params, (err, data) => { +client.attachCustomerManagedPolicyReferenceToPermissionSet(params, (err, data) => { // process err and data. 
}); ``` diff --git a/clients/client-sso-admin/package.json b/clients/client-sso-admin/package.json index 6def89942002d..8ef2e5441695c 100644 --- a/clients/client-sso-admin/package.json +++ b/clients/client-sso-admin/package.json @@ -1,7 +1,7 @@ { "name": "@aws-sdk/client-sso-admin", "description": "AWS SDK for JavaScript Sso Admin Client for Node.js, Browser and React Native", - "version": "3.131.0", + "version": "3.132.0", "scripts": { "build": "concurrently 'yarn:build:cjs' 'yarn:build:es' 'yarn:build:types'", "build:cjs": "tsc -p tsconfig.cjs.json", diff --git a/clients/client-sso-admin/src/SSOAdmin.ts b/clients/client-sso-admin/src/SSOAdmin.ts index 00a9c6a0171af..f32c8c2a7eb71 100644 --- a/clients/client-sso-admin/src/SSOAdmin.ts +++ b/clients/client-sso-admin/src/SSOAdmin.ts @@ -1,6 +1,11 @@ // smithy-typescript generated code import { HttpHandlerOptions as __HttpHandlerOptions } from "@aws-sdk/types"; +import { + AttachCustomerManagedPolicyReferenceToPermissionSetCommand, + AttachCustomerManagedPolicyReferenceToPermissionSetCommandInput, + AttachCustomerManagedPolicyReferenceToPermissionSetCommandOutput, +} from "./commands/AttachCustomerManagedPolicyReferenceToPermissionSetCommand"; import { AttachManagedPolicyToPermissionSetCommand, AttachManagedPolicyToPermissionSetCommandInput, @@ -36,6 +41,11 @@ import { DeleteInstanceAccessControlAttributeConfigurationCommandInput, DeleteInstanceAccessControlAttributeConfigurationCommandOutput, } from "./commands/DeleteInstanceAccessControlAttributeConfigurationCommand"; +import { + DeletePermissionsBoundaryFromPermissionSetCommand, + DeletePermissionsBoundaryFromPermissionSetCommandInput, + DeletePermissionsBoundaryFromPermissionSetCommandOutput, +} from "./commands/DeletePermissionsBoundaryFromPermissionSetCommand"; import { DeletePermissionSetCommand, DeletePermissionSetCommandInput, @@ -66,6 +76,11 @@ import { DescribePermissionSetProvisioningStatusCommandInput, DescribePermissionSetProvisioningStatusCommandOutput, 
} from "./commands/DescribePermissionSetProvisioningStatusCommand"; +import { + DetachCustomerManagedPolicyReferenceFromPermissionSetCommand, + DetachCustomerManagedPolicyReferenceFromPermissionSetCommandInput, + DetachCustomerManagedPolicyReferenceFromPermissionSetCommandOutput, +} from "./commands/DetachCustomerManagedPolicyReferenceFromPermissionSetCommand"; import { DetachManagedPolicyFromPermissionSetCommand, DetachManagedPolicyFromPermissionSetCommandInput, @@ -76,6 +91,11 @@ import { GetInlinePolicyForPermissionSetCommandInput, GetInlinePolicyForPermissionSetCommandOutput, } from "./commands/GetInlinePolicyForPermissionSetCommand"; +import { + GetPermissionsBoundaryForPermissionSetCommand, + GetPermissionsBoundaryForPermissionSetCommandInput, + GetPermissionsBoundaryForPermissionSetCommandOutput, +} from "./commands/GetPermissionsBoundaryForPermissionSetCommand"; import { ListAccountAssignmentCreationStatusCommand, ListAccountAssignmentCreationStatusCommandInput, @@ -96,6 +116,11 @@ import { ListAccountsForProvisionedPermissionSetCommandInput, ListAccountsForProvisionedPermissionSetCommandOutput, } from "./commands/ListAccountsForProvisionedPermissionSetCommand"; +import { + ListCustomerManagedPolicyReferencesInPermissionSetCommand, + ListCustomerManagedPolicyReferencesInPermissionSetCommandInput, + ListCustomerManagedPolicyReferencesInPermissionSetCommandOutput, +} from "./commands/ListCustomerManagedPolicyReferencesInPermissionSetCommand"; import { ListInstancesCommand, ListInstancesCommandInput, @@ -136,6 +161,11 @@ import { PutInlinePolicyToPermissionSetCommandInput, PutInlinePolicyToPermissionSetCommandOutput, } from "./commands/PutInlinePolicyToPermissionSetCommand"; +import { + PutPermissionsBoundaryToPermissionSetCommand, + PutPermissionsBoundaryToPermissionSetCommandInput, + PutPermissionsBoundaryToPermissionSetCommandOutput, +} from "./commands/PutPermissionsBoundaryToPermissionSetCommand"; import { TagResourceCommand, TagResourceCommandInput, 
TagResourceCommandOutput } from "./commands/TagResourceCommand"; import { UntagResourceCommand, @@ -155,18 +185,45 @@ import { import { SSOAdminClient } from "./SSOAdminClient"; /** - *Amazon Web Services Single Sign On (SSO) is a cloud SSO service that makes it easy to centrally manage SSO - * access to multiple Amazon Web Services accounts and business applications. This guide provides information on - * SSO operations which could be used for access management of Amazon Web Services accounts. For information about - * Amazon Web Services SSO features, see the Amazon Web Services Single Sign-On User Guide.
- *Many operations in the SSO APIs rely on identifiers for users and groups, known as - * principals. For more information about how to work with principals and principal IDs in Amazon Web Services SSO, - * see the Amazon Web Services SSO Identity Store API - * Reference.
+ * */ export class SSOAdmin extends SSOAdminClient { /** - *Attaches an IAM managed policy ARN to a permission set.
+ *Attaches the specified IAM customer managed policy to the specified PermissionSet.
+ */ + public attachCustomerManagedPolicyReferenceToPermissionSet( + args: AttachCustomerManagedPolicyReferenceToPermissionSetCommandInput, + options?: __HttpHandlerOptions + ): PromiseAttaches an Amazon Web Services managed IAM policy ARN to a permission set.
*If the permission set is already referenced by one or more account assignments, you will
* need to call Deletes the permissions boundary from a specified PermissionSet. Deletes the specified permission set. Detaches the attached IAM managed policy ARN from the specified permission set. Detaches the specified IAM customer managed policy from the specified PermissionSet. Detaches the attached Amazon Web Services managed IAM policy ARN from the specified permission set. Obtains the permissions boundary for a specified PermissionSet. Lists the status of the Amazon Web Services account assignment creation requests for a specified SSO
* instance. Lists all IAM customer managed policies attached to a specified PermissionSet. Lists the SSO instances that the caller has access to. Lists the IAM managed policy that is attached to a specified permission set. Lists the Amazon Web Services managed IAM policy that is attached to a specified permission set. Attaches an Amazon Web Services managed or customer managed IAM policy to the specified PermissionSet as a permissions boundary. Associates a set of tags with a specified resource. Amazon Web Services Single Sign On (SSO) is a cloud SSO service that makes it easy to centrally manage SSO
- * access to multiple Amazon Web Services accounts and business applications. This guide provides information on
- * SSO operations which could be used for access management of Amazon Web Services accounts. For information about
- * Amazon Web Services SSO features, see the Amazon Web Services Single Sign-On User Guide. Many operations in the SSO APIs rely on identifiers for users and groups, known as
- * principals. For more information about how to work with principals and principal IDs in Amazon Web Services SSO,
- * see the Amazon Web Services SSO Identity Store API
- * Reference. Attaches the specified IAM customer managed policy to the specified PermissionSet. Attaches an IAM managed policy ARN to a permission set. Attaches an Amazon Web Services managed IAM policy ARN to a permission set. If the permission set is already referenced by one or more account assignments, you will
* need to call Deletes the permissions boundary from a specified PermissionSet. Detaches the specified IAM customer managed policy from the specified PermissionSet. Detaches the attached IAM managed policy ARN from the specified permission set. Detaches the attached Amazon Web Services managed IAM policy ARN from the specified permission set. Obtains the permissions boundary for a specified PermissionSet. Lists all IAM customer managed policies attached to a specified PermissionSet. Lists the IAM managed policy that is attached to a specified permission set. Lists the Amazon Web Services managed IAM policy that is attached to a specified permission set. Attaches an Amazon Web Services managed or customer managed IAM policy to the specified PermissionSet as a permissions boundary. The value used for mapping a specified attribute to an identity source. The value used for mapping a specified attribute to an identity source. For more information, see Attribute mappings in the Amazon Web Services Single Sign-On User Guide. A structure that stores the details of the IAM managed policy. Specifies the name and path of the IAM customer managed policy. You must have an IAM policy that matches the name and path in each Amazon Web Services account where you want to deploy your permission set. The name of the IAM managed policy. The name of the policy document. The ARN of the IAM managed policy. For more information about ARNs, see Amazon Resource
- * Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference. The path for the policy. The default is The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource
- * Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference. The ARN of the SSO instance under which the operation will be executed. The ARN of the PermissionSet that the managed policy should be attached
- * to. The ARN of the The IAM managed policy ARN to be attached to a permission set. Specifies the name and path of the IAM customer managed policy. You must have an IAM policy that matches the name and path in each Amazon Web Services account where you want to deploy your permission set. A structure that stores the details of the Amazon Web Services managed IAM policy. The name of the Amazon Web Services managed IAM policy. The ARN of the Amazon Web Services managed IAM policy. For more information about ARNs, see Amazon Resource
+ * Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference. The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource
+ * Names (ARNs) and Amazon Web Services Service Namespaces in the Amazon Web Services General Reference. The ARN of the PermissionSet that the managed policy should be attached
+ * to. The Amazon Web Services managed policy ARN to be attached to a permission set. The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource
@@ -543,12 +604,12 @@ export interface Tag {
/**
* The key for the tag. The value of the tag. The ARN of the SSO instance under which the operation will be executed. The ARN of the The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource
@@ -1067,6 +1160,43 @@ export namespace DescribePermissionSetProvisioningStatusResponse {
});
}
+export interface DetachCustomerManagedPolicyReferenceFromPermissionSetRequest {
+ /**
+ * The ARN of the SSO instance under which the operation will be executed. The ARN of the Specifies the name and path of the IAM customer managed policy. You must have an IAM policy that matches the name and path in each Amazon Web Services account where you want to deploy your permission set. The ARN of the SSO instance under which the operation will be executed. For more information about ARNs, see Amazon Resource
@@ -1081,7 +1211,7 @@ export interface DetachManagedPolicyFromPermissionSetRequest {
PermissionSetArn: string | undefined;
/**
- * The IAM managed policy ARN to be attached to a permission set. The Amazon Web Services managed policy ARN to be detached from a permission set. The ARN of the SSO instance under which the operation will be executed. The ARN of the Specifies the configuration of the Amazon Web Services managed or customer managed policy that you want to set as a permissions boundary. Specify either Policies used as permissions boundaries do not provide permissions. You must also attach an IAM policy to the role. To learn how the effective permissions for a role are evaluated, see IAM JSON policy evaluation logic in the Identity and Access Management User Guide. Specifies the name and path of the IAM customer managed policy. You must have an IAM policy that matches the name and path in each Amazon Web Services account where you want to deploy your permission set. The Amazon Web Services managed policy ARN that you want to attach to a permission set as a permissions boundary. The permissions boundary attached to the specified permission set. The ARN of the SSO instance under which the operation will be executed. The ARN of the The maximum number of results to display for the list call. The pagination token for the list API. Initially the value is null. Use the output of previous API calls to make subsequent calls. Specifies the names and paths of the IAM customer managed policies that you have attached to your permission set. The pagination token for the list API. Initially the value is null. Use the output of previous API calls to make subsequent calls. The maximum number of results to display for the instance. The ARN of the SSO instance under which the operation will be executed. The ARN of the The permissions boundary that you want to attach to a
@@ -426,6 +483,40 @@ export class SSOAdmin extends SSOAdminClient {
}
}
+ /**
+ *
diff --git a/clients/client-sso-admin/src/commands/DeletePermissionsBoundaryFromPermissionSetCommand.ts b/clients/client-sso-admin/src/commands/DeletePermissionsBoundaryFromPermissionSetCommand.ts
new file mode 100644
index 0000000000000..abd21f373fd66
--- /dev/null
+++ b/clients/client-sso-admin/src/commands/DeletePermissionsBoundaryFromPermissionSetCommand.ts
@@ -0,0 +1,111 @@
+// smithy-typescript generated code
+import { getSerdePlugin } from "@aws-sdk/middleware-serde";
+import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http";
+import { Command as $Command } from "@aws-sdk/smithy-client";
+import {
+ FinalizeHandlerArguments,
+ Handler,
+ HandlerExecutionContext,
+ HttpHandlerOptions as __HttpHandlerOptions,
+ MetadataBearer as __MetadataBearer,
+ MiddlewareStack,
+ SerdeContext as __SerdeContext,
+} from "@aws-sdk/types";
+
+import {
+ DeletePermissionsBoundaryFromPermissionSetRequest,
+ DeletePermissionsBoundaryFromPermissionSetResponse,
+} from "../models/models_0";
+import {
+ deserializeAws_json1_1DeletePermissionsBoundaryFromPermissionSetCommand,
+ serializeAws_json1_1DeletePermissionsBoundaryFromPermissionSetCommand,
+} from "../protocols/Aws_json1_1";
+import { ServiceInputTypes, ServiceOutputTypes, SSOAdminClientResolvedConfig } from "../SSOAdminClient";
+
+export interface DeletePermissionsBoundaryFromPermissionSetCommandInput
+ extends DeletePermissionsBoundaryFromPermissionSetRequest {}
+export interface DeletePermissionsBoundaryFromPermissionSetCommandOutput
+ extends DeletePermissionsBoundaryFromPermissionSetResponse,
+ __MetadataBearer {}
+
+/**
+ *
/
. For more information, see Friendly names and paths in the Identity and Access Management user guide.PermissionSet
.PermissionSet
.PermissionSet
.PermissionSet
.CustomerManagedPolicyReference
+ * to use the name and path of a customer managed policy, or ManagedPolicyArn
to use the ARN of an Amazon Web Services managed IAM policy. A permissions boundary represents the maximum permissions that any policy
+ * can grant your role. For more information, see Permissions boundaries for IAM entities in the Identity and Access Management User Guide.PermissionSet
.PermissionSet
.PermissionSet
.