diff --git a/.changes/1.20.48.json b/.changes/1.20.48.json new file mode 100644 index 0000000000..485178b9ca --- /dev/null +++ b/.changes/1.20.48.json @@ -0,0 +1,37 @@ +[ + { + "category": "``lookoutequipment``", + "description": "This release introduces support for Amazon Lookout for Equipment.", + "type": "api-change" + }, + { + "category": "``kinesis-video-archived-media``", + "description": "Documentation updates for archived.kinesisvideo", + "type": "api-change" + }, + { + "category": "``robomaker``", + "description": "This release allows RoboMaker customers to specify custom tools to run with their simulation job", + "type": "api-change" + }, + { + "category": "``appstream``", + "description": "This release provides support for image updates", + "type": "api-change" + }, + { + "category": "``ram``", + "description": "Documentation updates for AWS RAM resource sharing", + "type": "api-change" + }, + { + "category": "``customer-profiles``", + "description": "Documentation updates for Put-Integration API", + "type": "api-change" + }, + { + "category": "``autoscaling``", + "description": "Amazon EC2 Auto Scaling announces Warm Pools that help applications to scale out faster by pre-initializing EC2 instances and save money by requiring fewer continuously running instances", + "type": "api-change" + } +] \ No newline at end of file diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 307354ba65..5ac79b5569 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,18 @@ CHANGELOG ========= +1.20.48 +======= + +* api-change:``lookoutequipment``: This release introduces support for Amazon Lookout for Equipment. 
+* api-change:``kinesis-video-archived-media``: Documentation updates for archived.kinesisvideo +* api-change:``robomaker``: This release allows RoboMaker customers to specify custom tools to run with their simulation job +* api-change:``appstream``: This release provides support for image updates +* api-change:``ram``: Documentation updates for AWS RAM resource sharing +* api-change:``customer-profiles``: Documentation updates for Put-Integration API +* api-change:``autoscaling``: Amazon EC2 Auto Scaling announces Warm Pools that help applications to scale out faster by pre-initializing EC2 instances and save money by requiring fewer continuously running instances + + 1.20.47 ======= diff --git a/botocore/__init__.py b/botocore/__init__.py index 4dd8e99f9e..80733a8b78 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import re import logging -__version__ = '1.20.47' +__version__ = '1.20.48' class NullHandler(logging.Handler): diff --git a/botocore/data/appstream/2016-12-01/service-2.json b/botocore/data/appstream/2016-12-01/service-2.json index 1580939eda..171df1ec63 100644 --- a/botocore/data/appstream/2016-12-01/service-2.json +++ b/botocore/data/appstream/2016-12-01/service-2.json @@ -190,6 +190,25 @@ ], "documentation":"

Creates a temporary URL to start an AppStream 2.0 streaming session for the specified user. A streaming URL enables application streaming to be tested without user setup.

" }, + "CreateUpdatedImage":{ + "name":"CreateUpdatedImage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateUpdatedImageRequest"}, + "output":{"shape":"CreateUpdatedImageResult"}, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"InvalidAccountStatusException"}, + {"shape":"OperationNotPermittedException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"IncompatibleImageException"} + ], + "documentation":"

Creates a new image with the latest Windows operating system updates, driver updates, and AppStream 2.0 agent software.

For more information, see the \"Update an Image by Using Managed AppStream 2.0 Image Updates\" section in Administer Your AppStream 2.0 Images, in the Amazon AppStream 2.0 Administration Guide.

" + }, "CreateUsageReportSubscription":{ "name":"CreateUsageReportSubscription", "http":{ @@ -1339,6 +1358,49 @@ } } }, + "CreateUpdatedImageRequest":{ + "type":"structure", + "required":[ + "existingImageName", + "newImageName" + ], + "members":{ + "existingImageName":{ + "shape":"Name", + "documentation":"

The name of the image to update.

" + }, + "newImageName":{ + "shape":"Name", + "documentation":"

The name of the new image. The name must be unique within the AWS account and Region.

" + }, + "newImageDescription":{ + "shape":"Description", + "documentation":"

The description to display for the new image.

" + }, + "newImageDisplayName":{ + "shape":"DisplayName", + "documentation":"

The name to display for the new image.

" + }, + "newImageTags":{ + "shape":"Tags", + "documentation":"

The tags to associate with the new image. A tag is a key-value pair, and the value is optional. For example, Environment=Test. If you do not specify a value, Environment=.

Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following special characters:

_ . : / = + \\ - @

If you do not specify a value, the value is set to an empty string.

For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Administration Guide.

" + }, + "dryRun":{ + "shape":"Boolean", + "documentation":"

Indicates whether to display the status of image update availability before AppStream 2.0 initiates the process of creating a new updated image. If this value is set to true, AppStream 2.0 displays whether image updates are available. If this value is set to false, AppStream 2.0 initiates the process of creating a new updated image without displaying whether image updates are available.

" + } + } + }, + "CreateUpdatedImageResult":{ + "type":"structure", + "members":{ + "image":{"shape":"Image"}, + "canUpdateImage":{ + "shape":"Boolean", + "documentation":"

Indicates whether a new image can be created.

" + } + } + }, "CreateUsageReportSubscriptionRequest":{ "type":"structure", "members":{ @@ -2279,6 +2341,10 @@ "ImagePermissions":{ "shape":"ImagePermissions", "documentation":"

The permissions to provide to the destination AWS account for the specified image.

" + }, + "ImageErrors":{ + "shape":"ResourceErrors", + "documentation":"

Describes the errors that are returned when a new image can't be created.

" } }, "documentation":"

Describes an image.

" @@ -2374,7 +2440,9 @@ "REBOOTING", "SNAPSHOTTING", "DELETING", - "FAILED" + "FAILED", + "UPDATING", + "PENDING_QUALIFICATION" ] }, "ImageBuilderStateChangeReason":{ @@ -2455,7 +2523,7 @@ "members":{ "Message":{"shape":"ErrorMessage"} }, - "documentation":"

The image does not support storage connectors.

", + "documentation":"

The image can't be updated because it's not compatible for updates.

", "exception":true }, "Integer":{"type":"integer"}, diff --git a/botocore/data/autoscaling/2011-01-01/service-2.json b/botocore/data/autoscaling/2011-01-01/service-2.json index 4b65e552f3..971e8e859a 100644 --- a/botocore/data/autoscaling/2011-01-01/service-2.json +++ b/botocore/data/autoscaling/2011-01-01/service-2.json @@ -263,6 +263,25 @@ ], "documentation":"

Deletes the specified tags.

" }, + "DeleteWarmPool":{ + "name":"DeleteWarmPool", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteWarmPoolType"}, + "output":{ + "shape":"DeleteWarmPoolAnswer", + "resultWrapper":"DeleteWarmPoolResult" + }, + "errors":[ + {"shape":"LimitExceededFault"}, + {"shape":"ResourceContentionFault"}, + {"shape":"ScalingActivityInProgressFault"}, + {"shape":"ResourceInUseFault"} + ], + "documentation":"

Deletes the warm pool for the specified Auto Scaling group.

" + }, "DescribeAccountLimits":{ "name":"DescribeAccountLimits", "http":{ @@ -570,6 +589,24 @@ ], "documentation":"

Describes the termination policies supported by Amazon EC2 Auto Scaling.

For more information, see Controlling which Auto Scaling instances terminate during scale in in the Amazon EC2 Auto Scaling User Guide.

" }, + "DescribeWarmPool":{ + "name":"DescribeWarmPool", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeWarmPoolType"}, + "output":{ + "shape":"DescribeWarmPoolAnswer", + "resultWrapper":"DescribeWarmPoolResult" + }, + "errors":[ + {"shape":"InvalidNextToken"}, + {"shape":"LimitExceededFault"}, + {"shape":"ResourceContentionFault"} + ], + "documentation":"

Describes a warm pool and its instances.

" + }, "DetachInstances":{ "name":"DetachInstances", "http":{ @@ -750,6 +787,23 @@ ], "documentation":"

Creates or updates a scheduled scaling action for an Auto Scaling group.

For more information, see Scheduled scaling in the Amazon EC2 Auto Scaling User Guide.

" }, + "PutWarmPool":{ + "name":"PutWarmPool", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutWarmPoolType"}, + "output":{ + "shape":"PutWarmPoolAnswer", + "resultWrapper":"PutWarmPoolResult" + }, + "errors":[ + {"shape":"LimitExceededFault"}, + {"shape":"ResourceContentionFault"} + ], + "documentation":"

Adds a warm pool to the specified Auto Scaling group. A warm pool is a pool of pre-initialized EC2 instances that sits alongside the Auto Scaling group. Whenever your application needs to scale out, the Auto Scaling group can draw on the warm pool to meet its new desired capacity. For more information, see Warm pools for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

This operation must be called from the Region in which the Auto Scaling group was created. This operation cannot be called on an Auto Scaling group that has a mixed instances policy or a launch template or launch configuration that requests Spot Instances.

You can view the instances in the warm pool using the DescribeWarmPool API call. If you are no longer using a warm pool, you can delete it by calling the DeleteWarmPool API.

" + }, "RecordLifecycleActionHeartbeat":{ "name":"RecordLifecycleActionHeartbeat", "http":{ @@ -764,7 +818,7 @@ "errors":[ {"shape":"ResourceContentionFault"} ], - "documentation":"

Records a heartbeat for the lifecycle action associated with the specified token or instance. This extends the timeout by the length of time defined using the PutLifecycleHook API call.

This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:

  1. (Optional) Create a Lambda function and a rule that allows CloudWatch Events to invoke your Lambda function when Amazon EC2 Auto Scaling launches or terminates instances.

  2. (Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.

  3. Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.

  4. If you need more time, record the lifecycle action heartbeat to keep the instance in a pending state.

  5. If you finish before the timeout period ends, complete the lifecycle action.

For more information, see Auto Scaling lifecycle in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Records a heartbeat for the lifecycle action associated with the specified token or instance. This extends the timeout by the length of time defined using the PutLifecycleHook API call.

This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:

  1. (Optional) Create a Lambda function and a rule that allows CloudWatch Events to invoke your Lambda function when Amazon EC2 Auto Scaling launches or terminates instances.

  2. (Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.

  3. Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.

  4. If you need more time, record the lifecycle action heartbeat to keep the instance in a pending state.

  5. If you finish before the timeout period ends, complete the lifecycle action.

For more information, see Amazon EC2 Auto Scaling lifecycle hooks in the Amazon EC2 Auto Scaling User Guide.

" }, "ResumeProcesses":{ "name":"ResumeProcesses", @@ -819,7 +873,7 @@ {"shape":"LimitExceededFault"}, {"shape":"ResourceContentionFault"} ], - "documentation":"

Updates the instance protection settings of the specified instances.

For more information about preventing instances that are part of an Auto Scaling group from terminating on scale in, see Instance scale-in protection in the Amazon EC2 Auto Scaling User Guide.

If you exceed your maximum limit of instance IDs, which is 50 per Auto Scaling group, the call fails.

" + "documentation":"

Updates the instance protection settings of the specified instances. This operation cannot be called on instances in a warm pool.

For more information about preventing instances that are part of an Auto Scaling group from terminating on scale in, see Instance scale-in protection in the Amazon EC2 Auto Scaling User Guide.

If you exceed your maximum limit of instance IDs, which is 50 per Auto Scaling group, the call fails.

" }, "StartInstanceRefresh":{ "name":"StartInstanceRefresh", @@ -837,7 +891,7 @@ {"shape":"ResourceContentionFault"}, {"shape":"InstanceRefreshInProgressFault"} ], - "documentation":"

Starts a new instance refresh operation, which triggers a rolling replacement of all previously launched instances in the Auto Scaling group with a new group of instances.

If successful, this call creates a new instance refresh request with a unique ID that you can use to track its progress. To query its status, call the DescribeInstanceRefreshes API. To describe the instance refreshes that have already run, call the DescribeInstanceRefreshes API. To cancel an instance refresh operation in progress, use the CancelInstanceRefresh API.

For more information, see Replacing Auto Scaling instances based on an instance refresh in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Starts a new instance refresh operation, which triggers a rolling replacement of previously launched instances in the Auto Scaling group with a new group of instances.

If successful, this call creates a new instance refresh request with a unique ID that you can use to track its progress. To query its status, call the DescribeInstanceRefreshes API. To describe the instance refreshes that have already run, call the DescribeInstanceRefreshes API. To cancel an instance refresh operation in progress, use the CancelInstanceRefresh API.

For more information, see Replacing Auto Scaling instances based on an instance refresh in the Amazon EC2 Auto Scaling User Guide.

" }, "SuspendProcesses":{ "name":"SuspendProcesses", @@ -867,7 +921,7 @@ {"shape":"ScalingActivityInProgressFault"}, {"shape":"ResourceContentionFault"} ], - "documentation":"

Terminates the specified instance and optionally adjusts the desired group size.

This call simply makes a termination request. The instance is not terminated immediately. When an instance is terminated, the instance status changes to terminated. You can't connect to or start an instance after you've terminated it.

If you do not specify the option to decrement the desired capacity, Amazon EC2 Auto Scaling launches instances to replace the ones that are terminated.

By default, Amazon EC2 Auto Scaling balances instances across all Availability Zones. If you decrement the desired capacity, your Auto Scaling group can become unbalanced between Availability Zones. Amazon EC2 Auto Scaling tries to rebalance the group, and rebalancing might terminate instances in other zones. For more information, see Rebalancing activities in the Amazon EC2 Auto Scaling User Guide.

" + "documentation":"

Terminates the specified instance and optionally adjusts the desired group size. This operation cannot be called on instances in a warm pool.

This call simply makes a termination request. The instance is not terminated immediately. When an instance is terminated, the instance status changes to terminated. You can't connect to or start an instance after you've terminated it.

If you do not specify the option to decrement the desired capacity, Amazon EC2 Auto Scaling launches instances to replace the ones that are terminated.

By default, Amazon EC2 Auto Scaling balances instances across all Availability Zones. If you decrement the desired capacity, your Auto Scaling group can become unbalanced between Availability Zones. Amazon EC2 Auto Scaling tries to rebalance the group, and rebalancing might terminate instances in other zones. For more information, see Rebalancing activities in the Amazon EC2 Auto Scaling User Guide.

" }, "UpdateAutoScalingGroup":{ "name":"UpdateAutoScalingGroup", @@ -1223,6 +1277,14 @@ "CapacityRebalance":{ "shape":"CapacityRebalanceEnabled", "documentation":"

Indicates whether Capacity Rebalancing is enabled.

" + }, + "WarmPoolConfiguration":{ + "shape":"WarmPoolConfiguration", + "documentation":"

The warm pool for the group.

" + }, + "WarmPoolSize":{ + "shape":"WarmPoolSize", + "documentation":"

The current size of the warm pool.

" } }, "documentation":"

Describes an Auto Scaling group.

" @@ -1303,7 +1365,7 @@ }, "LifecycleState":{ "shape":"XmlStringMaxLen32", - "documentation":"

The lifecycle state for the instance. The Quarantined state is not used. For information about lifecycle states, see Instance lifecycle in the Amazon EC2 Auto Scaling User Guide.

Valid Values: Pending | Pending:Wait | Pending:Proceed | Quarantined | InService | Terminating | Terminating:Wait | Terminating:Proceed | Terminated | Detaching | Detached | EnteringStandby | Standby

" + "documentation":"

The lifecycle state for the instance. The Quarantined state is not used. For information about lifecycle states, see Instance lifecycle in the Amazon EC2 Auto Scaling User Guide.

Valid Values: Pending | Pending:Wait | Pending:Proceed | Quarantined | InService | Terminating | Terminating:Wait | Terminating:Proceed | Terminated | Detaching | Detached | EnteringStandby | Standby | Warmed:Pending | Warmed:Pending:Wait | Warmed:Pending:Proceed | Warmed:Terminating | Warmed:Terminating:Wait | Warmed:Terminating:Proceed | Warmed:Terminated | Warmed:Stopped | Warmed:Running

" }, "HealthStatus":{ "shape":"XmlStringMaxLen32", @@ -1753,7 +1815,7 @@ }, "ForceDelete":{ "shape":"ForceDelete", - "documentation":"

Specifies that the group is to be deleted along with all instances associated with the group, without waiting for all instances to be terminated. This parameter also deletes any lifecycle actions associated with the group.

" + "documentation":"

Specifies that the group is to be deleted along with all instances associated with the group, without waiting for all instances to be terminated. This parameter also deletes any outstanding lifecycle actions associated with the group.

" } } }, @@ -1837,6 +1899,25 @@ } } }, + "DeleteWarmPoolAnswer":{ + "type":"structure", + "members":{ + } + }, + "DeleteWarmPoolType":{ + "type":"structure", + "required":["AutoScalingGroupName"], + "members":{ + "AutoScalingGroupName":{ + "shape":"XmlStringMaxLen255", + "documentation":"

The name of the Auto Scaling group.

" + }, + "ForceDelete":{ + "shape":"ForceDelete", + "documentation":"

Specifies that the warm pool is to be deleted along with all instances associated with the warm pool, without waiting for all instances to be terminated. This parameter also deletes any outstanding lifecycle actions associated with the warm pool instances.

" + } + } + }, "DescribeAccountLimitsAnswer":{ "type":"structure", "members":{ @@ -2171,6 +2252,41 @@ } } }, + "DescribeWarmPoolAnswer":{ + "type":"structure", + "members":{ + "WarmPoolConfiguration":{ + "shape":"WarmPoolConfiguration", + "documentation":"

The warm pool configuration details.

" + }, + "Instances":{ + "shape":"Instances", + "documentation":"

The instances that are currently in the warm pool.

" + }, + "NextToken":{ + "shape":"XmlString", + "documentation":"

The token for the next set of items to return. (You received this token from a previous call.)

" + } + } + }, + "DescribeWarmPoolType":{ + "type":"structure", + "required":["AutoScalingGroupName"], + "members":{ + "AutoScalingGroupName":{ + "shape":"XmlStringMaxLen255", + "documentation":"

The name of the Auto Scaling group.

" + }, + "MaxRecords":{ + "shape":"MaxRecords", + "documentation":"

The maximum number of instances to return with this call. The maximum value is 50.

" + }, + "NextToken":{ + "shape":"XmlString", + "documentation":"

The token for the next set of instances to return. (You received this token from a previous call.)

" + } + } + }, "DetachInstancesAnswer":{ "type":"structure", "members":{ @@ -2255,7 +2371,7 @@ }, "Metrics":{ "shape":"Metrics", - "documentation":"

Specifies one or more of the following metrics:

If you omit this parameter, all metrics are disabled.

" + "documentation":"

Specifies one or more of the following metrics:

If you omit this parameter, all metrics are disabled.

" } } }, @@ -2304,7 +2420,7 @@ }, "Metrics":{ "shape":"Metrics", - "documentation":"

Specifies which group-level metrics to start collecting. You can specify one or more of the following metrics:

The instance weighting feature supports the following additional metrics:

If you omit this parameter, all metrics are enabled.

" + "documentation":"

Specifies which group-level metrics to start collecting. You can specify one or more of the following metrics:

The instance weighting feature supports the following additional metrics:

The warm pools feature supports the following additional metrics:

If you omit this parameter, all metrics are enabled.

" }, "Granularity":{ "shape":"XmlStringMaxLen255", @@ -2317,7 +2433,7 @@ "members":{ "Metric":{ "shape":"XmlStringMaxLen255", - "documentation":"

One of the following metrics:

" + "documentation":"

One of the following metrics:

" }, "Granularity":{ "shape":"XmlStringMaxLen255", @@ -2592,6 +2708,10 @@ "InstancesToUpdate":{ "shape":"InstancesToUpdate", "documentation":"

The number of instances remaining to update before the instance refresh is complete.

" + }, + "ProgressDetails":{ + "shape":"InstanceRefreshProgressDetails", + "documentation":"

Additional progress details for an Auto Scaling group that has a warm pool.

" } }, "documentation":"

Describes an instance refresh for an Auto Scaling group.

" @@ -2613,6 +2733,34 @@ }, "exception":true }, + "InstanceRefreshLivePoolProgress":{ + "type":"structure", + "members":{ + "PercentageComplete":{ + "shape":"IntPercent", + "documentation":"

The percentage of instances in the Auto Scaling group that have been replaced. For each instance replacement, Amazon EC2 Auto Scaling tracks the instance's health status and warm-up time. When the instance's health status changes to healthy and the specified warm-up time passes, the instance is considered updated and added to the percentage complete.

" + }, + "InstancesToUpdate":{ + "shape":"InstancesToUpdate", + "documentation":"

The number of instances remaining to update.

" + } + }, + "documentation":"

Reports the progress of an instance refresh on instances that are in the Auto Scaling group.

" + }, + "InstanceRefreshProgressDetails":{ + "type":"structure", + "members":{ + "LivePoolProgress":{ + "shape":"InstanceRefreshLivePoolProgress", + "documentation":"

Indicates the progress of an instance refresh on instances that are in the Auto Scaling group.

" + }, + "WarmPoolProgress":{ + "shape":"InstanceRefreshWarmPoolProgress", + "documentation":"

Indicates the progress of an instance refresh on instances that are in the warm pool.

" + } + }, + "documentation":"

Reports the progress of an instance refresh on an Auto Scaling group that has a warm pool. This includes separate details for instances in the warm pool and instances in the Auto Scaling group (the live pool).

" + }, "InstanceRefreshStatus":{ "type":"string", "enum":[ @@ -2624,6 +2772,20 @@ "Cancelled" ] }, + "InstanceRefreshWarmPoolProgress":{ + "type":"structure", + "members":{ + "PercentageComplete":{ + "shape":"IntPercent", + "documentation":"

The percentage of instances in the warm pool that have been replaced. For each instance replacement, Amazon EC2 Auto Scaling tracks the instance's health status and warm-up time. When the instance's health status changes to healthy and the specified warm-up time passes, the instance is considered updated and added to the percentage complete.

" + }, + "InstancesToUpdate":{ + "shape":"InstancesToUpdate", + "documentation":"

The number of instances remaining to update.

" + } + }, + "documentation":"

Reports the progress of an instance refresh on instances that are in the warm pool.

" + }, "InstanceRefreshes":{ "type":"list", "member":{"shape":"InstanceRefresh"} @@ -2998,7 +3160,16 @@ "Detaching", "Detached", "EnteringStandby", - "Standby" + "Standby", + "Warmed:Pending", + "Warmed:Pending:Wait", + "Warmed:Pending:Proceed", + "Warmed:Terminating", + "Warmed:Terminating:Wait", + "Warmed:Terminating:Proceed", + "Warmed:Terminated", + "Warmed:Stopped", + "Warmed:Running" ] }, "LifecycleTransition":{"type":"string"}, @@ -3058,6 +3229,10 @@ "type":"list", "member":{"shape":"LoadBalancerTargetGroupState"} }, + "MaxGroupPreparedCapacity":{ + "type":"integer", + "min":-1 + }, "MaxInstanceLifetime":{"type":"integer"}, "MaxNumberOfAutoScalingGroups":{"type":"integer"}, "MaxNumberOfLaunchConfigurations":{"type":"integer"}, @@ -3067,7 +3242,7 @@ "members":{ "Metric":{ "shape":"XmlStringMaxLen255", - "documentation":"

One of the following metrics:

" + "documentation":"

One of the following metrics:

" } }, "documentation":"

Describes a metric.

" @@ -3468,6 +3643,33 @@ } } }, + "PutWarmPoolAnswer":{ + "type":"structure", + "members":{ + } + }, + "PutWarmPoolType":{ + "type":"structure", + "required":["AutoScalingGroupName"], + "members":{ + "AutoScalingGroupName":{ + "shape":"XmlStringMaxLen255", + "documentation":"

The name of the Auto Scaling group.

" + }, + "MaxGroupPreparedCapacity":{ + "shape":"MaxGroupPreparedCapacity", + "documentation":"

Specifies the total maximum number of instances that are allowed to be in the warm pool or in any state except Terminated for the Auto Scaling group. This is an optional property. Specify it only if the warm pool size should not be determined by the difference between the group's maximum capacity and its desired capacity.

Amazon EC2 Auto Scaling will launch and maintain either the difference between the group's maximum capacity and its desired capacity, if a value for MaxGroupPreparedCapacity is not specified, or the difference between the MaxGroupPreparedCapacity and the desired capacity, if a value for MaxGroupPreparedCapacity is specified.

The size of the warm pool is dynamic. Only when MaxGroupPreparedCapacity and MinSize are set to the same value does the warm pool have an absolute size.

If the desired capacity of the Auto Scaling group is higher than the MaxGroupPreparedCapacity, the capacity of the warm pool is 0. To remove a value that you previously set, include the property but specify -1 for the value.

" + }, + "MinSize":{ + "shape":"WarmPoolMinSize", + "documentation":"

Specifies the minimum number of instances to maintain in the warm pool. This helps you to ensure that there is always a certain number of warmed instances available to handle traffic spikes. Defaults to 0 if not specified.

" + }, + "PoolState":{ + "shape":"WarmPoolState", + "documentation":"

Sets the instance state to transition to after the lifecycle hooks finish. Valid values are: Stopped (default) or Running.

" + } + } + }, "RecordLifecycleActionHeartbeatAnswer":{ "type":"structure", "members":{ @@ -4177,6 +4379,44 @@ "type":"list", "member":{"shape":"XmlString"} }, + "WarmPoolConfiguration":{ + "type":"structure", + "members":{ + "MaxGroupPreparedCapacity":{ + "shape":"MaxGroupPreparedCapacity", + "documentation":"

The total maximum number of instances that are allowed to be in the warm pool or in any state except Terminated for the Auto Scaling group.

" + }, + "MinSize":{ + "shape":"WarmPoolMinSize", + "documentation":"

The minimum number of instances to maintain in the warm pool.

" + }, + "PoolState":{ + "shape":"WarmPoolState", + "documentation":"

The instance state to transition to after the lifecycle actions are complete: Stopped or Running.

" + }, + "Status":{ + "shape":"WarmPoolStatus", + "documentation":"

The status of a warm pool that is marked for deletion.

" + } + }, + "documentation":"

Describes a warm pool configuration.

" + }, + "WarmPoolMinSize":{ + "type":"integer", + "min":0 + }, + "WarmPoolSize":{"type":"integer"}, + "WarmPoolState":{ + "type":"string", + "enum":[ + "Stopped", + "Running" + ] + }, + "WarmPoolStatus":{ + "type":"string", + "enum":["PendingDelete"] + }, "XmlString":{ "type":"string", "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" diff --git a/botocore/data/customer-profiles/2020-08-15/service-2.json b/botocore/data/customer-profiles/2020-08-15/service-2.json index babb5210be..f7cff11557 100644 --- a/botocore/data/customer-profiles/2020-08-15/service-2.json +++ b/botocore/data/customer-profiles/2020-08-15/service-2.json @@ -616,12 +616,28 @@ "ConnectorOperator":{ "type":"structure", "members":{ - "Marketo":{"shape":"MarketoConnectorOperator"}, - "S3":{"shape":"S3ConnectorOperator"}, - "Salesforce":{"shape":"SalesforceConnectorOperator"}, - "ServiceNow":{"shape":"ServiceNowConnectorOperator"}, - "Zendesk":{"shape":"ZendeskConnectorOperator"} - } + "Marketo":{ + "shape":"MarketoConnectorOperator", + "documentation":"

The operation to be performed on the provided Marketo source fields.

" + }, + "S3":{ + "shape":"S3ConnectorOperator", + "documentation":"

The operation to be performed on the provided Amazon S3 source fields.

" + }, + "Salesforce":{ + "shape":"SalesforceConnectorOperator", + "documentation":"

The operation to be performed on the provided Salesforce source fields.

" + }, + "ServiceNow":{ + "shape":"ServiceNowConnectorOperator", + "documentation":"

The operation to be performed on the provided ServiceNow source fields.

" + }, + "Zendesk":{ + "shape":"ZendeskConnectorOperator", + "documentation":"

The operation to be performed on the provided Zendesk source fields.

" + } + }, + "documentation":"

The operation to be performed on the provided source fields.

" }, "ConnectorProfileName":{ "type":"string", @@ -1063,13 +1079,32 @@ "TriggerConfig" ], "members":{ - "Description":{"shape":"FlowDescription"}, - "FlowName":{"shape":"FlowName"}, - "KmsArn":{"shape":"KmsArn"}, - "SourceFlowConfig":{"shape":"SourceFlowConfig"}, - "Tasks":{"shape":"Tasks"}, - "TriggerConfig":{"shape":"TriggerConfig"} - } + "Description":{ + "shape":"FlowDescription", + "documentation":"

A description of the flow you want to create.

" + }, + "FlowName":{ + "shape":"FlowName", + "documentation":"

The specified name of the flow. Use underscores (_) or hyphens (-) only. Spaces are not allowed.

" + }, + "KmsArn":{ + "shape":"KmsArn", + "documentation":"

The Amazon Resource Name of the AWS Key Management Service (KMS) key you provide for encryption.

" + }, + "SourceFlowConfig":{ + "shape":"SourceFlowConfig", + "documentation":"

The configuration that controls how Customer Profiles retrieves data from the source.

" + }, + "Tasks":{ + "shape":"Tasks", + "documentation":"

A list of tasks that Customer Profiles performs while transferring the data in the flow run.

" + }, + "TriggerConfig":{ + "shape":"TriggerConfig", + "documentation":"

The trigger settings that determine how and when the flow runs.

" + } + }, + "documentation":"

The configurations that control how Customer Profiles retrieves data from the source, Amazon AppFlow. Customer Profiles uses this information to create an AppFlow flow on behalf of customers.

" }, "FlowDescription":{ "type":"string", @@ -1316,8 +1351,12 @@ "IncrementalPullConfig":{ "type":"structure", "members":{ - "DatetimeTypeFieldName":{"shape":"DatetimeTypeFieldName"} - } + "DatetimeTypeFieldName":{ + "shape":"DatetimeTypeFieldName", + "documentation":"

A field that specifies the date time or timestamp field as the criteria to use when importing incremental records from the source.

" + } + }, + "documentation":"

Specifies the configuration used when importing incremental records from the source.

" }, "IntegrationList":{ "type":"list", @@ -1738,8 +1777,12 @@ "type":"structure", "required":["Object"], "members":{ - "Object":{"shape":"Object"} - } + "Object":{ + "shape":"Object", + "documentation":"

The object specified in the Marketo flow source.

" + } + }, + "documentation":"

The properties that are applied when Marketo is being used as a source.

" }, "Object":{ "type":"string", @@ -1949,7 +1992,10 @@ "shape":"TagMap", "documentation":"

The tags used to organize, track, or control access for this resource.

" }, - "FlowDefinition":{"shape":"FlowDefinition"} + "FlowDefinition":{ + "shape":"FlowDefinition", + "documentation":"

The configuration that controls how Customer Profiles retrieves data from the source.

" + } } }, "PutIntegrationResponse":{ @@ -2166,9 +2212,16 @@ "type":"structure", "required":["BucketName"], "members":{ - "BucketName":{"shape":"BucketName"}, - "BucketPrefix":{"shape":"BucketPrefix"} - } + "BucketName":{ + "shape":"BucketName", + "documentation":"

The Amazon S3 bucket name where the source files are stored.

" + }, + "BucketPrefix":{ + "shape":"BucketPrefix", + "documentation":"

The object key for the Amazon S3 bucket in which the source files are stored.

" + } + }, + "documentation":"

The properties that are applied when Amazon S3 is being used as the flow source.

" }, "SalesforceConnectorOperator":{ "type":"string", @@ -2200,10 +2253,20 @@ "type":"structure", "required":["Object"], "members":{ - "Object":{"shape":"Object"}, - "EnableDynamicFieldUpdate":{"shape":"boolean"}, - "IncludeDeletedRecords":{"shape":"boolean"} - } + "Object":{ + "shape":"Object", + "documentation":"

The object specified in the Salesforce flow source.

" + }, + "EnableDynamicFieldUpdate":{ + "shape":"boolean", + "documentation":"

The flag that enables dynamic fetching of new (recently added) fields in the Salesforce objects while running a flow.

" + }, + "IncludeDeletedRecords":{ + "shape":"boolean", + "documentation":"

Indicates whether Amazon AppFlow includes deleted files in the flow run.

" + } + }, + "documentation":"

The properties that are applied when Salesforce is being used as a source.

" }, "ScheduleExpression":{ "type":"string", @@ -2219,17 +2282,37 @@ "type":"structure", "required":["ScheduleExpression"], "members":{ - "ScheduleExpression":{"shape":"ScheduleExpression"}, - "DataPullMode":{"shape":"DataPullMode"}, - "ScheduleStartTime":{"shape":"Date"}, - "ScheduleEndTime":{"shape":"Date"}, - "Timezone":{"shape":"Timezone"}, + "ScheduleExpression":{ + "shape":"ScheduleExpression", + "documentation":"

The scheduling expression that determines the rate at which the schedule will run, for example rate (5 minutes).

" + }, + "DataPullMode":{ + "shape":"DataPullMode", + "documentation":"

Specifies whether a scheduled flow has an incremental data transfer or a complete data transfer for each flow run.

" + }, + "ScheduleStartTime":{ + "shape":"Date", + "documentation":"

Specifies the scheduled start time for a scheduled-trigger flow.

" + }, + "ScheduleEndTime":{ + "shape":"Date", + "documentation":"

Specifies the scheduled end time for a scheduled-trigger flow.

" + }, + "Timezone":{ + "shape":"Timezone", + "documentation":"

Specifies the time zone used when referring to the date and time of a scheduled-triggered flow, such as America/New_York.

" + }, "ScheduleOffset":{ "shape":"ScheduleOffset", + "documentation":"

Specifies the optional offset that is added to the time interval for a schedule-triggered flow.

", "box":true }, - "FirstExecutionFrom":{"shape":"Date"} - } + "FirstExecutionFrom":{ + "shape":"Date", + "documentation":"

Specifies the date range for the records to import from the connector in the first flow run.

" + } + }, + "documentation":"

Specifies the configuration details of a scheduled-trigger flow that you define. Currently, these settings only apply to the scheduled-trigger type.

" }, "SearchProfilesRequest":{ "type":"structure", @@ -2310,18 +2393,38 @@ "type":"structure", "required":["Object"], "members":{ - "Object":{"shape":"Object"} - } + "Object":{ + "shape":"Object", + "documentation":"

The object specified in the ServiceNow flow source.

" + } + }, + "documentation":"

The properties that are applied when ServiceNow is being used as a source.

" }, "SourceConnectorProperties":{ "type":"structure", "members":{ - "Marketo":{"shape":"MarketoSourceProperties"}, - "S3":{"shape":"S3SourceProperties"}, - "Salesforce":{"shape":"SalesforceSourceProperties"}, - "ServiceNow":{"shape":"ServiceNowSourceProperties"}, - "Zendesk":{"shape":"ZendeskSourceProperties"} - } + "Marketo":{ + "shape":"MarketoSourceProperties", + "documentation":"

The properties that are applied when Marketo is being used as a source.

" + }, + "S3":{ + "shape":"S3SourceProperties", + "documentation":"

The properties that are applied when Amazon S3 is being used as the flow source.

" + }, + "Salesforce":{ + "shape":"SalesforceSourceProperties", + "documentation":"

The properties that are applied when Salesforce is being used as a source.

" + }, + "ServiceNow":{ + "shape":"ServiceNowSourceProperties", + "documentation":"

The properties that are applied when ServiceNow is being used as a source.

" + }, + "Zendesk":{ + "shape":"ZendeskSourceProperties", + "documentation":"

The properties that are applied when using Zendesk as a flow source.

" + } + }, + "documentation":"

Specifies the information that is required to query a particular Amazon AppFlow connector. Customer Profiles supports Salesforce, Zendesk, Marketo, ServiceNow and Amazon S3.

" }, "SourceConnectorType":{ "type":"string", @@ -2344,11 +2447,24 @@ "SourceConnectorProperties" ], "members":{ - "ConnectorProfileName":{"shape":"ConnectorProfileName"}, - "ConnectorType":{"shape":"SourceConnectorType"}, - "IncrementalPullConfig":{"shape":"IncrementalPullConfig"}, - "SourceConnectorProperties":{"shape":"SourceConnectorProperties"} - } + "ConnectorProfileName":{ + "shape":"ConnectorProfileName", + "documentation":"

The name of the AppFlow connector profile. This name must be unique for each connector profile in the AWS account.

" + }, + "ConnectorType":{ + "shape":"SourceConnectorType", + "documentation":"

The type of connector, such as Salesforce, Marketo, and so on.

" + }, + "IncrementalPullConfig":{ + "shape":"IncrementalPullConfig", + "documentation":"

Defines the configuration for a scheduled incremental data pull. If a valid configuration is provided, the fields specified in the configuration are used when querying for the incremental data pull.

" + }, + "SourceConnectorProperties":{ + "shape":"SourceConnectorProperties", + "documentation":"

Specifies the information that is required to query a particular source connector.

" + } + }, + "documentation":"

Contains information about the configuration of the source connector used in the flow.

" }, "StandardIdentifier":{ "type":"string", @@ -2423,12 +2539,28 @@ "TaskType" ], "members":{ - "ConnectorOperator":{"shape":"ConnectorOperator"}, - "DestinationField":{"shape":"DestinationField"}, - "SourceFields":{"shape":"SourceFields"}, - "TaskProperties":{"shape":"TaskPropertiesMap"}, - "TaskType":{"shape":"TaskType"} - } + "ConnectorOperator":{ + "shape":"ConnectorOperator", + "documentation":"

The operation to be performed on the provided source fields.

" + }, + "DestinationField":{ + "shape":"DestinationField", + "documentation":"

A field in a destination connector, or a field value against which Amazon AppFlow validates a source field.

" + }, + "SourceFields":{ + "shape":"SourceFields", + "documentation":"

The source fields to which a particular task is applied.

" + }, + "TaskProperties":{ + "shape":"TaskPropertiesMap", + "documentation":"

A map used to store task-related information. The service looks for particular information based on the TaskType.

" + }, + "TaskType":{ + "shape":"TaskType", + "documentation":"

Specifies the particular task implementation that Amazon AppFlow performs.

" + } + }, + "documentation":"

A class for modeling different type of tasks. Task implementation varies based on the TaskType.

" }, "TaskPropertiesMap":{ "type":"map", @@ -2469,15 +2601,26 @@ "type":"structure", "required":["TriggerType"], "members":{ - "TriggerType":{"shape":"TriggerType"}, - "TriggerProperties":{"shape":"TriggerProperties"} - } + "TriggerType":{ + "shape":"TriggerType", + "documentation":"

Specifies the type of flow trigger. It can be OnDemand, Scheduled, or Event.

" + }, + "TriggerProperties":{ + "shape":"TriggerProperties", + "documentation":"

Specifies the configuration details of a schedule-triggered flow that you define. Currently, these settings only apply to the Scheduled trigger type.

" + } + }, + "documentation":"

The trigger settings that determine how and when Amazon AppFlow runs the specified flow.

" }, "TriggerProperties":{ "type":"structure", "members":{ - "Scheduled":{"shape":"ScheduledTriggerProperties"} - } + "Scheduled":{ + "shape":"ScheduledTriggerProperties", + "documentation":"

Specifies the configuration details of a schedule-triggered flow that you define.

" + } + }, + "documentation":"

Specifies the configuration details that control the trigger for a flow. Currently, these settings only apply to the Scheduled trigger type.

" }, "TriggerType":{ "type":"string", @@ -2766,8 +2909,12 @@ "type":"structure", "required":["Object"], "members":{ - "Object":{"shape":"Object"} - } + "Object":{ + "shape":"Object", + "documentation":"

The object specified in the Zendesk flow source.

" + } + }, + "documentation":"

The properties that are applied when using Zendesk as a flow source.

" }, "boolean":{"type":"boolean"}, "encryptionKey":{ @@ -2848,5 +2995,5 @@ "pattern":"[a-f0-9]{32}" } }, - "documentation":"Amazon Connect Customer Profiles

Welcome to the Amazon Connect Customer Profiles API Reference. This guide provides information about the Amazon Connect Customer Profiles API, including supported operations, data types, parameters, and schemas.

Amazon Connect Customer Profiles is a unified customer profile for your contact center that has pre-built connectors powered by AppFlow that make it easy to combine customer information from third party applications, such as Salesforce (CRM), ServiceNow (ITSM), and your enterprise resource planning (ERP), with contact history from your Amazon Connect contact center.

If you're new to Amazon Connect, you might find it helpful to also review the Amazon Connect Administrator Guide.

" + "documentation":"Amazon Connect Customer Profiles

Welcome to the Amazon Connect Customer Profiles API Reference. This guide provides information about the Amazon Connect Customer Profiles API, including supported operations, data types, parameters, and schemas.

Amazon Connect Customer Profiles is a unified customer profile for your contact center that has pre-built connectors powered by AppFlow that make it easy to combine customer information from third party applications, such as Salesforce (CRM), ServiceNow (ITSM), and your enterprise resource planning (ERP), with contact history from your Amazon Connect contact center.

If you're new to Amazon Connect, you might find it helpful to also review the Amazon Connect Administrator Guide.

" } diff --git a/botocore/data/kinesis-video-archived-media/2017-09-30/service-2.json b/botocore/data/kinesis-video-archived-media/2017-09-30/service-2.json index 564c813c3d..791ed11965 100644 --- a/botocore/data/kinesis-video-archived-media/2017-09-30/service-2.json +++ b/botocore/data/kinesis-video-archived-media/2017-09-30/service-2.json @@ -412,7 +412,7 @@ }, "DiscontinuityMode":{ "shape":"HLSDiscontinuityMode", - "documentation":"

Specifies when flags marking discontinuities between fragments are added to the media playlists.

Media players typically build a timeline of media content to play, based on the timestamps of each fragment. This means that if there is any overlap or gap between fragments (as is typical if HLSFragmentSelector is set to SERVER_TIMESTAMP), the media player timeline will also have small gaps between fragments in some places, and will overwrite frames in other places. Gaps in the media player timeline can cause playback to stall and overlaps can cause playback to be jittery. When there are discontinuity flags between fragments, the media player is expected to reset the timeline, resulting in the next fragment being played immediately after the previous fragment.

The following modes are supported:

The default is ALWAYS when HLSFragmentSelector is set to SERVER_TIMESTAMP, and NEVER when it is set to PRODUCER_TIMESTAMP.

" + "documentation":"

Specifies when flags marking discontinuities between fragments are added to the media playlists.

Media players typically build a timeline of media content to play, based on the timestamps of each fragment. This means that if there is any overlap or gap between fragments (as is typical if HLSFragmentSelector is set to SERVER_TIMESTAMP), the media player timeline will also have small gaps between fragments in some places, and will overwrite frames in other places. Gaps in the media player timeline can cause playback to stall and overlaps can cause playback to be jittery. When there are discontinuity flags between fragments, the media player is expected to reset the timeline, resulting in the next fragment being played immediately after the previous fragment.

The following modes are supported:

The default is ALWAYS when HLSFragmentSelector is set to SERVER_TIMESTAMP, and NEVER when it is set to PRODUCER_TIMESTAMP.

" }, "DisplayFragmentTimestamp":{ "shape":"HLSDisplayFragmentTimestamp", diff --git a/botocore/data/lookoutequipment/2020-12-15/paginators-1.json b/botocore/data/lookoutequipment/2020-12-15/paginators-1.json new file mode 100644 index 0000000000..ea142457a6 --- /dev/null +++ b/botocore/data/lookoutequipment/2020-12-15/paginators-1.json @@ -0,0 +1,3 @@ +{ + "pagination": {} +} diff --git a/botocore/data/lookoutequipment/2020-12-15/service-2.json b/botocore/data/lookoutequipment/2020-12-15/service-2.json new file mode 100644 index 0000000000..a355bd98c5 --- /dev/null +++ b/botocore/data/lookoutequipment/2020-12-15/service-2.json @@ -0,0 +1,1935 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2020-12-15", + "endpointPrefix":"lookoutequipment", + "jsonVersion":"1.0", + "protocol":"json", + "serviceAbbreviation":"LookoutEquipment", + "serviceFullName":"Amazon Lookout for Equipment", + "serviceId":"LookoutEquipment", + "signatureVersion":"v4", + "targetPrefix":"AWSLookoutEquipmentFrontendService", + "uid":"lookoutequipment-2020-12-15" + }, + "operations":{ + "CreateDataset":{ + "name":"CreateDataset", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDatasetRequest"}, + "output":{"shape":"CreateDatasetResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Creates a container for a collection of data being ingested for analysis. The dataset contains the metadata describing where the data is and what the data actually looks like. In other words, it contains the location of the data source, the data schema, and other information. A dataset also contains any tags associated with the ingested data.

" + }, + "CreateInferenceScheduler":{ + "name":"CreateInferenceScheduler", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateInferenceSchedulerRequest"}, + "output":{"shape":"CreateInferenceSchedulerResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Creates a scheduled inference. Scheduling an inference is setting up a continuous real-time inference plan to analyze new measurement data. When setting up the schedule, you provide an S3 bucket location for the input data, assign it a delimiter between separate entries in the data, set an offset delay if desired, and set the frequency of inferencing. You must also provide an S3 bucket location for the output data.

" + }, + "CreateModel":{ + "name":"CreateModel", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateModelRequest"}, + "output":{"shape":"CreateModelResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Creates an ML model for data inference.

A machine-learning (ML) model is a mathematical model that finds patterns in your data. In Amazon Lookout for Equipment, the model learns the patterns of normal behavior and detects abnormal behavior that could be potential equipment failure (or maintenance events). The models are made by analyzing normal data and abnormalities in machine behavior that have already occurred.

Your model is trained using a portion of the data from your dataset and uses that data to learn patterns of normal behavior and abnormal patterns that lead to equipment failure. Another portion of the data is used to evaluate the model's accuracy.

" + }, + "DeleteDataset":{ + "name":"DeleteDataset", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDatasetRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Deletes a dataset and associated artifacts. The operation will check to see if any inference scheduler or data ingestion job is currently using the dataset, and if there isn't, the dataset, its metadata, and any associated data stored in S3 will be deleted. This does not affect any models that used this dataset for training and evaluation, but does prevent it from being used in the future.

" + }, + "DeleteInferenceScheduler":{ + "name":"DeleteInferenceScheduler", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteInferenceSchedulerRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Deletes an inference scheduler that has been set up. Already processed output results are not affected.

" + }, + "DeleteModel":{ + "name":"DeleteModel", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteModelRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Deletes an ML model currently available for Amazon Lookout for Equipment. This will prevent it from being used with an inference scheduler, even one that is already set up.

" + }, + "DescribeDataIngestionJob":{ + "name":"DescribeDataIngestionJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDataIngestionJobRequest"}, + "output":{"shape":"DescribeDataIngestionJobResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Provides information on a specific data ingestion job such as creation time, dataset ARN, status, and so on.

" + }, + "DescribeDataset":{ + "name":"DescribeDataset", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDatasetRequest"}, + "output":{"shape":"DescribeDatasetResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Provides information on a specified dataset such as the schema location, status, and so on.

" + }, + "DescribeInferenceScheduler":{ + "name":"DescribeInferenceScheduler", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeInferenceSchedulerRequest"}, + "output":{"shape":"DescribeInferenceSchedulerResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Specifies information about the inference scheduler being used, including name, model, status, and associated metadata.

" + }, + "DescribeModel":{ + "name":"DescribeModel", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeModelRequest"}, + "output":{"shape":"DescribeModelResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Provides overall information about a specific ML model, including model name and ARN, dataset, training and evaluation information, status, and so on.

" + }, + "ListDataIngestionJobs":{ + "name":"ListDataIngestionJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDataIngestionJobsRequest"}, + "output":{"shape":"ListDataIngestionJobsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Provides a list of all data ingestion jobs, including dataset name and ARN, S3 location of the input data, status, and so on.

" + }, + "ListDatasets":{ + "name":"ListDatasets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDatasetsRequest"}, + "output":{"shape":"ListDatasetsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Lists all datasets currently available in your account, filtering on the dataset name.

" + }, + "ListInferenceExecutions":{ + "name":"ListInferenceExecutions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListInferenceExecutionsRequest"}, + "output":{"shape":"ListInferenceExecutionsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Lists all inference executions that have been performed by the specified inference scheduler.

" + }, + "ListInferenceSchedulers":{ + "name":"ListInferenceSchedulers", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListInferenceSchedulersRequest"}, + "output":{"shape":"ListInferenceSchedulersResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Retrieves a list of all inference schedulers currently available for your account.

" + }, + "ListModels":{ + "name":"ListModels", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListModelsRequest"}, + "output":{"shape":"ListModelsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Generates a list of all models in the account, including model name and ARN, dataset, and status.

" + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Lists all the tags for a specified resource, including key and value.

" + }, + "StartDataIngestionJob":{ + "name":"StartDataIngestionJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartDataIngestionJobRequest"}, + "output":{"shape":"StartDataIngestionJobResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Starts a data ingestion job. Amazon Lookout for Equipment returns the job status.

" + }, + "StartInferenceScheduler":{ + "name":"StartInferenceScheduler", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartInferenceSchedulerRequest"}, + "output":{"shape":"StartInferenceSchedulerResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Starts an inference scheduler.

" + }, + "StopInferenceScheduler":{ + "name":"StopInferenceScheduler", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopInferenceSchedulerRequest"}, + "output":{"shape":"StopInferenceSchedulerResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Stops an inference scheduler.

" + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Associates a given tag to a resource in your account. A tag is a key-value pair which can be added to an Amazon Lookout for Equipment resource as metadata. Tags can be used for organizing your resources as well as helping you to search and filter by tag. Multiple tags can be added to a resource, either when you create it, or later. Up to 50 tags can be associated with each resource.

" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Removes a specific tag from a given resource. The tag is specified by its key.

" + }, + "UpdateInferenceScheduler":{ + "name":"UpdateInferenceScheduler", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateInferenceSchedulerRequest"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Updates an inference scheduler.

" + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"BoundedLengthString"} + }, + "documentation":"

The request could not be completed because you do not have access to the resource.

", + "exception":true + }, + "AmazonResourceArn":{ + "type":"string", + "max":1011, + "min":1 + }, + "BoundedLengthString":{ + "type":"string", + "max":5000, + "min":1, + "pattern":"[\\P{M}\\p{M}]{1,5000}" + }, + "ComponentTimestampDelimiter":{ + "type":"string", + "max":1, + "min":0, + "pattern":"^(\\-|\\_|\\s)?$" + }, + "ConflictException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"BoundedLengthString"} + }, + "documentation":"

The request could not be completed due to a conflict with the current state of the target resource.

", + "exception":true + }, + "CreateDatasetRequest":{ + "type":"structure", + "required":[ + "DatasetName", + "DatasetSchema", + "ClientToken" + ], + "members":{ + "DatasetName":{ + "shape":"DatasetName", + "documentation":"

The name of the dataset being created.

" + }, + "DatasetSchema":{ + "shape":"DatasetSchema", + "documentation":"

A JSON description of the data that is in each time series dataset, including names, column names, and data types.

" + }, + "ServerSideKmsKeyId":{ + "shape":"NameOrArn", + "documentation":"

Provides the identifier of the AWS KMS customer master key (CMK) used to encrypt dataset data by Amazon Lookout for Equipment.

" + }, + "ClientToken":{ + "shape":"IdempotenceToken", + "documentation":"

A unique identifier for the request. If you do not set the client request token, Amazon Lookout for Equipment generates one.

", + "idempotencyToken":true + }, + "Tags":{ + "shape":"TagList", + "documentation":"

Any tags associated with the ingested data described in the dataset.

" + } + } + }, + "CreateDatasetResponse":{ + "type":"structure", + "members":{ + "DatasetName":{ + "shape":"DatasetName", + "documentation":"

The name of the dataset being created.

" + }, + "DatasetArn":{ + "shape":"DatasetArn", + "documentation":"

The Amazon Resource Name (ARN) of the dataset being created.

" + }, + "Status":{ + "shape":"DatasetStatus", + "documentation":"

Indicates the status of the CreateDataset operation.

" + } + } + }, + "CreateInferenceSchedulerRequest":{ + "type":"structure", + "required":[ + "ModelName", + "InferenceSchedulerName", + "DataUploadFrequency", + "DataInputConfiguration", + "DataOutputConfiguration", + "RoleArn", + "ClientToken" + ], + "members":{ + "ModelName":{ + "shape":"ModelName", + "documentation":"

The name of the previously trained ML model being used to create the inference scheduler.

" + }, + "InferenceSchedulerName":{ + "shape":"InferenceSchedulerName", + "documentation":"

The name of the inference scheduler being created.

" + }, + "DataDelayOffsetInMinutes":{ + "shape":"DataDelayOffsetInMinutes", + "documentation":"

A period of time (in minutes) by which inference on the data is delayed after the data starts. For instance, if you select an offset delay time of five minutes, inference will not begin on the data until the first data measurement after the five minute mark. For example, if five minutes is selected, the inference scheduler will wake up at the configured frequency with the additional five minute delay time to check the customer S3 bucket. The customer can upload data at the same frequency and they don't need to stop and restart the scheduler when uploading new data.

" + }, + "DataUploadFrequency":{ + "shape":"DataUploadFrequency", + "documentation":"

How often data is uploaded to the source S3 bucket for the input data. The value chosen is the length of time between data uploads. For instance, if you select 5 minutes, Amazon Lookout for Equipment will upload the real-time data to the source bucket once every 5 minutes. This frequency also determines how often Amazon Lookout for Equipment starts a scheduled inference on your data. In this example, it starts once every 5 minutes.

" + }, + "DataInputConfiguration":{ + "shape":"InferenceInputConfiguration", + "documentation":"

Specifies configuration information for the input data for the inference scheduler, including delimiter, format, and dataset location.

" + }, + "DataOutputConfiguration":{ + "shape":"InferenceOutputConfiguration", + "documentation":"

Specifies configuration information for the output results for the inference scheduler, including the S3 location for the output.

" + }, + "RoleArn":{ + "shape":"IamRoleArn", + "documentation":"

The Amazon Resource Name (ARN) of a role with permission to access the data source being used for the inference.

" + }, + "ServerSideKmsKeyId":{ + "shape":"NameOrArn", + "documentation":"

Provides the identifier of the AWS KMS customer master key (CMK) used to encrypt inference scheduler data by Amazon Lookout for Equipment.

" + }, + "ClientToken":{ + "shape":"IdempotenceToken", + "documentation":"

A unique identifier for the request. If you do not set the client request token, Amazon Lookout for Equipment generates one.

", + "idempotencyToken":true + }, + "Tags":{ + "shape":"TagList", + "documentation":"

Any tags associated with the inference scheduler.

" + } + } + }, + "CreateInferenceSchedulerResponse":{ + "type":"structure", + "members":{ + "InferenceSchedulerArn":{ + "shape":"InferenceSchedulerArn", + "documentation":"

The Amazon Resource Name (ARN) of the inference scheduler being created.

" + }, + "InferenceSchedulerName":{ + "shape":"InferenceSchedulerName", + "documentation":"

The name of inference scheduler being created.

" + }, + "Status":{ + "shape":"InferenceSchedulerStatus", + "documentation":"

Indicates the status of the CreateInferenceScheduler operation.

" + } + } + }, + "CreateModelRequest":{ + "type":"structure", + "required":[ + "ModelName", + "DatasetName", + "ClientToken" + ], + "members":{ + "ModelName":{ + "shape":"ModelName", + "documentation":"

The name for the ML model to be created.

" + }, + "DatasetName":{ + "shape":"DatasetIdentifier", + "documentation":"

The name of the dataset for the ML model being created.

" + }, + "DatasetSchema":{ + "shape":"DatasetSchema", + "documentation":"

The data schema for the ML model being created.

" + }, + "LabelsInputConfiguration":{ + "shape":"LabelsInputConfiguration", + "documentation":"

The input configuration for the labels being used for the ML model that's being created.

" + }, + "ClientToken":{ + "shape":"IdempotenceToken", + "documentation":"

A unique identifier for the request. If you do not set the client request token, Amazon Lookout for Equipment generates one.

", + "idempotencyToken":true + }, + "TrainingDataStartTime":{ + "shape":"Timestamp", + "documentation":"

Indicates the time reference in the dataset that should be used to begin the subset of training data for the ML model.

" + }, + "TrainingDataEndTime":{ + "shape":"Timestamp", + "documentation":"

Indicates the time reference in the dataset that should be used to end the subset of training data for the ML model.

" + }, + "EvaluationDataStartTime":{ + "shape":"Timestamp", + "documentation":"

Indicates the time reference in the dataset that should be used to begin the subset of evaluation data for the ML model.

" + }, + "EvaluationDataEndTime":{ + "shape":"Timestamp", + "documentation":"

Indicates the time reference in the dataset that should be used to end the subset of evaluation data for the ML model.

" + }, + "RoleArn":{ + "shape":"IamRoleArn", + "documentation":"

The Amazon Resource Name (ARN) of a role with permission to access the data source being used to create the ML model.

" + }, + "DataPreProcessingConfiguration":{ + "shape":"DataPreProcessingConfiguration", + "documentation":"

The configuration is the TargetSamplingRate, which is the sampling rate of the data after post processing by Amazon Lookout for Equipment. For example, if you provide data that has been collected at a 1 second level and you want the system to resample the data at a 1 minute rate before training, the TargetSamplingRate is 1 minute.

When providing a value for the TargetSamplingRate, you must attach the prefix \"PT\" to the rate you want. The value for a 1 second rate is therefore PT1S, the value for a 15 minute rate is PT15M, and the value for a 1 hour rate is PT1H

" + }, + "ServerSideKmsKeyId":{ + "shape":"NameOrArn", + "documentation":"

Provides the identifier of the AWS KMS customer master key (CMK) used to encrypt model data by Amazon Lookout for Equipment.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

Any tags associated with the ML model being created.

" + } + } + }, + "CreateModelResponse":{ + "type":"structure", + "members":{ + "ModelArn":{ + "shape":"ModelArn", + "documentation":"

The Amazon Resource Name (ARN) of the model being created.

" + }, + "Status":{ + "shape":"ModelStatus", + "documentation":"

Indicates the status of the CreateModel operation.

" + } + } + }, + "DataDelayOffsetInMinutes":{ + "type":"long", + "max":60, + "min":0 + }, + "DataIngestionJobSummaries":{ + "type":"list", + "member":{"shape":"DataIngestionJobSummary"} + }, + "DataIngestionJobSummary":{ + "type":"structure", + "members":{ + "JobId":{ + "shape":"IngestionJobId", + "documentation":"

Indicates the job ID of the data ingestion job.

" + }, + "DatasetName":{ + "shape":"DatasetName", + "documentation":"

The name of the dataset used for the data ingestion job.

" + }, + "DatasetArn":{ + "shape":"DatasetArn", + "documentation":"

The Amazon Resource Name (ARN) of the dataset used in the data ingestion job.

" + }, + "IngestionInputConfiguration":{ + "shape":"IngestionInputConfiguration", + "documentation":"

Specifies information for the input data for the data inference job, including data S3 location parameters.

" + }, + "Status":{ + "shape":"IngestionJobStatus", + "documentation":"

Indicates the status of the data ingestion job.

" + } + }, + "documentation":"

Provides information about a specified data ingestion job, including dataset information, data ingestion configuration, and status.

" + }, + "DataPreProcessingConfiguration":{ + "type":"structure", + "members":{ + "TargetSamplingRate":{ + "shape":"TargetSamplingRate", + "documentation":"

The sampling rate of the data after post processing by Amazon Lookout for Equipment. For example, if you provide data that has been collected at a 1 second level and you want the system to resample the data at a 1 minute rate before training, the TargetSamplingRate is 1 minute.

When providing a value for the TargetSamplingRate, you must attach the prefix \"PT\" to the rate you want. The value for a 1 second rate is therefore PT1S, the value for a 15 minute rate is PT15M, and the value for a 1 hour rate is PT1H

" + } + }, + "documentation":"

The configuration is the TargetSamplingRate, which is the sampling rate of the data after post processing by Amazon Lookout for Equipment. For example, if you provide data that has been collected at a 1 second level and you want the system to resample the data at a 1 minute rate before training, the TargetSamplingRate is 1 minute.

When providing a value for the TargetSamplingRate, you must attach the prefix \"PT\" to the rate you want. The value for a 1 second rate is therefore PT1S, the value for a 15 minute rate is PT15M, and the value for a 1 hour rate is PT1H

" + }, + "DataUploadFrequency":{ + "type":"string", + "enum":[ + "PT5M", + "PT10M", + "PT15M", + "PT30M", + "PT1H" + ] + }, + "DatasetArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"arn:aws(-[^:]+)?:lookoutequipment:[a-zA-Z0-9\\-]*:[0-9]{12}:dataset\\/.+" + }, + "DatasetIdentifier":{ + "type":"string", + "max":200, + "min":1, + "pattern":"^[0-9a-zA-Z_-]{1,200}$" + }, + "DatasetName":{ + "type":"string", + "max":200, + "min":1, + "pattern":"^[0-9a-zA-Z_-]{1,200}$" + }, + "DatasetSchema":{ + "type":"structure", + "members":{ + "InlineDataSchema":{ + "shape":"InlineDataSchema", + "documentation":"

", + "jsonvalue":true + } + }, + "documentation":"

Provides information about the data schema used with the given dataset.

" + }, + "DatasetStatus":{ + "type":"string", + "enum":[ + "CREATED", + "INGESTION_IN_PROGRESS", + "ACTIVE" + ] + }, + "DatasetSummaries":{ + "type":"list", + "member":{"shape":"DatasetSummary"} + }, + "DatasetSummary":{ + "type":"structure", + "members":{ + "DatasetName":{ + "shape":"DatasetName", + "documentation":"

The name of the dataset.

" + }, + "DatasetArn":{ + "shape":"DatasetArn", + "documentation":"

The Amazon Resource Name (ARN) of the specified dataset.

" + }, + "Status":{ + "shape":"DatasetStatus", + "documentation":"

Indicates the status of the dataset.

" + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

The time at which the dataset was created in Amazon Lookout for Equipment.

" + } + }, + "documentation":"

Contains information about the specific data set, including name, ARN, and status.

" + }, + "DeleteDatasetRequest":{ + "type":"structure", + "required":["DatasetName"], + "members":{ + "DatasetName":{ + "shape":"DatasetIdentifier", + "documentation":"

The name of the dataset to be deleted.

" + } + } + }, + "DeleteInferenceSchedulerRequest":{ + "type":"structure", + "required":["InferenceSchedulerName"], + "members":{ + "InferenceSchedulerName":{ + "shape":"InferenceSchedulerIdentifier", + "documentation":"

The name of the inference scheduler to be deleted.

" + } + } + }, + "DeleteModelRequest":{ + "type":"structure", + "required":["ModelName"], + "members":{ + "ModelName":{ + "shape":"ModelName", + "documentation":"

The name of the ML model to be deleted.

" + } + } + }, + "DescribeDataIngestionJobRequest":{ + "type":"structure", + "required":["JobId"], + "members":{ + "JobId":{ + "shape":"IngestionJobId", + "documentation":"

The job ID of the data ingestion job.

" + } + } + }, + "DescribeDataIngestionJobResponse":{ + "type":"structure", + "members":{ + "JobId":{ + "shape":"IngestionJobId", + "documentation":"

Indicates the job ID of the data ingestion job.

" + }, + "DatasetArn":{ + "shape":"DatasetArn", + "documentation":"

The Amazon Resource Name (ARN) of the dataset being used in the data ingestion job.

" + }, + "IngestionInputConfiguration":{ + "shape":"IngestionInputConfiguration", + "documentation":"

Specifies the S3 location configuration for the data input for the data ingestion job.

" + }, + "RoleArn":{ + "shape":"IamRoleArn", + "documentation":"

The Amazon Resource Name (ARN) of an IAM role with permission to access the data source being ingested.

" + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

The time at which the data ingestion job was created.

" + }, + "Status":{ + "shape":"IngestionJobStatus", + "documentation":"

Indicates the status of the DataIngestionJob operation.

" + }, + "FailedReason":{ + "shape":"BoundedLengthString", + "documentation":"

Specifies the reason for failure when a data ingestion job has failed.

" + } + } + }, + "DescribeDatasetRequest":{ + "type":"structure", + "required":["DatasetName"], + "members":{ + "DatasetName":{ + "shape":"DatasetIdentifier", + "documentation":"

The name of the dataset to be described.

" + } + } + }, + "DescribeDatasetResponse":{ + "type":"structure", + "members":{ + "DatasetName":{ + "shape":"DatasetName", + "documentation":"

The name of the dataset being described.

" + }, + "DatasetArn":{ + "shape":"DatasetArn", + "documentation":"

The Amazon Resource Name (ARN) of the dataset being described.

" + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

Specifies the time the dataset was created in Amazon Lookout for Equipment.

" + }, + "LastUpdatedAt":{ + "shape":"Timestamp", + "documentation":"

Specifies the time the dataset was last updated, if it was.

" + }, + "Status":{ + "shape":"DatasetStatus", + "documentation":"

Indicates the status of the dataset.

" + }, + "Schema":{ + "shape":"InlineDataSchema", + "documentation":"

A JSON description of the data that is in each time series dataset, including names, column names, and data types.

", + "jsonvalue":true + }, + "ServerSideKmsKeyId":{ + "shape":"KmsKeyArn", + "documentation":"

Provides the identifier of the AWS KMS customer master key (CMK) used to encrypt dataset data by Amazon Lookout for Equipment.

" + }, + "IngestionInputConfiguration":{ + "shape":"IngestionInputConfiguration", + "documentation":"

Specifies the S3 location configuration for the data input for the data ingestion job.

" + } + } + }, + "DescribeInferenceSchedulerRequest":{ + "type":"structure", + "required":["InferenceSchedulerName"], + "members":{ + "InferenceSchedulerName":{ + "shape":"InferenceSchedulerIdentifier", + "documentation":"

The name of the inference scheduler being described.

" + } + } + }, + "DescribeInferenceSchedulerResponse":{ + "type":"structure", + "members":{ + "ModelArn":{ + "shape":"ModelArn", + "documentation":"

The Amazon Resource Name (ARN) of the ML model of the inference scheduler being described.

" + }, + "ModelName":{ + "shape":"ModelName", + "documentation":"

The name of the ML model of the inference scheduler being described.

" + }, + "InferenceSchedulerName":{ + "shape":"InferenceSchedulerName", + "documentation":"

The name of the inference scheduler being described.

" + }, + "InferenceSchedulerArn":{ + "shape":"InferenceSchedulerArn", + "documentation":"

The Amazon Resource Name (ARN) of the inference scheduler being described.

" + }, + "Status":{ + "shape":"InferenceSchedulerStatus", + "documentation":"

Indicates the status of the inference scheduler.

" + }, + "DataDelayOffsetInMinutes":{ + "shape":"DataDelayOffsetInMinutes", + "documentation":"

A period of time (in minutes) by which inference on the data is delayed after the data starts. For instance, if you select an offset delay time of five minutes, inference will not begin on the data until the first data measurement after the five minute mark. For example, if five minutes is selected, the inference scheduler will wake up at the configured frequency with the additional five minute delay time to check the customer S3 bucket. The customer can upload data at the same frequency and they don't need to stop and restart the scheduler when uploading new data.

" + }, + "DataUploadFrequency":{ + "shape":"DataUploadFrequency", + "documentation":"

Specifies how often data is uploaded to the source S3 bucket for the input data. This value is the length of time between data uploads. For instance, if you select 5 minutes, Amazon Lookout for Equipment will upload the real-time data to the source bucket once every 5 minutes. This frequency also determines how often Amazon Lookout for Equipment starts a scheduled inference on your data. In this example, it starts once every 5 minutes.

" + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

Specifies the time at which the inference scheduler was created.

" + }, + "UpdatedAt":{ + "shape":"Timestamp", + "documentation":"

Specifies the time at which the inference scheduler was last updated, if it was.

" + }, + "DataInputConfiguration":{ + "shape":"InferenceInputConfiguration", + "documentation":"

Specifies configuration information for the input data for the inference scheduler, including delimiter, format, and dataset location.

" + }, + "DataOutputConfiguration":{ + "shape":"InferenceOutputConfiguration", + "documentation":"

Specifies information for the output results for the inference scheduler, including the output S3 location.

" + }, + "RoleArn":{ + "shape":"IamRoleArn", + "documentation":"

The Amazon Resource Name (ARN) of a role with permission to access the data source for the inference scheduler being described.

" + }, + "ServerSideKmsKeyId":{ + "shape":"KmsKeyArn", + "documentation":"

Provides the identifier of the AWS KMS customer master key (CMK) used to encrypt inference scheduler data by Amazon Lookout for Equipment.

" + } + } + }, + "DescribeModelRequest":{ + "type":"structure", + "required":["ModelName"], + "members":{ + "ModelName":{ + "shape":"ModelName", + "documentation":"

The name of the ML model to be described.

" + } + } + }, + "DescribeModelResponse":{ + "type":"structure", + "members":{ + "ModelName":{ + "shape":"ModelName", + "documentation":"

The name of the ML model being described.

" + }, + "ModelArn":{ + "shape":"ModelArn", + "documentation":"

The Amazon Resource Name (ARN) of the ML model being described.

" + }, + "DatasetName":{ + "shape":"DatasetName", + "documentation":"

The name of the dataset being used by the ML model being described.

" + }, + "DatasetArn":{ + "shape":"DatasetArn", + "documentation":"

The Amazon Resource Name (ARN) of the dataset used to create the ML model being described.

" + }, + "Schema":{ + "shape":"InlineDataSchema", + "documentation":"

A JSON description of the data that is in each time series dataset, including names, column names, and data types.

", + "jsonvalue":true + }, + "LabelsInputConfiguration":{ + "shape":"LabelsInputConfiguration", + "documentation":"

Specifies configuration information about the labels input, including its S3 location.

" + }, + "TrainingDataStartTime":{ + "shape":"Timestamp", + "documentation":"

Indicates the time reference in the dataset that was used to begin the subset of training data for the ML model.

" + }, + "TrainingDataEndTime":{ + "shape":"Timestamp", + "documentation":"

Indicates the time reference in the dataset that was used to end the subset of training data for the ML model.

" + }, + "EvaluationDataStartTime":{ + "shape":"Timestamp", + "documentation":"

Indicates the time reference in the dataset that was used to begin the subset of evaluation data for the ML model.

" + }, + "EvaluationDataEndTime":{ + "shape":"Timestamp", + "documentation":"

Indicates the time reference in the dataset that was used to end the subset of evaluation data for the ML model.

" + }, + "RoleArn":{ + "shape":"IamRoleArn", + "documentation":"

The Amazon Resource Name (ARN) of a role with permission to access the data source for the ML model being described.

" + }, + "DataPreProcessingConfiguration":{ + "shape":"DataPreProcessingConfiguration", + "documentation":"

The configuration is the TargetSamplingRate, which is the sampling rate of the data after post processing by Amazon Lookout for Equipment. For example, if you provide data that has been collected at a 1 second level and you want the system to resample the data at a 1 minute rate before training, the TargetSamplingRate is 1 minute.

When providing a value for the TargetSamplingRate, you must attach the prefix \"PT\" to the rate you want. The value for a 1 second rate is therefore PT1S, the value for a 15 minute rate is PT15M, and the value for a 1 hour rate is PT1H

" + }, + "Status":{ + "shape":"ModelStatus", + "documentation":"

Specifies the current status of the model being described. Status describes the status of the most recent action of the model.

" + }, + "TrainingExecutionStartTime":{ + "shape":"Timestamp", + "documentation":"

Indicates the time at which the training of the ML model began.

" + }, + "TrainingExecutionEndTime":{ + "shape":"Timestamp", + "documentation":"

Indicates the time at which the training of the ML model was completed.

" + }, + "FailedReason":{ + "shape":"BoundedLengthString", + "documentation":"

If the training of the ML model failed, this indicates the reason for that failure.

" + }, + "ModelMetrics":{ + "shape":"ModelMetrics", + "documentation":"

The Model Metrics show an aggregated summary of the model's performance within the evaluation time range. This is the JSON content of the metrics created when evaluating the model.

", + "jsonvalue":true + }, + "LastUpdatedTime":{ + "shape":"Timestamp", + "documentation":"

Indicates the last time the ML model was updated. The type of update is not specified.

" + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

Indicates the time and date at which the ML model was created.

" + }, + "ServerSideKmsKeyId":{ + "shape":"KmsKeyArn", + "documentation":"

Provides the identifier of the AWS KMS customer master key (CMK) used to encrypt model data by Amazon Lookout for Equipment.

" + } + } + }, + "FileNameTimestampFormat":{ + "type":"string", + "pattern":"^EPOCH|yyyy-MM-dd-HH-mm-ss|yyyyMMddHHmmss$" + }, + "IamRoleArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"arn:aws(-[^:]+)?:iam::[0-9]{12}:role/.+" + }, + "IdempotenceToken":{ + "type":"string", + "max":256, + "min":1, + "pattern":"\\p{ASCII}{1,256}" + }, + "InferenceExecutionStatus":{ + "type":"string", + "enum":[ + "IN_PROGRESS", + "SUCCESS", + "FAILED" + ] + }, + "InferenceExecutionSummaries":{ + "type":"list", + "member":{"shape":"InferenceExecutionSummary"} + }, + "InferenceExecutionSummary":{ + "type":"structure", + "members":{ + "ModelName":{ + "shape":"ModelName", + "documentation":"

The name of the ML model being used for the inference execution.

" + }, + "ModelArn":{ + "shape":"ModelArn", + "documentation":"

The Amazon Resource Name (ARN) of the ML model used for the inference execution.

" + }, + "InferenceSchedulerName":{ + "shape":"InferenceSchedulerName", + "documentation":"

The name of the inference scheduler being used for the inference execution.

" + }, + "InferenceSchedulerArn":{ + "shape":"InferenceSchedulerArn", + "documentation":"

The Amazon Resource Name (ARN) of the inference scheduler being used for the inference execution.

" + }, + "ScheduledStartTime":{ + "shape":"Timestamp", + "documentation":"

Indicates the start time at which the inference scheduler began the specific inference execution.

" + }, + "DataStartTime":{ + "shape":"Timestamp", + "documentation":"

Indicates the time reference in the dataset at which the inference execution began.

" + }, + "DataEndTime":{ + "shape":"Timestamp", + "documentation":"

Indicates the time reference in the dataset at which the inference execution stopped.

" + }, + "DataInputConfiguration":{ + "shape":"InferenceInputConfiguration", + "documentation":"

Specifies configuration information for the input data for the inference scheduler, including delimiter, format, and dataset location.

" + }, + "DataOutputConfiguration":{ + "shape":"InferenceOutputConfiguration", + "documentation":"

Specifies configuration information for the output results from the inference execution, including the output S3 location.

" + }, + "CustomerResultObject":{ + "shape":"S3Object", + "documentation":"

" + }, + "Status":{ + "shape":"InferenceExecutionStatus", + "documentation":"

Indicates the status of the inference execution.

" + }, + "FailedReason":{ + "shape":"BoundedLengthString", + "documentation":"

Specifies the reason for failure when an inference execution has failed.

" + } + }, + "documentation":"

Contains information about the specific inference execution, including input and output data configuration, inference scheduling information, status, and so on.

" + }, + "InferenceInputConfiguration":{ + "type":"structure", + "members":{ + "S3InputConfiguration":{ + "shape":"InferenceS3InputConfiguration", + "documentation":"

Specifies configuration information for the input data for the inference, including the S3 location of the input data.

" + }, + "InputTimeZoneOffset":{ + "shape":"TimeZoneOffset", + "documentation":"

Indicates the difference between your time zone and Greenwich Mean Time (GMT).

" + }, + "InferenceInputNameConfiguration":{ + "shape":"InferenceInputNameConfiguration", + "documentation":"

Specifies configuration information for the input data for the inference, including timestamp format and delimiter.

" + } + }, + "documentation":"

Specifies configuration information for the input data for the inference, including the S3 location of the input data.

" + }, + "InferenceInputNameConfiguration":{ + "type":"structure", + "members":{ + "TimestampFormat":{ + "shape":"FileNameTimestampFormat", + "documentation":"

The format of the timestamp, whether Epoch time, or standard, with or without hyphens (-).

" + }, + "ComponentTimestampDelimiter":{ + "shape":"ComponentTimestampDelimiter", + "documentation":"

Indicates the delimiter character used between items in the data.

" + } + }, + "documentation":"

Specifies configuration information for the input data for the inference, including timestamp format and delimiter.

" + }, + "InferenceOutputConfiguration":{ + "type":"structure", + "required":["S3OutputConfiguration"], + "members":{ + "S3OutputConfiguration":{ + "shape":"InferenceS3OutputConfiguration", + "documentation":"

Specifies configuration information for the output results from the inference, including the output S3 location.

" + }, + "KmsKeyId":{ + "shape":"NameOrArn", + "documentation":"

The ID number for the AWS KMS key used to encrypt the inference output.

" + } + }, + "documentation":"

Specifies configuration information for the output results from the inference, including KMS key ID and output S3 location.

" + }, + "InferenceS3InputConfiguration":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"S3Bucket", + "documentation":"

The bucket containing the input dataset for the inference.

" + }, + "Prefix":{ + "shape":"S3Prefix", + "documentation":"

The prefix for the S3 bucket used for the input data for the inference.

" + } + }, + "documentation":"

Specifies configuration information for the input data for the inference, including input data S3 location.

" + }, + "InferenceS3OutputConfiguration":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"S3Bucket", + "documentation":"

The bucket containing the output results from the inference.

" + }, + "Prefix":{ + "shape":"S3Prefix", + "documentation":"

The prefix for the S3 bucket used for the output results from the inference.

" + } + }, + "documentation":"

Specifies configuration information for the output results from the inference, including output S3 location.

" + }, + "InferenceSchedulerArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"arn:aws(-[^:]+)?:lookoutequipment:[a-zA-Z0-9\\-]*:[0-9]{12}:inference-scheduler\\/.+" + }, + "InferenceSchedulerIdentifier":{ + "type":"string", + "max":200, + "min":1, + "pattern":"^[0-9a-zA-Z_-]{1,200}$" + }, + "InferenceSchedulerName":{ + "type":"string", + "max":200, + "min":1, + "pattern":"^[0-9a-zA-Z_-]{1,200}$" + }, + "InferenceSchedulerStatus":{ + "type":"string", + "enum":[ + "PENDING", + "RUNNING", + "STOPPING", + "STOPPED" + ] + }, + "InferenceSchedulerSummaries":{ + "type":"list", + "member":{"shape":"InferenceSchedulerSummary"} + }, + "InferenceSchedulerSummary":{ + "type":"structure", + "members":{ + "ModelName":{ + "shape":"ModelName", + "documentation":"

The name of the ML model used for the inference scheduler.

" + }, + "ModelArn":{ + "shape":"ModelArn", + "documentation":"

The Amazon Resource Name (ARN) of the ML model used by the inference scheduler.

" + }, + "InferenceSchedulerName":{ + "shape":"InferenceSchedulerName", + "documentation":"

The name of the inference scheduler.

" + }, + "InferenceSchedulerArn":{ + "shape":"InferenceSchedulerArn", + "documentation":"

The Amazon Resource Name (ARN) of the inference scheduler.

" + }, + "Status":{ + "shape":"InferenceSchedulerStatus", + "documentation":"

Indicates the status of the inference scheduler.

" + }, + "DataDelayOffsetInMinutes":{ + "shape":"DataDelayOffsetInMinutes", + "documentation":"

A period of time (in minutes) by which inference on the data is delayed after the data starts. For instance, if an offset delay time of five minutes was selected, inference will not begin on the data until the first data measurement after the five minute mark. For example, if five minutes is selected, the inference scheduler will wake up at the configured frequency with the additional five minute delay time to check the customer S3 bucket. The customer can upload data at the same frequency and they don't need to stop and restart the scheduler when uploading new data.

" + }, + "DataUploadFrequency":{ + "shape":"DataUploadFrequency", + "documentation":"

How often data is uploaded to the source S3 bucket for the input data. This value is the length of time between data uploads. For instance, if you select 5 minutes, Amazon Lookout for Equipment will upload the real-time data to the source bucket once every 5 minutes. This frequency also determines how often Amazon Lookout for Equipment starts a scheduled inference on your data. In this example, it starts once every 5 minutes.

" + } + }, + "documentation":"

Contains information about the specific inference scheduler, including data delay offset, model name and ARN, status, and so on.

" + }, + "IngestionInputConfiguration":{ + "type":"structure", + "required":["S3InputConfiguration"], + "members":{ + "S3InputConfiguration":{ + "shape":"IngestionS3InputConfiguration", + "documentation":"

The location information for the S3 bucket used for input data for the data ingestion.

" + } + }, + "documentation":"

Specifies configuration information for the input data for the data ingestion job, including input data S3 location.

" + }, + "IngestionJobId":{ + "type":"string", + "max":32, + "pattern":"[A-Fa-f0-9]{0,32}" + }, + "IngestionJobStatus":{ + "type":"string", + "enum":[ + "IN_PROGRESS", + "SUCCESS", + "FAILED" + ] + }, + "IngestionS3InputConfiguration":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"S3Bucket", + "documentation":"

The name of the S3 bucket used for the input data for the data ingestion.

" + }, + "Prefix":{ + "shape":"S3Prefix", + "documentation":"

The prefix for the S3 location being used for the input data for the data ingestion.

" + } + }, + "documentation":"

Specifies S3 configuration information for the input data for the data ingestion job.

" + }, + "InlineDataSchema":{ + "type":"string", + "max":1000000, + "min":1 + }, + "InternalServerException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"BoundedLengthString"} + }, + "documentation":"

Processing of the request has failed because of an unknown error, exception or failure.

", + "exception":true, + "fault":true + }, + "KmsKeyArn":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"arn:aws[a-z\\-]*:kms:[a-z0-9\\-]*:\\d{12}:[\\w\\-\\/]+" + }, + "LabelsInputConfiguration":{ + "type":"structure", + "required":["S3InputConfiguration"], + "members":{ + "S3InputConfiguration":{ + "shape":"LabelsS3InputConfiguration", + "documentation":"

Contains location information for the S3 location being used for label data.

" + } + }, + "documentation":"

Contains the configuration information for the S3 location being used to hold label data.

" + }, + "LabelsS3InputConfiguration":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"S3Bucket", + "documentation":"

The name of the S3 bucket holding the label data.

" + }, + "Prefix":{ + "shape":"S3Prefix", + "documentation":"

The prefix for the S3 bucket used for the label data.

" + } + }, + "documentation":"

The location information (prefix and bucket name) for the s3 location being used for label data.

" + }, + "ListDataIngestionJobsRequest":{ + "type":"structure", + "members":{ + "DatasetName":{ + "shape":"DatasetName", + "documentation":"

The name of the dataset being used for the data ingestion job.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

An opaque pagination token indicating where to continue the listing of data ingestion jobs.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

Specifies the maximum number of data ingestion jobs to list.

" + }, + "Status":{ + "shape":"IngestionJobStatus", + "documentation":"

Indicates the status of the data ingestion job.

" + } + } + }, + "ListDataIngestionJobsResponse":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

An opaque pagination token indicating where to continue the listing of data ingestion jobs.

" + }, + "DataIngestionJobSummaries":{ + "shape":"DataIngestionJobSummaries", + "documentation":"

Specifies information about the specific data ingestion job, including dataset name and status.

" + } + } + }, + "ListDatasetsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

An opaque pagination token indicating where to continue the listing of datasets.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

Specifies the maximum number of datasets to list.

" + }, + "DatasetNameBeginsWith":{ + "shape":"DatasetName", + "documentation":"

The beginning of the name of the datasets to be listed.

" + } + } + }, + "ListDatasetsResponse":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

An opaque pagination token indicating where to continue the listing of datasets.

" + }, + "DatasetSummaries":{ + "shape":"DatasetSummaries", + "documentation":"

Provides information about the specified dataset, including creation time, dataset ARN, and status.

" + } + } + }, + "ListInferenceExecutionsRequest":{ + "type":"structure", + "required":["InferenceSchedulerName"], + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

An opaque pagination token indicating where to continue the listing of inference executions.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

Specifies the maximum number of inference executions to list.

" + }, + "InferenceSchedulerName":{ + "shape":"InferenceSchedulerIdentifier", + "documentation":"

The name of the inference scheduler for the inference execution listed.

" + }, + "DataStartTimeAfter":{ + "shape":"Timestamp", + "documentation":"

The time reference in the inferenced dataset after which Amazon Lookout for Equipment started the inference execution.

" + }, + "DataEndTimeBefore":{ + "shape":"Timestamp", + "documentation":"

The time reference in the inferenced dataset before which Amazon Lookout for Equipment stopped the inference execution.

" + }, + "Status":{ + "shape":"InferenceExecutionStatus", + "documentation":"

The status of the inference execution.

" + } + } + }, + "ListInferenceExecutionsResponse":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

An opaque pagination token indicating where to continue the listing of inference executions.

" + }, + "InferenceExecutionSummaries":{ + "shape":"InferenceExecutionSummaries", + "documentation":"

Provides an array of information about the individual inference executions returned from the ListInferenceExecutions operation, including model used, inference scheduler, data configuration, and so on.

" + } + } + }, + "ListInferenceSchedulersRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

An opaque pagination token indicating where to continue the listing of inference schedulers.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

Specifies the maximum number of inference schedulers to list.

" + }, + "InferenceSchedulerNameBeginsWith":{ + "shape":"InferenceSchedulerIdentifier", + "documentation":"

The beginning of the name of the inference schedulers to be listed.

" + }, + "ModelName":{ + "shape":"ModelName", + "documentation":"

The name of the ML model used by the inference scheduler to be listed.

" + } + } + }, + "ListInferenceSchedulersResponse":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

An opaque pagination token indicating where to continue the listing of inference schedulers.

" + }, + "InferenceSchedulerSummaries":{ + "shape":"InferenceSchedulerSummaries", + "documentation":"

Provides information about the specified inference scheduler, including data upload frequency, model name and ARN, and status.

" + } + } + }, + "ListModelsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

An opaque pagination token indicating where to continue the listing of ML models.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

Specifies the maximum number of ML models to list.

" + }, + "Status":{ + "shape":"ModelStatus", + "documentation":"

The status of the ML model.

" + }, + "ModelNameBeginsWith":{ + "shape":"ModelName", + "documentation":"

The beginning of the name of the ML models being listed.

" + }, + "DatasetNameBeginsWith":{ + "shape":"DatasetName", + "documentation":"

The beginning of the name of the dataset of the ML models to be listed.

" + } + } + }, + "ListModelsResponse":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

An opaque pagination token indicating where to continue the listing of ML models.

" + }, + "ModelSummaries":{ + "shape":"ModelSummaries", + "documentation":"

Provides information on the specified model, including created time, model and dataset ARNs, and status.

" + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"AmazonResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the resource (such as the dataset or model) that is the focus of the ListTagsForResource operation.

" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagList", + "documentation":"

Any tags associated with the resource.

" + } + } + }, + "MaxResults":{ + "type":"integer", + "max":500, + "min":1 + }, + "ModelArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"arn:aws(-[^:]+)?:lookoutequipment:[a-zA-Z0-9\\-]*:[0-9]{12}:model\\/.+" + }, + "ModelMetrics":{ + "type":"string", + "max":50000, + "min":1 + }, + "ModelName":{ + "type":"string", + "max":200, + "min":1, + "pattern":"^[0-9a-zA-Z_-]{1,200}$" + }, + "ModelStatus":{ + "type":"string", + "enum":[ + "IN_PROGRESS", + "SUCCESS", + "FAILED" + ] + }, + "ModelSummaries":{ + "type":"list", + "member":{"shape":"ModelSummary"} + }, + "ModelSummary":{ + "type":"structure", + "members":{ + "ModelName":{ + "shape":"ModelName", + "documentation":"

The name of the ML model.

" + }, + "ModelArn":{ + "shape":"ModelArn", + "documentation":"

The Amazon Resource Name (ARN) of the ML model.

" + }, + "DatasetName":{ + "shape":"DatasetName", + "documentation":"

The name of the dataset being used for the ML model.

" + }, + "DatasetArn":{ + "shape":"DatasetArn", + "documentation":"

The Amazon Resource Name (ARN) of the dataset used to create the model.

" + }, + "Status":{ + "shape":"ModelStatus", + "documentation":"

Indicates the status of the ML model.

" + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

The time at which the specific model was created.

" + } + }, + "documentation":"

Provides information about the specified ML model, including dataset and model names and ARNs, as well as status.

" + }, + "NameOrArn":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"^[A-Za-z0-9][A-Za-z0-9:_/+=,@.-]{0,2048}$" + }, + "NextToken":{ + "type":"string", + "max":8192, + "pattern":"\\p{ASCII}{0,8192}" + }, + "ResourceNotFoundException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"BoundedLengthString"} + }, + "documentation":"

The resource requested could not be found. Verify the resource ID and retry your request.

", + "exception":true + }, + "S3Bucket":{ + "type":"string", + "max":63, + "min":3, + "pattern":"^[a-z0-9][\\.\\-a-z0-9]{1,61}[a-z0-9]$" + }, + "S3Key":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"[\\P{M}\\p{M}]{1,1024}[^/]$" + }, + "S3Object":{ + "type":"structure", + "required":[ + "Bucket", + "Key" + ], + "members":{ + "Bucket":{ + "shape":"S3Bucket", + "documentation":"

The name of the specific S3 bucket.

" + }, + "Key":{ + "shape":"S3Key", + "documentation":"

The name of the specific S3 object key identifying the object in the bucket.

" + } + }, + "documentation":"

Contains information about an S3 bucket.

" + }, + "S3Prefix":{ + "type":"string", + "max":1024, + "min":0, + "pattern":"(^$)|([\\P{M}\\p{M}]{1,1023}/$)" + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"BoundedLengthString"} + }, + "documentation":"

Resource limitations have been exceeded.

", + "exception":true + }, + "StartDataIngestionJobRequest":{ + "type":"structure", + "required":[ + "DatasetName", + "IngestionInputConfiguration", + "RoleArn", + "ClientToken" + ], + "members":{ + "DatasetName":{ + "shape":"DatasetIdentifier", + "documentation":"

The name of the dataset being used by the data ingestion job.

" + }, + "IngestionInputConfiguration":{ + "shape":"IngestionInputConfiguration", + "documentation":"

Specifies information for the input data for the data ingestion job, including dataset S3 location.

" + }, + "RoleArn":{ + "shape":"IamRoleArn", + "documentation":"

The Amazon Resource Name (ARN) of a role with permission to access the data source for the data ingestion job.

" + }, + "ClientToken":{ + "shape":"IdempotenceToken", + "documentation":"

A unique identifier for the request. If you do not set the client request token, Amazon Lookout for Equipment generates one.

", + "idempotencyToken":true + } + } + }, + "StartDataIngestionJobResponse":{ + "type":"structure", + "members":{ + "JobId":{ + "shape":"IngestionJobId", + "documentation":"

Indicates the job ID of the data ingestion job.

" + }, + "Status":{ + "shape":"IngestionJobStatus", + "documentation":"

Indicates the status of the StartDataIngestionJob operation.

" + } + } + }, + "StartInferenceSchedulerRequest":{ + "type":"structure", + "required":["InferenceSchedulerName"], + "members":{ + "InferenceSchedulerName":{ + "shape":"InferenceSchedulerIdentifier", + "documentation":"

The name of the inference scheduler to be started.

" + } + } + }, + "StartInferenceSchedulerResponse":{ + "type":"structure", + "members":{ + "ModelArn":{ + "shape":"ModelArn", + "documentation":"

The Amazon Resource Name (ARN) of the ML model being used by the inference scheduler.

" + }, + "ModelName":{ + "shape":"ModelName", + "documentation":"

The name of the ML model being used by the inference scheduler.

" + }, + "InferenceSchedulerName":{ + "shape":"InferenceSchedulerName", + "documentation":"

The name of the inference scheduler being started.

" + }, + "InferenceSchedulerArn":{ + "shape":"InferenceSchedulerArn", + "documentation":"

The Amazon Resource Name (ARN) of the inference scheduler being started.

" + }, + "Status":{ + "shape":"InferenceSchedulerStatus", + "documentation":"

Indicates the status of the inference scheduler.

" + } + } + }, + "StopInferenceSchedulerRequest":{ + "type":"structure", + "required":["InferenceSchedulerName"], + "members":{ + "InferenceSchedulerName":{ + "shape":"InferenceSchedulerIdentifier", + "documentation":"

The name of the inference scheduler to be stopped.

" + } + } + }, + "StopInferenceSchedulerResponse":{ + "type":"structure", + "members":{ + "ModelArn":{ + "shape":"ModelArn", + "documentation":"

The Amazon Resource Name (ARN) of the ML model used by the inference scheduler being stopped.

" + }, + "ModelName":{ + "shape":"ModelName", + "documentation":"

The name of the ML model used by the inference scheduler being stopped.

" + }, + "InferenceSchedulerName":{ + "shape":"InferenceSchedulerName", + "documentation":"

The name of the inference scheduler being stopped.

" + }, + "InferenceSchedulerArn":{ + "shape":"InferenceSchedulerArn", + "documentation":"

The Amazon Resource Name (ARN) of the inference scheduler being stopped.

" + }, + "Status":{ + "shape":"InferenceSchedulerStatus", + "documentation":"

Indicates the status of the inference scheduler.

" + } + } + }, + "Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{ + "shape":"TagKey", + "documentation":"

The key for the specified tag.

" + }, + "Value":{ + "shape":"TagValue", + "documentation":"

The value for the specified tag.

" + } + }, + "documentation":"

A tag is a key-value pair that can be added to a resource as metadata.

" + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^(?!aws:)[a-zA-Z+-=._:/]+$" + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":0 + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":200, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{ + "shape":"AmazonResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the specific resource to which the tag should be associated.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tag or tags to be associated with a specific resource. Both the tag key and value are specified.

" + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0, + "pattern":"[\\s\\w+-=\\.:/@]*" + }, + "TargetSamplingRate":{ + "type":"string", + "enum":[ + "PT1S", + "PT5S", + "PT10S", + "PT15S", + "PT30S", + "PT1M", + "PT5M", + "PT10M", + "PT15M", + "PT30M", + "PT1H" + ] + }, + "ThrottlingException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"BoundedLengthString"} + }, + "documentation":"

The request was denied due to request throttling.

", + "exception":true + }, + "TimeZoneOffset":{ + "type":"string", + "pattern":"^(\\+|\\-)[0-9]{2}\\:[0-9]{2}$" + }, + "Timestamp":{"type":"timestamp"}, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagKeys" + ], + "members":{ + "ResourceArn":{ + "shape":"AmazonResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the resource to which the tag is currently associated.

" + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

Specifies the key of the tag to be removed from a specified resource.

" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateInferenceSchedulerRequest":{ + "type":"structure", + "required":["InferenceSchedulerName"], + "members":{ + "InferenceSchedulerName":{ + "shape":"InferenceSchedulerIdentifier", + "documentation":"

The name of the inference scheduler to be updated.

" + }, + "DataDelayOffsetInMinutes":{ + "shape":"DataDelayOffsetInMinutes", + "documentation":"

A period of time (in minutes) by which inference on the data is delayed after the data starts. For instance, if you select an offset delay time of five minutes, inference will not begin on the data until the first data measurement after the five minute mark. For example, if five minutes is selected, the inference scheduler will wake up at the configured frequency with the additional five minute delay time to check the customer S3 bucket. The customer can upload data at the same frequency and they don't need to stop and restart the scheduler when uploading new data.

" + }, + "DataUploadFrequency":{ + "shape":"DataUploadFrequency", + "documentation":"

How often data is uploaded to the source S3 bucket for the input data. The value chosen is the length of time between data uploads. For instance, if you select 5 minutes, Amazon Lookout for Equipment will upload the real-time data to the source bucket once every 5 minutes. This frequency also determines how often Amazon Lookout for Equipment starts a scheduled inference on your data. In this example, it starts once every 5 minutes.

" + }, + "DataInputConfiguration":{ + "shape":"InferenceInputConfiguration", + "documentation":"

Specifies information for the input data for the inference scheduler, including delimiter, format, and dataset location.

" + }, + "DataOutputConfiguration":{ + "shape":"InferenceOutputConfiguration", + "documentation":"

Specifies information for the output results from the inference scheduler, including the output S3 location.

" + }, + "RoleArn":{ + "shape":"IamRoleArn", + "documentation":"

The Amazon Resource Name (ARN) of a role with permission to access the data source for the inference scheduler.

" + } + } + }, + "ValidationException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{"shape":"BoundedLengthString"} + }, + "documentation":"

The input fails to satisfy constraints specified by Amazon Lookout for Equipment or a related AWS service that's being utilized.

", + "exception":true + } + }, + "documentation":"

Amazon Lookout for Equipment is a machine learning service that uses advanced analytics to identify anomalies in machines from sensor data for use in predictive maintenance.

" +} diff --git a/botocore/data/ram/2018-01-04/service-2.json b/botocore/data/ram/2018-01-04/service-2.json index 3f22239826..ebdfaaa20d 100644 --- a/botocore/data/ram/2018-01-04/service-2.json +++ b/botocore/data/ram/2018-01-04/service-2.json @@ -557,7 +557,7 @@ }, "principals":{ "shape":"PrincipalArnOrIdList", - "documentation":"

The principals.

" + "documentation":"

The principals to associate with the resource share. The possible values are IDs of AWS accounts, and the ARNs of organizational units (OU) or organizations from AWS Organizations.

" }, "clientToken":{ "shape":"String", @@ -1077,7 +1077,7 @@ }, "resourceType":{ "shape":"String", - "documentation":"

The resource type.

Valid values: codebuild:Project | codebuild:ReportGroup | ec2:CapacityReservation | ec2:DedicatedHost | ec2:Subnet | ec2:TrafficMirrorTarget | ec2:TransitGateway | imagebuilder:Component | imagebuilder:Image | imagebuilder:ImageRecipe | license-manager:LicenseConfiguration I resource-groups:Group | rds:Cluster | route53resolver:ResolverRule

" + "documentation":"

The resource type.

Valid values: acm-pca:CertificateAuthority | appmesh:Mesh | codebuild:Project | codebuild:ReportGroup | ec2:CapacityReservation | ec2:DedicatedHost | ec2:LocalGatewayRouteTable | ec2:PrefixList | ec2:Subnet | ec2:TrafficMirrorTarget | ec2:TransitGateway | imagebuilder:Component | imagebuilder:Image | imagebuilder:ImageRecipe | imagebuilder:ContainerRecipe | glue:Catalog | glue:Database | glue:Table | license-manager:LicenseConfiguration | network-firewall:FirewallPolicy | network-firewall:StatefulRuleGroup | network-firewall:StatelessRuleGroup | outposts:Outpost | resource-groups:Group | rds:Cluster | route53resolver:FirewallRuleGroup | route53resolver:ResolverQueryLogConfig | route53resolver:ResolverRule

" }, "resourceShareArns":{ "shape":"ResourceShareArnList", @@ -1177,7 +1177,7 @@ }, "resourceType":{ "shape":"String", - "documentation":"

The resource type.

Valid values: codebuild:Project | codebuild:ReportGroup | ec2:CapacityReservation | ec2:DedicatedHost | ec2:Subnet | ec2:TrafficMirrorTarget | ec2:TransitGateway | imagebuilder:Component | imagebuilder:Image | imagebuilder:ImageRecipe | license-manager:LicenseConfiguration I resource-groups:Group | rds:Cluster | route53resolver:ResolverRule

" + "documentation":"

The resource type.

Valid values: acm-pca:CertificateAuthority | appmesh:Mesh | codebuild:Project | codebuild:ReportGroup | ec2:CapacityReservation | ec2:DedicatedHost | ec2:LocalGatewayRouteTable | ec2:PrefixList | ec2:Subnet | ec2:TrafficMirrorTarget | ec2:TransitGateway | imagebuilder:Component | imagebuilder:Image | imagebuilder:ImageRecipe | imagebuilder:ContainerRecipe | glue:Catalog | glue:Database | glue:Table | license-manager:LicenseConfiguration | network-firewall:FirewallPolicy | network-firewall:StatefulRuleGroup | network-firewall:StatelessRuleGroup | outposts:Outpost | resource-groups:Group | rds:Cluster | route53resolver:FirewallRuleGroup | route53resolver:ResolverQueryLogConfig | route53resolver:ResolverRule

" }, "resourceArns":{ "shape":"ResourceArnList", diff --git a/botocore/data/robomaker/2018-06-29/service-2.json b/botocore/data/robomaker/2018-06-29/service-2.json index 0ec521fa03..1e7f7b0bf4 100644 --- a/botocore/data/robomaker/2018-06-29/service-2.json +++ b/botocore/data/robomaker/2018-06-29/service-2.json @@ -2116,7 +2116,11 @@ "PostLaunchFileFailure", "BadPermissionError", "DownloadConditionFailed", - "InternalServerError" + "BadLambdaAssociated", + "InternalServerError", + "RobotApplicationDoesNotExist", + "DeploymentFleetDoesNotExist", + "FleetDeploymentTimeout" ] }, "DeploymentJobs":{ @@ -2836,6 +2840,13 @@ "min":1, "pattern":".*" }, + "ExitBehavior":{ + "type":"string", + "enum":[ + "FAIL", + "RESTART" + ] + }, "FailedAt":{"type":"timestamp"}, "FailedCreateSimulationJobRequest":{ "type":"structure", @@ -3751,6 +3762,14 @@ "useDefaultUploadConfigurations":{ "shape":"BoxedBoolean", "documentation":"

A Boolean indicating whether to use default upload configurations. By default, .ros and .gazebo files are uploaded when the application terminates and all ROS topics will be recorded.

If you set this value, you must specify an outputLocation.

" + }, + "tools":{ + "shape":"Tools", + "documentation":"

Information about tools configured for the robot application.

" + }, + "useDefaultTools":{ + "shape":"BoxedBoolean", + "documentation":"

A Boolean indicating whether to use default robot application tools. The default tools are rviz, rqt, terminal and rosbag record. The default is False.

" } }, "documentation":"

Application configuration information for a robot.

" @@ -3998,6 +4017,14 @@ "useDefaultUploadConfigurations":{ "shape":"BoxedBoolean", "documentation":"

A Boolean indicating whether to use default upload configurations. By default, .ros and .gazebo files are uploaded when the application terminates and all ROS topics will be recorded.

If you set this value, you must specify an outputLocation.

" + }, + "tools":{ + "shape":"Tools", + "documentation":"

Information about tools configured for the simulation application.

" + }, + "useDefaultTools":{ + "shape":"BoxedBoolean", + "documentation":"

A Boolean indicating whether to use default simulation application tools. The default tools are rviz, rqt, terminal and rosbag record. The default is False.

" } }, "documentation":"

Information about a simulation application configuration.

" @@ -4200,6 +4227,8 @@ "InternalServiceError", "RobotApplicationCrash", "SimulationApplicationCrash", + "RobotApplicationHealthCheckFailure", + "SimulationApplicationHealthCheckFailure", "BadPermissionsRobotApplication", "BadPermissionsSimulationApplication", "BadPermissionsS3Object", @@ -4211,6 +4240,7 @@ "InvalidBundleRobotApplication", "InvalidBundleSimulationApplication", "InvalidS3Resource", + "ThrottlingError", "LimitExceeded", "MismatchedEtag", "RobotApplicationVersionMismatchedEtag", @@ -4652,6 +4682,48 @@ "error":{"httpStatusCode":400}, "exception":true }, + "Tool":{ + "type":"structure", + "required":[ + "name", + "command" + ], + "members":{ + "streamUI":{ + "shape":"BoxedBoolean", + "documentation":"

Boolean indicating whether a streaming session will be configured for the tool. If True, AWS RoboMaker will configure a connection so you can interact with the tool as it is running in the simulation. It must have a graphical user interface. The default is False.

" + }, + "name":{ + "shape":"Name", + "documentation":"

The name of the tool.

" + }, + "command":{ + "shape":"UnrestrictedCommand", + "documentation":"

Command-line arguments for the tool. It must include the tool executable name.

" + }, + "streamOutputToCloudWatch":{ + "shape":"BoxedBoolean", + "documentation":"

Boolean indicating whether logs will be recorded in CloudWatch for the tool. The default is False.

" + }, + "exitBehavior":{ + "shape":"ExitBehavior", + "documentation":"

Exit behavior determines what happens when your tool quits running. RESTART will cause your tool to be restarted. FAIL will cause your job to exit. The default is RESTART.

" + } + }, + "documentation":"

Information about a tool. Tools are used in a simulation job.

" + }, + "Tools":{ + "type":"list", + "member":{"shape":"Tool"}, + "max":10, + "min":0 + }, + "UnrestrictedCommand":{ + "type":"string", + "max":1024, + "min":1, + "pattern":".*" + }, "UntagResourceRequest":{ "type":"structure", "required":[ @@ -4881,7 +4953,7 @@ }, "uploadBehavior":{ "shape":"UploadBehavior", - "documentation":"

Specifies how to upload the files:

UPLOAD_ON_TERMINATE

Matching files are uploaded once the simulation enters the TERMINATING state. Matching files are not uploaded until all of your code (including tools) have stopped.

If there is a problem uploading a file, the upload is retried. If problems persist, no further upload attempts will be made.

UPLOAD_ROLLING_AUTO_REMOVE

Matching files are uploaded as they are created. They are deleted after they are uploaded. The specified path is checked every 5 seconds. A final check is made when all of your code (including tools) have stopped.

" + "documentation":"

Specifies when to upload the files:

UPLOAD_ON_TERMINATE

Matching files are uploaded once the simulation enters the TERMINATING state. Matching files are not uploaded until all of your code (including tools) have stopped.

If there is a problem uploading a file, the upload is retried. If problems persist, no further upload attempts will be made.

UPLOAD_ROLLING_AUTO_REMOVE

Matching files are uploaded as they are created. They are deleted after they are uploaded. The specified path is checked every 5 seconds. A final check is made when all of your code (including tools) have stopped.

" } }, "documentation":"

Provides upload configuration information. Files are uploaded from the simulation job to a location you specify.

" diff --git a/docs/source/conf.py b/docs/source/conf.py index 959210eab1..1681eca7f3 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -54,7 +54,7 @@ # The short X.Y version. version = '1.20.' # The full version, including alpha/beta/rc tags. -release = '1.20.47' +release = '1.20.48' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages.