From d9d692f7a350417480f8eb9592d8b0c936b6825a Mon Sep 17 00:00:00 2001 From: aws-sdk-python-automation Date: Wed, 24 Nov 2021 19:06:33 +0000 Subject: [PATCH] Update to latest models --- .../api-change-autoscaling-13680.json | 5 + .../api-change-customerprofiles-44354.json | 5 + .../api-change-elasticache-5184.json | 5 + .../api-change-imagebuilder-91516.json | 5 + .../api-change-iotsitewise-70722.json | 5 + .../next-release/api-change-lambda-62640.json | 5 + .../next-release/api-change-proton-5972.json | 5 + .../api-change-timestreamquery-22740.json | 5 + .../api-change-timestreamwrite-14873.json | 5 + .../api-change-translate-10402.json | 5 + .../autoscaling/2011-01-01/service-2.json | 146 +- .../2020-08-15/service-2.json | 504 ++++- .../elasticache/2015-02-02/service-2.json | 40 +- .../imagebuilder/2019-12-02/service-2.json | 38 +- .../iotsitewise/2019-12-02/paginators-1.json | 6 + .../iotsitewise/2019-12-02/service-2.json | 389 +++- .../data/lambda/2015-03-31/service-2.json | 352 +--- .../data/proton/2020-07-20/paginators-1.json | 41 + .../data/proton/2020-07-20/service-2.json | 1826 +++++++++++++++-- .../2018-11-01/paginators-1.json | 12 + .../2018-11-01/service-2.json | 1236 ++++++++++- .../2018-11-01/service-2.json | 220 +- .../data/translate/2017-07-01/service-2.json | 42 +- 23 files changed, 4214 insertions(+), 688 deletions(-) create mode 100644 .changes/next-release/api-change-autoscaling-13680.json create mode 100644 .changes/next-release/api-change-customerprofiles-44354.json create mode 100644 .changes/next-release/api-change-elasticache-5184.json create mode 100644 .changes/next-release/api-change-imagebuilder-91516.json create mode 100644 .changes/next-release/api-change-iotsitewise-70722.json create mode 100644 .changes/next-release/api-change-lambda-62640.json create mode 100644 .changes/next-release/api-change-proton-5972.json create mode 100644 .changes/next-release/api-change-timestreamquery-22740.json create mode 100644 
.changes/next-release/api-change-timestreamwrite-14873.json create mode 100644 .changes/next-release/api-change-translate-10402.json diff --git a/.changes/next-release/api-change-autoscaling-13680.json b/.changes/next-release/api-change-autoscaling-13680.json new file mode 100644 index 0000000000..897d6f0c84 --- /dev/null +++ b/.changes/next-release/api-change-autoscaling-13680.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``autoscaling``", + "description": "Customers can now configure predictive scaling policies to proactively scale EC2 Auto Scaling groups based on any CloudWatch metrics that more accurately represent the load on the group than the four predefined metrics. They can also use math expressions to further customize the metrics." +} diff --git a/.changes/next-release/api-change-customerprofiles-44354.json b/.changes/next-release/api-change-customerprofiles-44354.json new file mode 100644 index 0000000000..827d1b14b2 --- /dev/null +++ b/.changes/next-release/api-change-customerprofiles-44354.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``customer-profiles``", + "description": "This release introduces a new auto-merging feature for profile matching. The auto-merging configurations can be set via CreateDomain API or UpdateDomain API. You can use GetIdentityResolutionJob API and ListIdentityResolutionJobs API to fetch job status." 
+} diff --git a/.changes/next-release/api-change-elasticache-5184.json b/.changes/next-release/api-change-elasticache-5184.json new file mode 100644 index 0000000000..705d914b8a --- /dev/null +++ b/.changes/next-release/api-change-elasticache-5184.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``elasticache``", + "description": "Doc only update for ElastiCache" +} diff --git a/.changes/next-release/api-change-imagebuilder-91516.json b/.changes/next-release/api-change-imagebuilder-91516.json new file mode 100644 index 0000000000..2f89d5595c --- /dev/null +++ b/.changes/next-release/api-change-imagebuilder-91516.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``imagebuilder``", + "description": "This release adds support for sharing AMIs with Organizations within an EC2 Image Builder Distribution Configuration." +} diff --git a/.changes/next-release/api-change-iotsitewise-70722.json b/.changes/next-release/api-change-iotsitewise-70722.json new file mode 100644 index 0000000000..b862ea02a5 --- /dev/null +++ b/.changes/next-release/api-change-iotsitewise-70722.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``iotsitewise``", + "description": "AWS IoT SiteWise now accepts data streams that aren't associated with any asset properties. You can organize data by updating data stream associations." 
+} diff --git a/.changes/next-release/api-change-lambda-62640.json b/.changes/next-release/api-change-lambda-62640.json new file mode 100644 index 0000000000..7ae936a693 --- /dev/null +++ b/.changes/next-release/api-change-lambda-62640.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``lambda``", + "description": "Remove Lambda function url apis" +} diff --git a/.changes/next-release/api-change-proton-5972.json b/.changes/next-release/api-change-proton-5972.json new file mode 100644 index 0000000000..271ba3edc0 --- /dev/null +++ b/.changes/next-release/api-change-proton-5972.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``proton``", + "description": "This release adds APIs for getting the outputs and provisioned stacks for Environments, Pipelines, and ServiceInstances. You can now add tags to EnvironmentAccountConnections. It also adds APIs for working with PR-based provisioning. Also, it adds APIs for syncing templates with a git repository." +} diff --git a/.changes/next-release/api-change-timestreamquery-22740.json b/.changes/next-release/api-change-timestreamquery-22740.json new file mode 100644 index 0000000000..ca8239ea00 --- /dev/null +++ b/.changes/next-release/api-change-timestreamquery-22740.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``timestream-query``", + "description": "Releasing Amazon Timestream Scheduled Queries. 
It makes real-time analytics more performant and cost-effective for customers by calculating and storing frequently accessed aggregates, and other computations, typically used in operational dashboards, business reports, and other analytics applications" +} diff --git a/.changes/next-release/api-change-timestreamwrite-14873.json b/.changes/next-release/api-change-timestreamwrite-14873.json new file mode 100644 index 0000000000..ef6cb585f1 --- /dev/null +++ b/.changes/next-release/api-change-timestreamwrite-14873.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``timestream-write``", + "description": "This release adds support for multi-measure records and magnetic store writes. Multi-measure records allow customers to store multiple measures in a single table row. Magnetic store writes enable customers to write late arrival data (data with timestamp in the past) directly into the magnetic store." +} diff --git a/.changes/next-release/api-change-translate-10402.json b/.changes/next-release/api-change-translate-10402.json new file mode 100644 index 0000000000..417465042b --- /dev/null +++ b/.changes/next-release/api-change-translate-10402.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``translate``", + "description": "This release enables customers to use translation settings to mask profane words and phrases in their translation output." +} diff --git a/botocore/data/autoscaling/2011-01-01/service-2.json b/botocore/data/autoscaling/2011-01-01/service-2.json index 72a5bdb015..3dc9c2f703 100644 --- a/botocore/data/autoscaling/2011-01-01/service-2.json +++ b/botocore/data/autoscaling/2011-01-01/service-2.json @@ -1678,7 +1678,7 @@ "members":{ "Timestamps":{ "shape":"PredictiveScalingForecastTimestamps", - "documentation":"

The time stamps for the data points, in UTC format.

" + "documentation":"

The timestamps for the data points, in UTC format.

" }, "Values":{ "shape":"PredictiveScalingForecastValues", @@ -3603,7 +3603,7 @@ "members":{ "Timestamps":{ "shape":"PredictiveScalingForecastTimestamps", - "documentation":"

The time stamps for the data points, in UTC format.

" + "documentation":"

The timestamps for the data points, in UTC format.

" }, "Values":{ "shape":"PredictiveScalingForecastValues", @@ -3676,6 +3676,28 @@ }, "documentation":"

Specifies the minimum and maximum for the MemoryMiB object when you specify InstanceRequirements for an Auto Scaling group.

" }, + "Metric":{ + "type":"structure", + "required":[ + "Namespace", + "MetricName" + ], + "members":{ + "Namespace":{ + "shape":"MetricNamespace", + "documentation":"

The namespace of the metric. For more information, see the table in Amazon Web Services services that publish CloudWatch metrics in the Amazon CloudWatch User Guide.

" + }, + "MetricName":{ + "shape":"MetricName", + "documentation":"

The name of the metric.

" + }, + "Dimensions":{ + "shape":"MetricDimensions", + "documentation":"

The dimensions for the metric. For the list of available dimensions, see the Amazon Web Services documentation available from the table in Amazon Web Services services that publish CloudWatch metrics in the Amazon CloudWatch User Guide.

Conditional: If you published your metric with dimensions, you must specify the same dimensions in your scaling policy.

" + } + }, + "documentation":"

Represents a specific metric.

" + }, "MetricCollectionType":{ "type":"structure", "members":{ @@ -3690,6 +3712,37 @@ "type":"list", "member":{"shape":"MetricCollectionType"} }, + "MetricDataQueries":{ + "type":"list", + "member":{"shape":"MetricDataQuery"} + }, + "MetricDataQuery":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"XmlStringMaxLen255", + "documentation":"

A short name that identifies the object's results in the response. This name must be unique among all MetricDataQuery objects specified for a single scaling policy. If you are performing math expressions on this set of data, this name represents that data and can serve as a variable in the mathematical expression. The valid characters are letters, numbers, and underscores. The first character must be a lowercase letter.

" + }, + "Expression":{ + "shape":"XmlStringMaxLen1023", + "documentation":"

The math expression to perform on the returned data, if this object is performing a math expression. This expression can use the Id of the other metrics to refer to those metrics, and can also use the Id of other expressions to use the result of those expressions.

For example, to use search expressions, use the SEARCH() function in your metric math expression to combine multiple metrics from Auto Scaling groups that use a specific name prefix.

Conditional: Within each MetricDataQuery object, you must specify either Expression or MetricStat, but not both.

" + }, + "MetricStat":{ + "shape":"MetricStat", + "documentation":"

Information about the metric data to return.

Conditional: Within each MetricDataQuery object, you must specify either Expression or MetricStat, but not both.

" + }, + "Label":{ + "shape":"XmlStringMetricLabel", + "documentation":"

A human-readable label for this metric or expression. This is especially useful if this is a math expression, so that you know what the value represents.

" + }, + "ReturnData":{ + "shape":"ReturnData", + "documentation":"

Indicates whether to return the timestamps and raw data values of this metric.

If you use any math expressions, specify True for this value for only the final math expression that the metric specification is based on. You must specify False for ReturnData for all the other metrics and expressions used in the metric specification.

If you are only retrieving metrics and not performing any math expressions, do not specify anything for ReturnData. This sets it to its default (True).

" + } + }, + "documentation":"

The metric data to return. Also defines whether this call is returning data for one metric only, or whether it is performing a math expression on the values of returned metric statistics to create a new time series. A time series is a series of data points, each of which is associated with a timestamp.

For more information and examples, see Advanced predictive scaling policy configurations using customized metrics in the Amazon EC2 Auto Scaling User Guide.

" + }, "MetricDimension":{ "type":"structure", "required":[ @@ -3731,6 +3784,28 @@ "MetricName":{"type":"string"}, "MetricNamespace":{"type":"string"}, "MetricScale":{"type":"double"}, + "MetricStat":{ + "type":"structure", + "required":[ + "Metric", + "Stat" + ], + "members":{ + "Metric":{ + "shape":"Metric", + "documentation":"

The CloudWatch metric to return, including the metric name, namespace, and dimensions. To get the exact metric name, namespace, and dimensions, inspect the Metric object that is returned by a call to ListMetrics.

" + }, + "Stat":{ + "shape":"XmlStringMetricStat", + "documentation":"

The statistic to return. It can include any CloudWatch statistic or extended statistic. For a list of valid values, see the table in Statistics in the Amazon CloudWatch User Guide.

The most commonly used metrics for predictive scaling are Average and Sum.

" + }, + "Unit":{ + "shape":"MetricUnit", + "documentation":"

The unit to use for the returned data points. For a complete list of the units that CloudWatch supports, see the MetricDatum data type in the Amazon CloudWatch API Reference.

" + } + }, + "documentation":"

This structure defines the CloudWatch metric to return, along with the statistic, period, and unit.

For more information about the CloudWatch terminology below, see Amazon CloudWatch concepts in the Amazon CloudWatch User Guide.

" + }, "MetricStatistic":{ "type":"string", "enum":[ @@ -3950,6 +4025,39 @@ }, "documentation":"

Represents a predictive scaling policy configuration to use with Amazon EC2 Auto Scaling.

" }, + "PredictiveScalingCustomizedCapacityMetric":{ + "type":"structure", + "required":["MetricDataQueries"], + "members":{ + "MetricDataQueries":{ + "shape":"MetricDataQueries", + "documentation":"

One or more metric data queries to provide the data points for a capacity metric. Use multiple metric data queries only if you are performing a math expression on returned data.

" + } + }, + "documentation":"

Describes a customized capacity metric for a predictive scaling policy.

" + }, + "PredictiveScalingCustomizedLoadMetric":{ + "type":"structure", + "required":["MetricDataQueries"], + "members":{ + "MetricDataQueries":{ + "shape":"MetricDataQueries", + "documentation":"

One or more metric data queries to provide the data points for a load metric. Use multiple metric data queries only if you are performing a math expression on returned data.

" + } + }, + "documentation":"

Describes a customized load metric for a predictive scaling policy.

" + }, + "PredictiveScalingCustomizedScalingMetric":{ + "type":"structure", + "required":["MetricDataQueries"], + "members":{ + "MetricDataQueries":{ + "shape":"MetricDataQueries", + "documentation":"

One or more metric data queries to provide the data points for a scaling metric. Use multiple metric data queries only if you are performing a math expression on returned data.

" + } + }, + "documentation":"

Describes a customized scaling metric for a predictive scaling policy.

" + }, "PredictiveScalingForecastTimestamps":{ "type":"list", "member":{"shape":"TimestampType"} @@ -3976,22 +4084,34 @@ "members":{ "TargetValue":{ "shape":"MetricScale", - "documentation":"

Specifies the target utilization.

" + "documentation":"

Specifies the target utilization.

Some metrics are based on a count instead of a percentage, such as the request count for an Application Load Balancer or the number of messages in an SQS queue. If the scaling policy specifies one of these metrics, specify the target utilization as the optimal average request or message count per instance during any one-minute interval.

" }, "PredefinedMetricPairSpecification":{ "shape":"PredictiveScalingPredefinedMetricPair", - "documentation":"

The metric pair specification from which Amazon EC2 Auto Scaling determines the appropriate scaling metric and load metric to use.

" + "documentation":"

The predefined metric pair specification from which Amazon EC2 Auto Scaling determines the appropriate scaling metric and load metric to use.

" }, "PredefinedScalingMetricSpecification":{ "shape":"PredictiveScalingPredefinedScalingMetric", - "documentation":"

The scaling metric specification.

" + "documentation":"

The predefined scaling metric specification.

" }, "PredefinedLoadMetricSpecification":{ "shape":"PredictiveScalingPredefinedLoadMetric", - "documentation":"

The load metric specification.

" + "documentation":"

The predefined load metric specification.

" + }, + "CustomizedScalingMetricSpecification":{ + "shape":"PredictiveScalingCustomizedScalingMetric", + "documentation":"

The customized scaling metric specification.

" + }, + "CustomizedLoadMetricSpecification":{ + "shape":"PredictiveScalingCustomizedLoadMetric", + "documentation":"

The customized load metric specification.

" + }, + "CustomizedCapacityMetricSpecification":{ + "shape":"PredictiveScalingCustomizedCapacityMetric", + "documentation":"

The customized capacity metric specification.

" } }, - "documentation":"

This structure specifies the metrics and target utilization settings for a predictive scaling policy.

You must specify either a metric pair, or a load metric and a scaling metric individually. Specifying a metric pair instead of individual metrics provides a simpler way to configure metrics for a scaling policy. You choose the metric pair, and the policy automatically knows the correct sum and average statistics to use for the load metric and the scaling metric.

Example

" + "documentation":"

This structure specifies the metrics and target utilization settings for a predictive scaling policy.

You must specify either a metric pair, or a load metric and a scaling metric individually. Specifying a metric pair instead of individual metrics provides a simpler way to configure metrics for a scaling policy. You choose the metric pair, and the policy automatically knows the correct sum and average statistics to use for the load metric and the scaling metric.

Example

For information about using customized metrics with predictive scaling, see Advanced predictive scaling policy configurations using customized metrics in the Amazon EC2 Auto Scaling User Guide.

" }, "PredictiveScalingMetricSpecifications":{ "type":"list", @@ -4395,6 +4515,7 @@ "min":1, "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" }, + "ReturnData":{"type":"boolean"}, "ScalingActivityInProgressFault":{ "type":"structure", "members":{ @@ -5142,6 +5263,17 @@ "min":1, "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" }, + "XmlStringMetricLabel":{ + "type":"string", + "max":2047, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "XmlStringMetricStat":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, "XmlStringUserData":{ "type":"string", "max":21847, diff --git a/botocore/data/customer-profiles/2020-08-15/service-2.json b/botocore/data/customer-profiles/2020-08-15/service-2.json index 0ab01732e7..33f3fba0f5 100644 --- a/botocore/data/customer-profiles/2020-08-15/service-2.json +++ b/botocore/data/customer-profiles/2020-08-15/service-2.json @@ -45,7 +45,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

Creates a domain, which is a container for all customer data, such as customer profile attributes, object types, profile keys, and encryption keys. You can create multiple domains, and each domain can have multiple third-party integrations.

Each Amazon Connect instance can be associated with only one domain. Multiple Amazon Connect instances can be associated with one domain.

Use this API or UpdateDomain to enable identity resolution: set Matching to true.

" + "documentation":"

Creates a domain, which is a container for all customer data, such as customer profile attributes, object types, profile keys, and encryption keys. You can create multiple domains, and each domain can have multiple third-party integrations.

Each Amazon Connect instance can be associated with only one domain. Multiple Amazon Connect instances can be associated with one domain.

Use this API or UpdateDomain to enable identity resolution: set Matching to true.

To prevent cross-service impersonation when you call this API, see Cross-service confused deputy prevention for sample policies that you should apply.

" }, "CreateProfile":{ "name":"CreateProfile", @@ -166,6 +166,23 @@ ], "documentation":"

Removes a ProfileObjectType from a specific domain as well as removes all the ProfileObjects of that type. It also disables integrations from this specific ProfileObjectType. In addition, it scrubs all of the fields of the standard profile that were populated from this ProfileObjectType.

" }, + "GetAutoMergingPreview":{ + "name":"GetAutoMergingPreview", + "http":{ + "method":"POST", + "requestUri":"/domains/{DomainName}/identity-resolution-jobs/auto-merging-preview" + }, + "input":{"shape":"GetAutoMergingPreviewRequest"}, + "output":{"shape":"GetAutoMergingPreviewResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Tests the auto-merging settings of your Identity Resolution Job without merging your data. It randomly selects a sample of matching groups from the existing matching results, and applies the automerging settings that you provided. You can then view the number of profiles in the sample, the number of matches, and the number of profiles identified to be merged. This enables you to evaluate the accuracy of the attributes in your matching list.

You can't view which profiles are matched and would be merged.

We strongly recommend you use this API to do a dry run of the automerging process before running the Identity Resolution Job. Include at least two matching attributes. If your matching list includes too few attributes (such as only FirstName or only LastName), there may be a large number of matches. This increases the chances of erroneous merges.

" + }, "GetDomain":{ "name":"GetDomain", "http":{ @@ -183,6 +200,23 @@ ], "documentation":"

Returns information about a specific domain.

" }, + "GetIdentityResolutionJob":{ + "name":"GetIdentityResolutionJob", + "http":{ + "method":"GET", + "requestUri":"/domains/{DomainName}/identity-resolution-jobs/{JobId}" + }, + "input":{"shape":"GetIdentityResolutionJobRequest"}, + "output":{"shape":"GetIdentityResolutionJobResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Returns information about an Identity Resolution Job in a specific domain.

Identity Resolution Jobs are set up using the Amazon Connect admin console. For more information, see Use Identity Resolution to consolidate similar profiles.

" + }, "GetIntegration":{ "name":"GetIntegration", "http":{ @@ -215,7 +249,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

This API is in preview release for Amazon Connect and subject to change.

Before calling this API, use CreateDomain or UpdateDomain to enable identity resolution: set Matching to true.

GetMatches returns potentially matching profiles, based on the results of the latest run of a machine learning process.

Amazon Connect starts a batch process every Saturday at 12AM UTC to identify matching profiles. The results are returned up to seven days after the Saturday run.

Amazon Connect uses the following profile attributes to identify matches:

For example, two or more profiles—with spelling mistakes such as John Doe and Jhn Doe, or different casing email addresses such as JOHN_DOE@ANYCOMPANY.COM and johndoe@anycompany.com, or different phone number formats such as 555-010-0000 and +1-555-010-0000—can be detected as belonging to the same customer John Doe and merged into a unified profile.

" + "documentation":"

This API is in preview release for Amazon Connect and subject to change.

Before calling this API, use CreateDomain or UpdateDomain to enable identity resolution: set Matching to true.

GetMatches returns potentially matching profiles, based on the results of the latest run of a machine learning process.

The process of matching duplicate profiles. If Matching = true, Amazon Connect Customer Profiles starts a weekly batch process called Identity Resolution Job. If you do not specify a date and time for Identity Resolution Job to run, by default it runs every Saturday at 12AM UTC to detect duplicate profiles in your domains.

After the Identity Resolution Job completes, use the GetMatches API to return and review the results. Or, if you have configured ExportingConfig in the MatchingRequest, you can download the results from S3.

Amazon Connect uses the following profile attributes to identify matches:

For example, two or more profiles—with spelling mistakes such as John Doe and Jhn Doe, or different casing email addresses such as JOHN_DOE@ANYCOMPANY.COM and johndoe@anycompany.com, or different phone number formats such as 555-010-0000 and +1-555-010-0000—can be detected as belonging to the same customer John Doe and merged into a unified profile.

" }, "GetProfileObjectType":{ "name":"GetProfileObjectType", @@ -285,6 +319,23 @@ ], "documentation":"

Returns a list of all the domains for an AWS account that have been created.

" }, + "ListIdentityResolutionJobs":{ + "name":"ListIdentityResolutionJobs", + "http":{ + "method":"GET", + "requestUri":"/domains/{DomainName}/identity-resolution-jobs" + }, + "input":{"shape":"ListIdentityResolutionJobsRequest"}, + "output":{"shape":"ListIdentityResolutionJobsResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Lists all of the Identity Resolution Jobs in your domain. The response sorts the list by JobStartTime.

" + }, "ListIntegrations":{ "name":"ListIntegrations", "http":{ @@ -497,7 +548,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

Updates the properties of a domain, including creating or selecting a dead letter queue or an encryption key.

After a domain is created, the name can’t be changed.

Use this API or CreateDomain to enable identity resolution: set Matching to true.

" + "documentation":"

Updates the properties of a domain, including creating or selecting a dead letter queue or an encryption key.

After a domain is created, the name can’t be changed.

Use this API or CreateDomain to enable identity resolution: set Matching to true.

To prevent cross-service impersonation when you call this API, see Cross-service confused deputy prevention for sample policies that you should apply.

" }, "UpdateProfile":{ "name":"UpdateProfile", @@ -631,6 +682,25 @@ "key":{"shape":"string1To255"}, "value":{"shape":"string1To255"} }, + "AutoMerging":{ + "type":"structure", + "required":["Enabled"], + "members":{ + "Enabled":{ + "shape":"optionalBoolean", + "documentation":"

The flag that enables the auto-merging of duplicate profiles.

" + }, + "Consolidation":{ + "shape":"Consolidation", + "documentation":"

A list of matching attributes that represent matching criteria. If two profiles meet at least one of the requirements in the matching attributes list, they will be merged.

" + }, + "ConflictResolution":{ + "shape":"ConflictResolution", + "documentation":"

How the auto-merging process should resolve conflicts between different profiles. For example, if Profile A and Profile B have the same FirstName and LastName (and that is the matching criteria), which EmailAddress should be used?

" + } + }, + "documentation":"

Configuration settings for how to perform the auto-merging of profiles.

" + }, "BadRequestException":{ "type":"structure", "members":{ @@ -651,6 +721,28 @@ "max":512, "pattern":".*" }, + "ConflictResolution":{ + "type":"structure", + "required":["ConflictResolvingModel"], + "members":{ + "ConflictResolvingModel":{ + "shape":"ConflictResolvingModel", + "documentation":"

How the auto-merging process should resolve conflicts between different profiles.

" + }, + "SourceName":{ + "shape":"string1To255", + "documentation":"

The ObjectType name that is used to resolve profile merging conflicts when choosing SOURCE as the ConflictResolvingModel.

" + } + }, + "documentation":"

How the auto-merging process should resolve conflicts between different profiles.

" + }, + "ConflictResolvingModel":{ + "type":"string", + "enum":[ + "RECENCY", + "SOURCE" + ] + }, "ConnectorOperator":{ "type":"structure", "members":{ @@ -682,6 +774,17 @@ "max":256, "pattern":"[\\w/!@#+=.-]+" }, + "Consolidation":{ + "type":"structure", + "required":["MatchingAttributesList"], + "members":{ + "MatchingAttributesList":{ + "shape":"MatchingAttributesList", + "documentation":"

A list of matching criteria.

" + } + }, + "documentation":"

The matching criteria to be used during the auto-merging process.

" + }, "CreateDomainRequest":{ "type":"structure", "required":[ @@ -709,7 +812,7 @@ }, "Matching":{ "shape":"MatchingRequest", - "documentation":"

The process of matching duplicate profiles. If Matching = true, Amazon Connect Customer Profiles starts a weekly batch process every Saturday at 12AM UTC to detect duplicate profiles in your domains. After that batch process completes, use the GetMatches API to return and review the results.

" + "documentation":"

The process of matching duplicate profiles. If Matching = true, Amazon Connect Customer Profiles starts a weekly batch process called Identity Resolution Job. If you do not specify a date and time for Identity Resolution Job to run, by default it runs every Saturday at 12AM UTC to detect duplicate profiles in your domains.

After the Identity Resolution Job completes, use the GetMatches API to return and review the results. Or, if you have configured ExportingConfig in the MatchingRequest, you can download the results from S3.

" }, "Tags":{ "shape":"TagMap", @@ -744,7 +847,7 @@ }, "Matching":{ "shape":"MatchingResponse", - "documentation":"

The process of matching duplicate profiles. If Matching = true, Amazon Connect Customer Profiles starts a weekly batch process every Saturday at 12AM UTC to detect duplicate profiles in your domains. After that batch process completes, use the GetMatches API to return and review the results.

" + "documentation":"

The process of matching duplicate profiles. If Matching = true, Amazon Connect Customer Profiles starts a weekly batch process called Identity Resolution Job. If you do not specify a date and time for Identity Resolution Job to run, by default it runs every Saturday at 12AM UTC to detect duplicate profiles in your domains.

After the Identity Resolution Job completes, use the GetMatches API to return and review the results. Or, if you have configured ExportingConfig in the MatchingRequest, you can download the results from S3.

" }, "CreatedAt":{ "shape":"timestamp", @@ -1096,6 +1199,27 @@ }, "documentation":"

Usage-specific statistics about the domain.

" }, + "Double":{"type":"double"}, + "ExportingConfig":{ + "type":"structure", + "members":{ + "S3Exporting":{ + "shape":"S3ExportingConfig", + "documentation":"

The S3 location where Identity Resolution Jobs write result files.

" + } + }, + "documentation":"

Configuration information about the S3 bucket where Identity Resolution Jobs writes result files.

You need to give Customer Profiles service principal write permission to your S3 bucket. Otherwise, you'll get an exception in the API response. For an example policy, see Amazon Connect Customer Profiles cross-service confused deputy prevention.

" + }, + "ExportingLocation":{ + "type":"structure", + "members":{ + "S3Exporting":{ + "shape":"S3ExportingLocation", + "documentation":"

Information about the S3 location where Identity Resolution Jobs write result files.

" + } + }, + "documentation":"

The S3 location where Identity Resolution Jobs write result files.

" + }, "FieldContentType":{ "type":"string", "enum":[ @@ -1260,6 +1384,52 @@ "UNSPECIFIED" ] }, + "GetAutoMergingPreviewRequest":{ + "type":"structure", + "required":[ + "DomainName", + "Consolidation", + "ConflictResolution" + ], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

The unique name of the domain.

", + "location":"uri", + "locationName":"DomainName" + }, + "Consolidation":{ + "shape":"Consolidation", + "documentation":"

A list of matching attributes that represent matching criteria.

" + }, + "ConflictResolution":{ + "shape":"ConflictResolution", + "documentation":"

How the auto-merging process should resolve conflicts between different profiles.

" + } + } + }, + "GetAutoMergingPreviewResponse":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

The unique name of the domain.

" + }, + "NumberOfMatchesInSample":{ + "shape":"long", + "documentation":"

The number of match groups in the domain that have been reviewed in this preview dry run.

" + }, + "NumberOfProfilesInSample":{ + "shape":"long", + "documentation":"

The number of profiles found in this preview dry run.

" + }, + "NumberOfProfilesWillBeMerged":{ + "shape":"long", + "documentation":"

The number of profiles that would be merged if this wasn't a preview dry run.

" + } + } + }, "GetDomainRequest":{ "type":"structure", "required":["DomainName"], @@ -1302,7 +1472,7 @@ }, "Matching":{ "shape":"MatchingResponse", - "documentation":"

The process of matching duplicate profiles. If Matching = true, Amazon Connect Customer Profiles starts a weekly batch process every Saturday at 12AM UTC to detect duplicate profiles in your domains. After that batch process completes, use the GetMatches API to return and review the results.

" + "documentation":"

The process of matching duplicate profiles. If Matching = true, Amazon Connect Customer Profiles starts a weekly batch process called Identity Resolution Job. If you do not specify a date and time for Identity Resolution Job to run, by default it runs every Saturday at 12AM UTC to detect duplicate profiles in your domains.

After the Identity Resolution Job completes, use the GetMatches API to return and review the results. Or, if you have configured ExportingConfig in the MatchingRequest, you can download the results from S3.

" }, "CreatedAt":{ "shape":"timestamp", @@ -1318,6 +1488,76 @@ } } }, + "GetIdentityResolutionJobRequest":{ + "type":"structure", + "required":[ + "DomainName", + "JobId" + ], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

The unique name of the domain.

", + "location":"uri", + "locationName":"DomainName" + }, + "JobId":{ + "shape":"uuid", + "documentation":"

The unique identifier of the Identity Resolution Job.

", + "location":"uri", + "locationName":"JobId" + } + } + }, + "GetIdentityResolutionJobResponse":{ + "type":"structure", + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

The unique name of the domain.

" + }, + "JobId":{ + "shape":"uuid", + "documentation":"

The unique identifier of the Identity Resolution Job.

" + }, + "Status":{ + "shape":"IdentityResolutionJobStatus", + "documentation":"

The status of the Identity Resolution Job.

" + }, + "Message":{ + "shape":"stringTo2048", + "documentation":"

The error messages that are generated when the Identity Resolution Job runs.

" + }, + "JobStartTime":{ + "shape":"timestamp", + "documentation":"

The timestamp of when the Identity Resolution Job was started or will be started.

" + }, + "JobEndTime":{ + "shape":"timestamp", + "documentation":"

The timestamp of when the Identity Resolution Job was completed.

" + }, + "LastUpdatedAt":{ + "shape":"timestamp", + "documentation":"

The timestamp of when the Identity Resolution Job was most recently edited.

" + }, + "JobExpirationTime":{ + "shape":"timestamp", + "documentation":"

The timestamp of when the Identity Resolution Job will expire.

" + }, + "AutoMerging":{ + "shape":"AutoMerging", + "documentation":"

Configuration settings for how to perform the auto-merging of profiles.

" + }, + "ExportingLocation":{ + "shape":"ExportingLocation", + "documentation":"

The S3 location where the Identity Resolution Job writes result files.

" + }, + "JobStats":{ + "shape":"JobStats", + "documentation":"

Statistics about the Identity Resolution Job.

" + } + } + }, "GetIntegrationRequest":{ "type":"structure", "required":[ @@ -1470,6 +1710,10 @@ "shape":"boolean", "documentation":"

Indicates whether a profile should be created when data is received if one doesn’t exist for an object of this type. The default is FALSE. If the AllowProfileCreation flag is set to FALSE, then the service tries to fetch a standard profile and associate this object with the profile. If it is set to TRUE, and if no match is found, then the service creates a new standard profile.

" }, + "SourceLastUpdatedTimestampFormat":{ + "shape":"string1To255", + "documentation":"

The format of your sourceLastUpdatedTimestamp that was previously set up.

" + }, "Fields":{ "shape":"FieldMap", "documentation":"

A map of the name and ObjectType field.

" @@ -1523,6 +1767,10 @@ "shape":"boolean", "documentation":"

Indicates whether a profile should be created when data is received if one doesn’t exist for an object of this type. The default is FALSE. If the AllowProfileCreation flag is set to FALSE, then the service tries to fetch a standard profile and associate this object with the profile. If it is set to TRUE, and if no match is found, then the service creates a new standard profile.

" }, + "SourceLastUpdatedTimestampFormat":{ + "shape":"string1To255", + "documentation":"

The format of your sourceLastUpdatedTimestamp that was previously set up.

" + }, "Fields":{ "shape":"FieldMap", "documentation":"

A map of the name and ObjectType field.

" @@ -1533,6 +1781,60 @@ } } }, + "IdentityResolutionJob":{ + "type":"structure", + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

The unique name of the domain.

" + }, + "JobId":{ + "shape":"uuid", + "documentation":"

The unique identifier of the Identity Resolution Job.

" + }, + "Status":{ + "shape":"IdentityResolutionJobStatus", + "documentation":"

The status of the Identity Resolution Job.

" + }, + "JobStartTime":{ + "shape":"timestamp", + "documentation":"

The timestamp of when the job was started or will be started.

" + }, + "JobEndTime":{ + "shape":"timestamp", + "documentation":"

The timestamp of when the job was completed.

" + }, + "JobStats":{ + "shape":"JobStats", + "documentation":"

Statistics about an Identity Resolution Job.

" + }, + "ExportingLocation":{ + "shape":"ExportingLocation", + "documentation":"

The S3 location where the Identity Resolution Job writes result files.

" + }, + "Message":{ + "shape":"stringTo2048", + "documentation":"

The error messages that are generated when the Identity Resolution Job runs.

" + } + }, + "documentation":"

Information about the Identity Resolution Job.

" + }, + "IdentityResolutionJobStatus":{ + "type":"string", + "enum":[ + "PENDING", + "PREPROCESSING", + "FIND_MATCHING", + "MERGING", + "COMPLETED", + "PARTIAL_SUCCESS", + "FAILED" + ] + }, + "IdentityResolutionJobsList":{ + "type":"list", + "member":{"shape":"IdentityResolutionJob"} + }, "IncrementalPullConfig":{ "type":"structure", "members":{ @@ -1557,6 +1859,60 @@ "exception":true, "fault":true }, + "JobSchedule":{ + "type":"structure", + "required":[ + "DayOfTheWeek", + "Time" + ], + "members":{ + "DayOfTheWeek":{ + "shape":"JobScheduleDayOfTheWeek", + "documentation":"

The day when the Identity Resolution Job should run every week.

" + }, + "Time":{ + "shape":"JobScheduleTime", + "documentation":"

The time when the Identity Resolution Job should run every week.

" + } + }, + "documentation":"

The day and time when you want to start the Identity Resolution Job every week.

" + }, + "JobScheduleDayOfTheWeek":{ + "type":"string", + "enum":[ + "SUNDAY", + "MONDAY", + "TUESDAY", + "WEDNESDAY", + "THURSDAY", + "FRIDAY", + "SATURDAY" + ] + }, + "JobScheduleTime":{ + "type":"string", + "max":5, + "min":3, + "pattern":"^([0-9]|0[0-9]|1[0-9]|2[0-3]):[0-5][0-9]$" + }, + "JobStats":{ + "type":"structure", + "members":{ + "NumberOfProfilesReviewed":{ + "shape":"long", + "documentation":"

The number of profiles reviewed.

" + }, + "NumberOfMatchesFound":{ + "shape":"long", + "documentation":"

The number of matches found.

" + }, + "NumberOfMergesDone":{ + "shape":"long", + "documentation":"

The number of merges completed.

" + } + }, + "documentation":"

Statistics about the Identity Resolution Job.

" + }, "KeyMap":{ "type":"map", "key":{"shape":"name"}, @@ -1660,6 +2016,43 @@ } } }, + "ListIdentityResolutionJobsRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

The unique name of the domain.

", + "location":"uri", + "locationName":"DomainName" + }, + "NextToken":{ + "shape":"token", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"maxSize100", + "documentation":"

The maximum number of results to return per page.

", + "location":"querystring", + "locationName":"max-results" + } + } + }, + "ListIdentityResolutionJobsResponse":{ + "type":"structure", + "members":{ + "IdentityResolutionJobsList":{ + "shape":"IdentityResolutionJobsList", + "documentation":"

A list of Identity Resolution Jobs.

" + }, + "NextToken":{ + "shape":"token", + "documentation":"

If there are additional results, this is the token for the next set of results.

" + } + } + }, "ListIntegrationItem":{ "type":"structure", "required":[ @@ -1983,6 +2376,10 @@ "ProfileIds":{ "shape":"ProfileIdList", "documentation":"

A list of identifiers for profiles that match.

" + }, + "ConfidenceScore":{ + "shape":"Double", + "documentation":"

A number between 0 and 1 that represents the confidence level of assigning profiles to a matching group. A score of 1 likely indicates an exact match.

" } }, "documentation":"

The Match group object.

" @@ -1991,6 +2388,18 @@ "type":"list", "member":{"shape":"MatchItem"} }, + "MatchingAttributes":{ + "type":"list", + "member":{"shape":"string1To255"}, + "max":20, + "min":1 + }, + "MatchingAttributesList":{ + "type":"list", + "member":{"shape":"MatchingAttributes"}, + "max":10, + "min":1 + }, "MatchingRequest":{ "type":"structure", "required":["Enabled"], @@ -1998,6 +2407,18 @@ "Enabled":{ "shape":"optionalBoolean", "documentation":"

The flag that enables the matching process of duplicate profiles.

" + }, + "JobSchedule":{ + "shape":"JobSchedule", + "documentation":"

The day and time when you want to start the Identity Resolution Job every week.

" + }, + "AutoMerging":{ + "shape":"AutoMerging", + "documentation":"

Configuration information about the auto-merging process.

" + }, + "ExportingConfig":{ + "shape":"ExportingConfig", + "documentation":"

Configuration information for exporting Identity Resolution results, for example, to an S3 bucket.

" } }, "documentation":"

The flag that enables the matching process of duplicate profiles.

" @@ -2008,6 +2429,18 @@ "Enabled":{ "shape":"optionalBoolean", "documentation":"

The flag that enables the matching process of duplicate profiles.

" + }, + "JobSchedule":{ + "shape":"JobSchedule", + "documentation":"

The day and time when you want to start the Identity Resolution Job every week.

" + }, + "AutoMerging":{ + "shape":"AutoMerging", + "documentation":"

Configuration information about the auto-merging process.

" + }, + "ExportingConfig":{ + "shape":"ExportingConfig", + "documentation":"

Configuration information for exporting Identity Resolution results, for example, to an S3 bucket.

" } }, "documentation":"

The flag that enables the matching process of duplicate profiles.

" @@ -2400,6 +2833,10 @@ "shape":"boolean", "documentation":"

Indicates whether a profile should be created when data is received if one doesn’t exist for an object of this type. The default is FALSE. If the AllowProfileCreation flag is set to FALSE, then the service tries to fetch a standard profile and associate this object with the profile. If it is set to TRUE, and if no match is found, then the service creates a new standard profile.

" }, + "SourceLastUpdatedTimestampFormat":{ + "shape":"string1To255", + "documentation":"

The format of your sourceLastUpdatedTimestamp that was previously set up.

" + }, "Fields":{ "shape":"FieldMap", "documentation":"

A map of the name and ObjectType field.

" @@ -2445,6 +2882,10 @@ "shape":"boolean", "documentation":"

Indicates whether a profile should be created when data is received if one doesn’t exist for an object of this type. The default is FALSE. If the AllowProfileCreation flag is set to FALSE, then the service tries to fetch a standard profile and associate this object with the profile. If it is set to TRUE, and if no match is found, then the service creates a new standard profile.

" }, + "SourceLastUpdatedTimestampFormat":{ + "shape":"string1To255", + "documentation":"

The format of your sourceLastUpdatedTimestamp that was previously set up in fields that were parsed using SimpleDateFormat. If you have sourceLastUpdatedTimestamp in your field, you must set up sourceLastUpdatedTimestampFormat.

" + }, "Fields":{ "shape":"FieldMap", "documentation":"

A map of the name and ObjectType field.

" @@ -2501,6 +2942,35 @@ "NO_OP" ] }, + "S3ExportingConfig":{ + "type":"structure", + "required":["S3BucketName"], + "members":{ + "S3BucketName":{ + "shape":"s3BucketName", + "documentation":"

The name of the S3 bucket where Identity Resolution Jobs write result files.

" + }, + "S3KeyName":{ + "shape":"s3KeyNameCustomerOutputConfig", + "documentation":"

The S3 key name of the location where Identity Resolution Jobs write result files.

" + } + }, + "documentation":"

Configuration information about the S3 bucket where Identity Resolution Jobs write result files.

" + }, + "S3ExportingLocation":{ + "type":"structure", + "members":{ + "S3BucketName":{ + "shape":"s3BucketName", + "documentation":"

The name of the S3 bucket where Identity Resolution Jobs write result files.

" + }, + "S3KeyName":{ + "shape":"s3KeyName", + "documentation":"

The S3 key name of the location where Identity Resolution Jobs write result files.

" + } + }, + "documentation":"

The S3 location where Identity Resolution Jobs write result files.

" + }, "S3SourceProperties":{ "type":"structure", "required":["BucketName"], @@ -3026,7 +3496,7 @@ }, "Matching":{ "shape":"MatchingRequest", - "documentation":"

The process of matching duplicate profiles. If Matching = true, Amazon Connect Customer Profiles starts a weekly batch process every Saturday at 12AM UTC to detect duplicate profiles in your domains. After that batch process completes, use the GetMatches API to return and review the results.

" + "documentation":"

The process of matching duplicate profiles. If Matching = true, Amazon Connect Customer Profiles starts a weekly batch process called Identity Resolution Job. If you do not specify a date and time for Identity Resolution Job to run, by default it runs every Saturday at 12AM UTC to detect duplicate profiles in your domains.

After the Identity Resolution Job completes, use the GetMatches API to return and review the results. Or, if you have configured ExportingConfig in the MatchingRequest, you can download the results from S3.

" }, "Tags":{ "shape":"TagMap", @@ -3060,7 +3530,7 @@ }, "Matching":{ "shape":"MatchingResponse", - "documentation":"

The process of matching duplicate profiles. If Matching = true, Amazon Connect Customer Profiles starts a weekly batch process every Saturday at 12AM UTC to detect duplicate profiles in your domains. After that batch process completes, use the GetMatches API to return and review the results.

" + "documentation":"

The process of matching duplicate profiles. If Matching = true, Amazon Connect Customer Profiles starts a weekly batch process called Identity Resolution Job. If you do not specify a date and time for Identity Resolution Job to run, by default it runs every Saturday at 12AM UTC to detect duplicate profiles in your domains.

After the Identity Resolution Job completes, use the GetMatches API to return and review the results. Or, if you have configured ExportingConfig in the MatchingRequest, you can download the results from S3.

" }, "CreatedAt":{ "shape":"timestamp", @@ -3246,6 +3716,24 @@ "type":"list", "member":{"shape":"string1To255"} }, + "s3BucketName":{ + "type":"string", + "max":63, + "min":3, + "pattern":"^[a-z0-9.-]+$" + }, + "s3KeyName":{ + "type":"string", + "max":1024, + "min":1, + "pattern":".*" + }, + "s3KeyNameCustomerOutputConfig":{ + "type":"string", + "max":800, + "min":1, + "pattern":".*" + }, "sqsQueueUrl":{ "type":"string", "max":255, diff --git a/botocore/data/elasticache/2015-02-02/service-2.json b/botocore/data/elasticache/2015-02-02/service-2.json index 4ed88981ed..18a3e1ff47 100644 --- a/botocore/data/elasticache/2015-02-02/service-2.json +++ b/botocore/data/elasticache/2015-02-02/service-2.json @@ -1565,7 +1565,7 @@ }, "CacheNodeType":{ "shape":"String", - "documentation":"

The name of the compute and memory capacity node type for the cluster.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

" + "documentation":"

The name of the compute and memory capacity node type for the cluster.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

" }, "Engine":{ "shape":"String", @@ -1622,7 +1622,7 @@ }, "AutoMinorVersionUpgrade":{ "shape":"Boolean", - "documentation":"

 If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. This parameter is disabled for previous versions. 

" + "documentation":"

 If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. 

" }, "SecurityGroups":{ "shape":"SecurityGroupMembershipList", @@ -1805,7 +1805,7 @@ "documentation":"

The customer outpost ARN of the cache node.

" } }, - "documentation":"

Represents an individual cache node within a cluster. Each cache node runs its own instance of the cluster's protocol-compliant caching software - either Memcached or Redis.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

" + "documentation":"

Represents an individual cache node within a cluster. Each cache node runs its own instance of the cluster's protocol-compliant caching software - either Memcached or Redis.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

" }, "CacheNodeIdsList":{ "type":"list", @@ -2434,7 +2434,7 @@ }, "CacheNodeType":{ "shape":"String", - "documentation":"

The compute and memory capacity of the nodes in the node group (shard).

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

" + "documentation":"

The compute and memory capacity of the nodes in the node group (shard).

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

" }, "Engine":{ "shape":"String", @@ -2486,7 +2486,7 @@ }, "AutoMinorVersionUpgrade":{ "shape":"BooleanOptional", - "documentation":"

 If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. This parameter is disabled for previous versions. 

" + "documentation":"

 If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. 

" }, "SnapshotRetentionLimit":{ "shape":"IntegerOptional", @@ -2699,7 +2699,7 @@ }, "CacheNodeType":{ "shape":"String", - "documentation":"

The compute and memory capacity of the nodes in the node group (shard).

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

" + "documentation":"

The compute and memory capacity of the nodes in the node group (shard).

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

" }, "Engine":{ "shape":"String", @@ -2751,7 +2751,7 @@ }, "AutoMinorVersionUpgrade":{ "shape":"BooleanOptional", - "documentation":"

 If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. This parameter is disabled for previous versions. 

" + "documentation":"

 If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. 

" }, "SnapshotRetentionLimit":{ "shape":"IntegerOptional", @@ -3411,7 +3411,7 @@ }, "CacheNodeType":{ "shape":"String", - "documentation":"

The cache node type filter value. Use this parameter to show only those reservations matching the specified cache node type.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

" + "documentation":"

The cache node type filter value. Use this parameter to show only those reservations matching the specified cache node type.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

" }, "Duration":{ "shape":"String", @@ -3445,7 +3445,7 @@ }, "CacheNodeType":{ "shape":"String", - "documentation":"

The cache node type filter value. Use this parameter to show only the available offerings matching the specified cache node type.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

" + "documentation":"

The cache node type filter value. Use this parameter to show only the available offerings matching the specified cache node type.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

" }, "Duration":{ "shape":"String", @@ -4495,7 +4495,7 @@ }, "AutoMinorVersionUpgrade":{ "shape":"BooleanOptional", - "documentation":"

 If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. This parameter is disabled for previous versions. 

" + "documentation":"

 If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. 

" }, "SnapshotRetentionLimit":{ "shape":"IntegerOptional", @@ -4683,7 +4683,7 @@ }, "AutoMinorVersionUpgrade":{ "shape":"BooleanOptional", - "documentation":"

 If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. This parameter is disabled for previous versions. 

" + "documentation":"

 If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. 

" }, "SnapshotRetentionLimit":{ "shape":"IntegerOptional", @@ -5747,7 +5747,7 @@ }, "CacheNodeType":{ "shape":"String", - "documentation":"

The cache node type for the reserved cache nodes.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

" + "documentation":"

The cache node type for the reserved cache nodes.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

" }, "StartTime":{ "shape":"TStamp", @@ -5859,7 +5859,7 @@ }, "CacheNodeType":{ "shape":"String", - "documentation":"

The cache node type for the reserved cache node.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

" + "documentation":"

The cache node type for the reserved cache node.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

" }, "Duration":{ "shape":"Integer", @@ -6197,7 +6197,7 @@ }, "CacheNodeType":{ "shape":"String", - "documentation":"

The name of the compute and memory capacity node type for the source cluster.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

" + "documentation":"

The name of the compute and memory capacity node type for the source cluster.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

Additional node type info

" }, "Engine":{ "shape":"String", @@ -6249,7 +6249,7 @@ }, "AutoMinorVersionUpgrade":{ "shape":"Boolean", - "documentation":"

 If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. This parameter is disabled for previous versions. 

" + "documentation":"

 If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. 

" }, "SnapshotRetentionLimit":{ "shape":"IntegerOptional", @@ -6738,7 +6738,10 @@ "shape":"EngineType", "documentation":"

The current supported value is Redis.

" }, - "MinimumEngineVersion":{"shape":"String"}, + "MinimumEngineVersion":{ + "shape":"String", + "documentation":"

The minimum engine version required, which is Redis 6.0.

" + }, "AccessString":{ "shape":"String", "documentation":"

Access permissions string used for this user.

" @@ -6788,7 +6791,10 @@ "shape":"UserIdList", "documentation":"

The list of user IDs that belong to the user group.

" }, - "MinimumEngineVersion":{"shape":"String"}, + "MinimumEngineVersion":{ + "shape":"String", + "documentation":"

The minimum engine version required, which is Redis 6.0.

" + }, "PendingChanges":{ "shape":"UserGroupPendingChanges", "documentation":"

A list of updates being applied to the user group.

" diff --git a/botocore/data/imagebuilder/2019-12-02/service-2.json b/botocore/data/imagebuilder/2019-12-02/service-2.json index be13a8ba9b..16fae75bb1 100644 --- a/botocore/data/imagebuilder/2019-12-02/service-2.json +++ b/botocore/data/imagebuilder/2019-12-02/service-2.json @@ -264,7 +264,7 @@ {"shape":"CallRateLimitExceededException"}, {"shape":"ResourceDependencyException"} ], - "documentation":"

Deletes an Image Builder image resource. This does not delete any EC2 AMIs or ECR container images that are created during the image build process. You must clean those up separately, using the appropriate Amazon EC2 or Amazon ECR console actions, or API or CLI commands.

" + "documentation":"

Deletes an Image Builder image resource. This does not delete any EC2 AMIs or ECR container images that are created during the image build process. You must clean those up separately, using the appropriate Amazon EC2 or Amazon ECR console actions, or API or CLI commands.

" }, "DeleteImagePipeline":{ "name":"DeleteImagePipeline", @@ -807,7 +807,7 @@ {"shape":"ForbiddenException"}, {"shape":"CallRateLimitExceededException"} ], - "documentation":"

Applies a policy to a container image. We recommend that you call the RAM API CreateResourceShare (https://docs.aws.amazon.com/ram/latest/APIReference/API_CreateResourceShare.html) to share resources. If you call the Image Builder API PutContainerImagePolicy, you must also call the RAM API PromoteResourceShareCreatedFromPolicy (https://docs.aws.amazon.com/ram/latest/APIReference/API_PromoteResourceShareCreatedFromPolicy.html) in order for the resource to be visible to all principals with whom the resource is shared.

" + "documentation":"

Applies a policy to a container image. We recommend that you call the RAM API CreateResourceShare (https://docs.aws.amazon.com//ram/latest/APIReference/API_CreateResourceShare.html) to share resources. If you call the Image Builder API PutContainerImagePolicy, you must also call the RAM API PromoteResourceShareCreatedFromPolicy (https://docs.aws.amazon.com//ram/latest/APIReference/API_PromoteResourceShareCreatedFromPolicy.html) in order for the resource to be visible to all principals with whom the resource is shared.

" }, "PutImagePolicy":{ "name":"PutImagePolicy", @@ -1023,7 +1023,7 @@ }, "description":{ "shape":"NonEmptyString", - "documentation":"

The description of the distribution configuration. Minimum and maximum length are in characters.

" + "documentation":"

The description of the AMI distribution configuration. Minimum and maximum length are in characters.

" }, "targetAccountIds":{ "shape":"AccountList", @@ -1286,8 +1286,8 @@ }, "ComponentParameterValue":{ "type":"string", - "min":1, - "pattern":"[^\\x00]+" + "min":0, + "pattern":"[^\\x00]*" }, "ComponentParameterValueList":{ "type":"list", @@ -3594,6 +3594,14 @@ "userGroups":{ "shape":"StringList", "documentation":"

The name of the group.

" + }, + "organizationArns":{ + "shape":"OrganizationArnList", + "documentation":"

The ARN for an Amazon Web Services Organization that you want to share your AMI with. For more information, see What is Organizations?.

" + }, + "organizationalUnitArns":{ + "shape":"OrganizationalUnitArnList", + "documentation":"

The ARN for an Organizations organizational unit (OU) that you want to share your AMI with. For more information about key concepts for Organizations, see Organizations terminology and concepts.

" } }, "documentation":"

Describes the configuration for a launch permission. The launch permission modification request is sent to the Amazon EC2 ModifyImageAttribute API on behalf of the user for each Region they have selected to distribute the AMI. To make an AMI public, set the launch permission authorized accounts to all. See the examples for making an AMI public at Amazon EC2 ModifyImageAttribute.

" @@ -4099,6 +4107,26 @@ "min":1 }, "NullableBoolean":{"type":"boolean"}, + "OrganizationArn":{ + "type":"string", + "pattern":"^arn:aws[^:]*:organizations::[0-9]{12}:organization/o-[a-z0-9]{10,32}$" + }, + "OrganizationArnList":{ + "type":"list", + "member":{"shape":"OrganizationArn"}, + "max":25, + "min":1 + }, + "OrganizationalUnitArn":{ + "type":"string", + "pattern":"^arn:aws[^:]*:organizations::[0-9]{12}:ou/o-[a-z0-9]{10,32}/ou-[0-9a-z]{4,32}-[0-9a-z]{8,32}" + }, + "OrganizationalUnitArnList":{ + "type":"list", + "member":{"shape":"OrganizationalUnitArn"}, + "max":25, + "min":1 + }, "OsVersion":{ "type":"string", "min":1 diff --git a/botocore/data/iotsitewise/2019-12-02/paginators-1.json b/botocore/data/iotsitewise/2019-12-02/paginators-1.json index a70b6688bb..0f3fe49a31 100644 --- a/botocore/data/iotsitewise/2019-12-02/paginators-1.json +++ b/botocore/data/iotsitewise/2019-12-02/paginators-1.json @@ -77,6 +77,12 @@ "output_token": "nextToken", "limit_key": "maxResults", "result_key": "interpolatedAssetPropertyValues" + }, + "ListTimeSeries": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "TimeSeriesSummaries" } } } diff --git a/botocore/data/iotsitewise/2019-12-02/service-2.json b/botocore/data/iotsitewise/2019-12-02/service-2.json index a97cd150fb..66e5ae2fbc 100644 --- a/botocore/data/iotsitewise/2019-12-02/service-2.json +++ b/botocore/data/iotsitewise/2019-12-02/service-2.json @@ -30,6 +30,23 @@ "documentation":"

Associates a child asset with the given parent asset through a hierarchy defined in the parent asset's model. For more information, see Associating assets in the IoT SiteWise User Guide.

", "endpoint":{"hostPrefix":"api."} }, + "AssociateTimeSeriesToAssetProperty":{ + "name":"AssociateTimeSeriesToAssetProperty", + "http":{ + "method":"POST", + "requestUri":"/timeseries/associate/" + }, + "input":{"shape":"AssociateTimeSeriesToAssetPropertyRequest"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictingOperationException"} + ], + "documentation":"

Associates a time series (data stream) with an asset property.

", + "endpoint":{"hostPrefix":"api."} + }, "BatchAssociateProjectAssets":{ "name":"BatchAssociateProjectAssets", "http":{ @@ -221,7 +238,7 @@ {"shape":"ThrottlingException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Creates a project in the specified portal.

", + "documentation":"

Creates a project in the specified portal.

Make sure that the project name and description don't contain confidential information.

", "endpoint":{"hostPrefix":"monitor."} }, "DeleteAccessPolicy":{ @@ -351,6 +368,23 @@ "documentation":"

Deletes a project from IoT SiteWise Monitor.

", "endpoint":{"hostPrefix":"monitor."} }, + "DeleteTimeSeries":{ + "name":"DeleteTimeSeries", + "http":{ + "method":"POST", + "requestUri":"/timeseries/delete/" + }, + "input":{"shape":"DeleteTimeSeriesRequest"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictingOperationException"} + ], + "documentation":"

Deletes a time series (data stream). If you delete a time series that's associated with an asset property, the asset property still exists, but the time series will no longer be associated with this asset property.

To identify a time series, do one of the following:

", + "endpoint":{"hostPrefix":"api."} + }, "DescribeAccessPolicy":{ "name":"DescribeAccessPolicy", "http":{ @@ -560,6 +594,23 @@ "documentation":"

Retrieves information about the storage configuration for IoT SiteWise.

", "endpoint":{"hostPrefix":"api."} }, + "DescribeTimeSeries":{ + "name":"DescribeTimeSeries", + "http":{ + "method":"GET", + "requestUri":"/timeseries/describe/" + }, + "input":{"shape":"DescribeTimeSeriesRequest"}, + "output":{"shape":"DescribeTimeSeriesResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Retrieves information about a time series (data stream).

To identify a time series, do one of the following:

", + "endpoint":{"hostPrefix":"api."} + }, "DisassociateAssets":{ "name":"DisassociateAssets", "http":{ @@ -577,6 +628,23 @@ "documentation":"

Disassociates a child asset from the given parent asset through a hierarchy defined in the parent asset's model.

", "endpoint":{"hostPrefix":"api."} }, + "DisassociateTimeSeriesFromAssetProperty":{ + "name":"DisassociateTimeSeriesFromAssetProperty", + "http":{ + "method":"POST", + "requestUri":"/timeseries/disassociate/" + }, + "input":{"shape":"DisassociateTimeSeriesFromAssetPropertyRequest"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictingOperationException"} + ], + "documentation":"

Disassociates a time series (data stream) from an asset property.

", + "endpoint":{"hostPrefix":"api."} + }, "GetAssetPropertyAggregates":{ "name":"GetAssetPropertyAggregates", "http":{ @@ -837,6 +905,23 @@ "documentation":"

Retrieves the list of tags for an IoT SiteWise resource.

", "endpoint":{"hostPrefix":"api."} }, + "ListTimeSeries":{ + "name":"ListTimeSeries", + "http":{ + "method":"GET", + "requestUri":"/timeseries/" + }, + "input":{"shape":"ListTimeSeriesRequest"}, + "output":{"shape":"ListTimeSeriesResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Retrieves a paginated list of time series (data streams).

", + "endpoint":{"hostPrefix":"api."} + }, "PutDefaultEncryptionConfiguration":{ "name":"PutDefaultEncryptionConfiguration", "http":{ @@ -1802,6 +1887,39 @@ } } }, + "AssociateTimeSeriesToAssetPropertyRequest":{ + "type":"structure", + "required":[ + "alias", + "assetId", + "propertyId" + ], + "members":{ + "alias":{ + "shape":"PropertyAlias", + "documentation":"

The alias that identifies the time series.

", + "location":"querystring", + "locationName":"alias" + }, + "assetId":{ + "shape":"ID", + "documentation":"

The ID of the asset in which the asset property was created.

", + "location":"querystring", + "locationName":"assetId" + }, + "propertyId":{ + "shape":"ID", + "documentation":"

The ID of the asset property.

", + "location":"querystring", + "locationName":"propertyId" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

", + "idempotencyToken":true + } + } + }, "AssociatedAssetsSummaries":{ "type":"list", "member":{"shape":"AssociatedAssetsSummary"} @@ -2757,6 +2875,34 @@ "members":{ } }, + "DeleteTimeSeriesRequest":{ + "type":"structure", + "members":{ + "alias":{ + "shape":"PropertyAlias", + "documentation":"

The alias that identifies the time series.

", + "location":"querystring", + "locationName":"alias" + }, + "assetId":{ + "shape":"ID", + "documentation":"

The ID of the asset in which the asset property was created.

", + "location":"querystring", + "locationName":"assetId" + }, + "propertyId":{ + "shape":"ID", + "documentation":"

The ID of the asset property.

", + "location":"querystring", + "locationName":"propertyId" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

", + "idempotencyToken":true + } + } + }, "DescribeAccessPolicyRequest":{ "type":"structure", "required":["accessPolicyId"], @@ -3074,7 +3220,7 @@ }, "kmsKeyArn":{ "shape":"ARN", - "documentation":"

The key ARN of the customer managed customer master key (CMK) used for KMS encryption if you use KMS_BASED_ENCRYPTION.

" + "documentation":"

The key ARN of the customer managed key used for KMS encryption if you use KMS_BASED_ENCRYPTION.

" }, "configurationStatus":{ "shape":"ConfigurationStatus", @@ -3359,6 +3505,10 @@ "shape":"MultiLayerStorage", "documentation":"

Contains information about the storage destination.

" }, + "disassociatedDataStorage":{ + "shape":"DisassociatedDataStorageState", + "documentation":"

Contains the storage configuration for time series (data streams) that aren't associated with asset properties. The disassociatedDataStorage can be one of the following values:

For more information, see Data streams in the IoT SiteWise User Guide.

" + }, "configurationStatus":{"shape":"ConfigurationStatus"}, "lastUpdateDate":{ "shape":"Timestamp", @@ -3366,6 +3516,72 @@ } } }, + "DescribeTimeSeriesRequest":{ + "type":"structure", + "members":{ + "alias":{ + "shape":"PropertyAlias", + "documentation":"

The alias that identifies the time series.

", + "location":"querystring", + "locationName":"alias" + }, + "assetId":{ + "shape":"ID", + "documentation":"

The ID of the asset in which the asset property was created.

", + "location":"querystring", + "locationName":"assetId" + }, + "propertyId":{ + "shape":"ID", + "documentation":"

The ID of the asset property.

", + "location":"querystring", + "locationName":"propertyId" + } + } + }, + "DescribeTimeSeriesResponse":{ + "type":"structure", + "required":[ + "timeSeriesId", + "dataType", + "timeSeriesCreationDate", + "timeSeriesLastUpdateDate" + ], + "members":{ + "assetId":{ + "shape":"ID", + "documentation":"

The ID of the asset in which the asset property was created.

" + }, + "propertyId":{ + "shape":"ID", + "documentation":"

The ID of the asset property.

" + }, + "alias":{ + "shape":"PropertyAlias", + "documentation":"

The alias that identifies the time series.

" + }, + "timeSeriesId":{ + "shape":"TimeSeriesId", + "documentation":"

The ID of the time series.

" + }, + "dataType":{ + "shape":"PropertyDataType", + "documentation":"

The data type of the time series.

If you specify STRUCT, you must also specify dataTypeSpec to identify the type of the structure for this time series.

" + }, + "dataTypeSpec":{ + "shape":"Name", + "documentation":"

The data type of the structure for this time series. This parameter is required for time series that have the STRUCT data type.

The options for this parameter depend on the type of the composite model in which you created the asset property that is associated with your time series. Use AWS/ALARM_STATE for alarm state in alarm composite models.

" + }, + "timeSeriesCreationDate":{ + "shape":"Timestamp", + "documentation":"

The date that the time series was created, in Unix epoch time.

" + }, + "timeSeriesLastUpdateDate":{ + "shape":"Timestamp", + "documentation":"

The date that the time series was last updated, in Unix epoch time.

" + } + } + }, "Description":{ "type":"string", "max":2048, @@ -3431,6 +3647,46 @@ } } }, + "DisassociateTimeSeriesFromAssetPropertyRequest":{ + "type":"structure", + "required":[ + "alias", + "assetId", + "propertyId" + ], + "members":{ + "alias":{ + "shape":"PropertyAlias", + "documentation":"

The alias that identifies the time series.

", + "location":"querystring", + "locationName":"alias" + }, + "assetId":{ + "shape":"ID", + "documentation":"

The ID of the asset in which the asset property was created.

", + "location":"querystring", + "locationName":"assetId" + }, + "propertyId":{ + "shape":"ID", + "documentation":"

The ID of the asset property.

", + "location":"querystring", + "locationName":"propertyId" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

", + "idempotencyToken":true + } + } + }, + "DisassociatedDataStorageState":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, "Email":{ "type":"string", "max":255, @@ -3873,13 +4129,13 @@ }, "type":{ "shape":"InterpolationType", - "documentation":"

The interpolation type.

Valid values: LINEAR_INTERPOLATION | LOCF_INTERPOLATION

", + "documentation":"

The interpolation type.

Valid values: LINEAR_INTERPOLATION | LOCF_INTERPOLATION

", "location":"querystring", "locationName":"type" }, "intervalWindowInSeconds":{ "shape":"IntervalWindowInSeconds", - "documentation":"

The query interval for the window in seconds. IoT SiteWise computes each interpolated value by using data points from the timestamp of each interval minus the window to the timestamp of each interval plus the window. If not specified, the window is between the start time minus the interval and the end time plus the interval.

For example, you can get the interpolated temperature values for a wind turbine every 24 hours over a duration of 7 days. If the interpolation starts on July 1, 2021, at 9 AM with a window of 2 hours, IoT SiteWise uses the data points from 7 AM (9 AM - 2 hours) to 11 AM (9 AM + 2 hours) on July 2, 2021 to compute the first interpolated value, uses the data points from 7 AM (9 AM - 2 hours) to 11 AM (9 AM + 2 hours) on July 3, 2021 to compute the second interpolated value, and so on.

", + "documentation":"

The query interval for the window, in seconds. IoT SiteWise computes each interpolated value by using data points from the timestamp of each interval, minus the window to the timestamp of each interval plus the window. If not specified, the window ranges between the start time minus the interval and the end time plus the interval.

For example, you can get the interpolated temperature values for a wind turbine every 24 hours over a duration of 7 days. If the interpolation starts on July 1, 2021, at 9 AM with a window of 2 hours, IoT SiteWise uses the data points from 7 AM (9 AM minus 2 hours) to 11 AM (9 AM plus 2 hours) on July 2, 2021 to compute the first interpolated value. Next, IoT SiteWise uses the data points from 7 AM (9 AM minus 2 hours) to 11 AM (9 AM plus 2 hours) on July 3, 2021 to compute the second interpolated value, and so on.

", "location":"querystring", "locationName":"intervalWindowInSeconds" } @@ -4565,6 +4821,62 @@ } } }, + "ListTimeSeriesRequest":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token to be used for the next set of paginated results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to return for each paginated request.

", + "location":"querystring", + "locationName":"maxResults" + }, + "assetId":{ + "shape":"ID", + "documentation":"

The ID of the asset in which the asset property was created.

", + "location":"querystring", + "locationName":"assetId" + }, + "aliasPrefix":{ + "shape":"PropertyAlias", + "documentation":"

The alias prefix of the time series.

", + "location":"querystring", + "locationName":"aliasPrefix" + }, + "timeSeriesType":{ + "shape":"ListTimeSeriesType", + "documentation":"

The type of the time series. The time series type can be one of the following values:

", + "location":"querystring", + "locationName":"timeSeriesType" + } + } + }, + "ListTimeSeriesResponse":{ + "type":"structure", + "required":["TimeSeriesSummaries"], + "members":{ + "TimeSeriesSummaries":{ + "shape":"TimeSeriesSummaries", + "documentation":"

One or more time series summaries to list.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results, or null if there are no additional results.

" + } + } + }, + "ListTimeSeriesType":{ + "type":"string", + "enum":[ + "ASSOCIATED", + "DISASSOCIATED" + ] + }, "LoggingLevel":{ "type":"string", "enum":[ @@ -5022,7 +5334,7 @@ }, "kmsKeyId":{ "shape":"KmsKeyId", - "documentation":"

The Key ID of the customer managed customer master key (CMK) used for KMS encryption. This is required if you use KMS_BASED_ENCRYPTION.

" + "documentation":"

The Key ID of the customer managed key used for KMS encryption. This is required if you use KMS_BASED_ENCRYPTION.

" } } }, @@ -5039,7 +5351,7 @@ }, "kmsKeyArn":{ "shape":"ARN", - "documentation":"

The Key ARN of the KMS CMK used for KMS encryption if you use KMS_BASED_ENCRYPTION.

" + "documentation":"

The Key ARN of the KMS key used for KMS encryption if you use KMS_BASED_ENCRYPTION.

" }, "configurationStatus":{ "shape":"ConfigurationStatus", @@ -5073,6 +5385,10 @@ "multiLayerStorage":{ "shape":"MultiLayerStorage", "documentation":"

Identifies a storage destination. If you specified MULTI_LAYER_STORAGE for the storage type, you must specify a MultiLayerStorage object.

" + }, + "disassociatedDataStorage":{ + "shape":"DisassociatedDataStorageState", + "documentation":"

Contains the storage configuration for time series (data streams) that aren't associated with asset properties. The disassociatedDataStorage can be one of the following values:

For more information, see Data streams in the IoT SiteWise User Guide.

" } } }, @@ -5091,6 +5407,10 @@ "shape":"MultiLayerStorage", "documentation":"

Contains information about the storage destination.

" }, + "disassociatedDataStorage":{ + "shape":"DisassociatedDataStorageState", + "documentation":"

Contains the storage configuration for time series (data streams) that aren't associated with asset properties. The disassociatedDataStorage can be one of the following values:

For more information, see Data streams in the IoT SiteWise User Guide.

" + }, "configurationStatus":{"shape":"ConfigurationStatus"} } }, @@ -5277,6 +5597,59 @@ "DESCENDING" ] }, + "TimeSeriesId":{ + "type":"string", + "max":73, + "min":36 + }, + "TimeSeriesSummaries":{ + "type":"list", + "member":{"shape":"TimeSeriesSummary"} + }, + "TimeSeriesSummary":{ + "type":"structure", + "required":[ + "timeSeriesId", + "dataType", + "timeSeriesCreationDate", + "timeSeriesLastUpdateDate" + ], + "members":{ + "assetId":{ + "shape":"ID", + "documentation":"

The ID of the asset in which the asset property was created.

" + }, + "propertyId":{ + "shape":"ID", + "documentation":"

The ID of the asset property.

" + }, + "alias":{ + "shape":"PropertyAlias", + "documentation":"

The alias that identifies the time series.

" + }, + "timeSeriesId":{ + "shape":"TimeSeriesId", + "documentation":"

The ID of the time series.

" + }, + "dataType":{ + "shape":"PropertyDataType", + "documentation":"

The data type of the time series.

If you specify STRUCT, you must also specify dataTypeSpec to identify the type of the structure for this time series.

" + }, + "dataTypeSpec":{ + "shape":"Name", + "documentation":"

The data type of the structure for this time series. This parameter is required for time series that have the STRUCT data type.

The options for this parameter depend on the type of the composite model in which you created the asset property that is associated with your time series. Use AWS/ALARM_STATE for alarm state in alarm composite models.

" + }, + "timeSeriesCreationDate":{ + "shape":"Timestamp", + "documentation":"

The date that the time series was created, in Unix epoch time.

" + }, + "timeSeriesLastUpdateDate":{ + "shape":"Timestamp", + "documentation":"

The date that the time series was last updated, in Unix epoch time.

" + } + }, + "documentation":"

Contains a summary of a time series (data stream).

" + }, "Timestamp":{"type":"timestamp"}, "Timestamps":{ "type":"list", @@ -5350,10 +5723,10 @@ }, "offset":{ "shape":"Offset", - "documentation":"

The offset for the tumbling window. The offset parameter accepts the following:

" + "documentation":"

The offset for the tumbling window. The offset parameter accepts the following:

" } }, - "documentation":"

Contains a tumbling window, which is a repeating fixed-sized, non-overlapping, and contiguous time window. You use this window in metrics to aggregate data from properties and other assets.

You can use m, h, d, and w when you specify an interval or offset. Note that m represents minutes, and w represents weeks. You can also use s to represent seconds in offset.

The interval and offset parameters support the ISO 8601 format. For example, PT5S represents five seconds, PT5M represents five minutes, and PT5H represents five hours.

" + "documentation":"

Contains a tumbling window, which is a repeating fixed-size, non-overlapping, and contiguous time window. You can use this window in metrics to aggregate data from properties and other assets.

You can use m, h, d, and w when you specify an interval or offset. Note that m represents minutes, h represents hours, d represents days, and w represents weeks. You can also use s to represent seconds in offset.

The interval and offset parameters support the ISO 8601 format. For example, PT5S represents 5 seconds, PT5M represents 5 minutes, and PT5H represents 5 hours.

" }, "UnauthorizedException":{ "type":"structure", diff --git a/botocore/data/lambda/2015-03-31/service-2.json b/botocore/data/lambda/2015-03-31/service-2.json index a06cbe265d..dfec8854f2 100644 --- a/botocore/data/lambda/2015-03-31/service-2.json +++ b/botocore/data/lambda/2015-03-31/service-2.json @@ -123,23 +123,6 @@ ], "documentation":"

Creates a Lambda function. To create a function, you need a deployment package and an execution role. The deployment package is a .zip file archive or container image that contains your function code. The execution role grants the function permission to use Amazon Web Services services, such as Amazon CloudWatch Logs for log streaming and X-Ray for request tracing.

You set the package type to Image if the deployment package is a container image. For a container image, the code property must include the URI of a container image in the Amazon ECR registry. You do not need to specify the handler and runtime properties.

You set the package type to Zip if the deployment package is a .zip file archive. For a .zip file archive, the code property specifies the location of the .zip file. You must also specify the handler and runtime properties. The code in the deployment package must be compatible with the target instruction set architecture of the function (x86-64 or arm64). If you do not specify the architecture, the default value is x86-64.

When you create a function, Lambda provisions an instance of the function and its supporting resources. If your function connects to a VPC, this process can take a minute or so. During this time, you can't invoke or modify the function. The State, StateReason, and StateReasonCode fields in the response from GetFunctionConfiguration indicate when the function is ready to invoke. For more information, see Function States.

A function has an unpublished version, and can have published versions and aliases. The unpublished version changes when you update your function's code and configuration. A published version is a snapshot of your function code and configuration that can't be changed. An alias is a named resource that maps to a version, and can be changed to map to a different version. Use the Publish parameter to create version 1 of your function from its initial configuration.

The other parameters let you configure version-specific and function-level settings. You can modify version-specific settings later with UpdateFunctionConfiguration. Function-level settings apply to both the unpublished and published versions of the function, and include tags (TagResource) and per-function concurrency limits (PutFunctionConcurrency).

You can use code signing if your deployment package is a .zip file archive. To enable code signing for this function, specify the ARN of a code-signing configuration. When a user attempts to deploy a code package with UpdateFunctionCode, Lambda checks that the code package has a valid signature from a trusted publisher. The code-signing configuration includes a set of signing profiles, which define the trusted publishers for this function.

If another account or an Amazon Web Services service invokes your function, use AddPermission to grant permission by creating a resource-based IAM policy. You can grant permissions at the function level, on a version, or on an alias.

To invoke your function directly, use Invoke. To invoke your function in response to events in other Amazon Web Services services, create an event source mapping (CreateEventSourceMapping), or configure a function trigger in the other service. For more information, see Invoking Functions.

" }, - "CreateFunctionUrlConfig":{ - "name":"CreateFunctionUrlConfig", - "http":{ - "method":"POST", - "requestUri":"/2021-10-31/functions/{FunctionName}/url", - "responseCode":201 - }, - "input":{"shape":"CreateFunctionUrlConfigRequest"}, - "output":{"shape":"CreateFunctionUrlConfigResponse"}, - "errors":[ - {"shape":"ResourceConflictException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"} - ] - }, "DeleteAlias":{ "name":"DeleteAlias", "http":{ @@ -260,21 +243,6 @@ ], "documentation":"

Deletes the configuration for asynchronous invocation for a function, version, or alias.

To configure options for asynchronous invocation, use PutFunctionEventInvokeConfig.

" }, - "DeleteFunctionUrlConfig":{ - "name":"DeleteFunctionUrlConfig", - "http":{ - "method":"DELETE", - "requestUri":"/2021-10-31/functions/{FunctionName}/url", - "responseCode":204 - }, - "input":{"shape":"DeleteFunctionUrlConfigRequest"}, - "errors":[ - {"shape":"ResourceConflictException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"} - ] - }, "DeleteLayerVersion":{ "name":"DeleteLayerVersion", "http":{ @@ -456,22 +424,6 @@ ], "documentation":"

Retrieves the configuration for asynchronous invocation for a function, version, or alias.

To configure options for asynchronous invocation, use PutFunctionEventInvokeConfig.

" }, - "GetFunctionUrlConfig":{ - "name":"GetFunctionUrlConfig", - "http":{ - "method":"GET", - "requestUri":"/2021-10-31/functions/{FunctionName}/url", - "responseCode":200 - }, - "input":{"shape":"GetFunctionUrlConfigRequest"}, - "output":{"shape":"GetFunctionUrlConfigResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"TooManyRequestsException"} - ] - }, "GetLayerVersion":{ "name":"GetLayerVersion", "http":{ @@ -681,22 +633,6 @@ ], "documentation":"

Retrieves a list of configurations for asynchronous invocation for a function.

To configure options for asynchronous invocation, use PutFunctionEventInvokeConfig.

" }, - "ListFunctionUrlConfigs":{ - "name":"ListFunctionUrlConfigs", - "http":{ - "method":"GET", - "requestUri":"/2021-10-31/functions/{FunctionName}/urls", - "responseCode":200 - }, - "input":{"shape":"ListFunctionUrlConfigsRequest"}, - "output":{"shape":"ListFunctionUrlConfigsResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"TooManyRequestsException"} - ] - }, "ListFunctions":{ "name":"ListFunctions", "http":{ @@ -1107,23 +1043,6 @@ {"shape":"ResourceConflictException"} ], "documentation":"

Updates the configuration for asynchronous invocation for a function, version, or alias.

To configure options for asynchronous invocation, use PutFunctionEventInvokeConfig.

" - }, - "UpdateFunctionUrlConfig":{ - "name":"UpdateFunctionUrlConfig", - "http":{ - "method":"PUT", - "requestUri":"/2021-10-31/functions/{FunctionName}/url", - "responseCode":200 - }, - "input":{"shape":"UpdateFunctionUrlConfigRequest"}, - "output":{"shape":"UpdateFunctionUrlConfigResponse"}, - "errors":[ - {"shape":"ResourceConflictException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"} - ] } }, "shapes":{ @@ -1351,17 +1270,6 @@ }, "documentation":"

The traffic-shifting configuration of a Lambda function alias.

" }, - "AllowCredentials":{"type":"boolean"}, - "AllowMethodsList":{ - "type":"list", - "member":{"shape":"Method"}, - "max":6 - }, - "AllowOriginsList":{ - "type":"list", - "member":{"shape":"Origin"}, - "max":100 - }, "AllowedPublishers":{ "type":"structure", "required":["SigningProfileVersionArns"], @@ -1390,13 +1298,6 @@ "type":"string", "pattern":"arn:(aws[a-zA-Z0-9-]*):([a-zA-Z0-9\\-])+:([a-z]{2}(-gov)?-[a-z]+-\\d{1})?:(\\d{12})?:(.*)" }, - "AuthorizationType":{ - "type":"string", - "enum":[ - "NONE", - "AWS_IAM" - ] - }, "BatchSize":{ "type":"integer", "max":10000, @@ -1531,17 +1432,6 @@ } } }, - "Cors":{ - "type":"structure", - "members":{ - "AllowCredentials":{"shape":"AllowCredentials"}, - "AllowHeaders":{"shape":"HeadersList"}, - "AllowMethods":{"shape":"AllowMethodsList"}, - "AllowOrigins":{"shape":"AllowOriginsList"}, - "ExposeHeaders":{"shape":"HeadersList"}, - "MaxAge":{"shape":"MaxAge"} - } - }, "CreateAliasRequest":{ "type":"structure", "required":[ @@ -1624,7 +1514,7 @@ }, "FilterCriteria":{ "shape":"FilterCriteria", - "documentation":"

(Streams and Amazon SQS) A object that defines the filter criteria used to determine whether Lambda should process an event. For more information, see Lambda event filtering.

" + "documentation":"

(Streams and Amazon SQS) An object that defines the filter criteria that determine whether Lambda should process an event. For more information, see Lambda event filtering.

" }, "MaximumBatchingWindowInSeconds":{ "shape":"MaximumBatchingWindowInSeconds", @@ -1680,7 +1570,7 @@ }, "FunctionResponseTypes":{ "shape":"FunctionResponseTypeList", - "documentation":"

(Streams only) A list of current response type enums applied to the event source mapping.

" + "documentation":"

(Streams and Amazon SQS) A list of current response type enums applied to the event source mapping.

" } } }, @@ -1698,7 +1588,7 @@ }, "Runtime":{ "shape":"Runtime", - "documentation":"

The identifier of the function's runtime.

" + "documentation":"

The identifier of the function's runtime. Runtime is required if the deployment package is a .zip file archive.

" }, "Role":{ "shape":"RoleArn", @@ -1706,7 +1596,7 @@ }, "Handler":{ "shape":"Handler", - "documentation":"

The name of the method within your code that Lambda calls to execute your function. The format includes the file name. It can also include namespaces and other qualifiers, depending on the runtime. For more information, see Programming Model.

" + "documentation":"

The name of the method within your code that Lambda calls to execute your function. Handler is required if the deployment package is a .zip file archive. The format includes the file name. It can also include namespaces and other qualifiers, depending on the runtime. For more information, see Programming Model.

" }, "Code":{ "shape":"FunctionCode", @@ -1778,43 +1668,6 @@ } } }, - "CreateFunctionUrlConfigRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "AuthorizationType" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"FunctionUrlQualifier", - "location":"querystring", - "locationName":"Qualifier" - }, - "AuthorizationType":{"shape":"AuthorizationType"}, - "Cors":{"shape":"Cors"} - } - }, - "CreateFunctionUrlConfigResponse":{ - "type":"structure", - "required":[ - "FunctionUrl", - "FunctionArn", - "AuthorizationType", - "CreationTime" - ], - "members":{ - "FunctionUrl":{"shape":"FunctionUrl"}, - "FunctionArn":{"shape":"FunctionArn"}, - "AuthorizationType":{"shape":"AuthorizationType"}, - "Cors":{"shape":"Cors"}, - "CreationTime":{"shape":"Timestamp"} - } - }, "Date":{"type":"timestamp"}, "DeadLetterConfig":{ "type":"structure", @@ -1936,22 +1789,6 @@ } } }, - "DeleteFunctionUrlConfigRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"FunctionUrlQualifier", - "location":"querystring", - "locationName":"Qualifier" - } - } - }, "DeleteLayerVersionRequest":{ "type":"structure", "required":[ @@ -2210,7 +2047,7 @@ }, "FilterCriteria":{ "shape":"FilterCriteria", - "documentation":"

(Streams and Amazon SQS) A object that defines the filter criteria used to determine whether Lambda should process an event. For more information, see Event filtering.

" + "documentation":"

(Streams and Amazon SQS) An object that defines the filter criteria that determine whether Lambda should process an event. For more information, see Lambda event filtering.

" }, "FunctionArn":{ "shape":"FunctionArn", @@ -2326,10 +2163,10 @@ "members":{ "Pattern":{ "shape":"Pattern", - "documentation":"

A filter pattern. For more information on the syntax of a filter pattern, see Filter criteria syntax.

" + "documentation":"

A filter pattern. For more information on the syntax of a filter pattern, see Filter rule syntax.

" } }, - "documentation":"

An object that specifies a filter criteria.

" + "documentation":"

A structure within a FilterCriteria object that defines an event filtering pattern.

" }, "FilterCriteria":{ "type":"structure", @@ -2339,7 +2176,7 @@ "documentation":"

A list of filters.

" } }, - "documentation":"

An object that contains the filters on the event source.

" + "documentation":"

An object that contains the filters for an event source.

" }, "FilterList":{ "type":"list", @@ -2584,39 +2421,6 @@ "max":1, "min":0 }, - "FunctionUrl":{ - "type":"string", - "max":100, - "min":40 - }, - "FunctionUrlConfig":{ - "type":"structure", - "required":[ - "FunctionUrl", - "FunctionArn", - "CreationTime", - "LastModifiedTime", - "AuthorizationType" - ], - "members":{ - "FunctionUrl":{"shape":"FunctionUrl"}, - "FunctionArn":{"shape":"FunctionArn"}, - "CreationTime":{"shape":"Timestamp"}, - "LastModifiedTime":{"shape":"Timestamp"}, - "Cors":{"shape":"Cors"}, - "AuthorizationType":{"shape":"AuthorizationType"} - } - }, - "FunctionUrlConfigList":{ - "type":"list", - "member":{"shape":"FunctionUrlConfig"} - }, - "FunctionUrlQualifier":{ - "type":"string", - "max":128, - "min":1, - "pattern":"(^\\$LATEST$)|((?!^[0-9]+$)([a-zA-Z0-9-_]+))" - }, "FunctionVersion":{ "type":"string", "enum":["ALL"] @@ -2819,40 +2623,6 @@ } } }, - "GetFunctionUrlConfigRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"FunctionUrlQualifier", - "location":"querystring", - "locationName":"Qualifier" - } - } - }, - "GetFunctionUrlConfigResponse":{ - "type":"structure", - "required":[ - "FunctionUrl", - "FunctionArn", - "AuthorizationType", - "CreationTime", - "LastModifiedTime" - ], - "members":{ - "FunctionUrl":{"shape":"FunctionUrl"}, - "FunctionArn":{"shape":"FunctionArn"}, - "AuthorizationType":{"shape":"AuthorizationType"}, - "Cors":{"shape":"Cors"}, - "CreationTime":{"shape":"Timestamp"}, - "LastModifiedTime":{"shape":"Timestamp"} - } - }, "GetLayerVersionByArnRequest":{ "type":"structure", "required":["Arn"], @@ -3047,16 +2817,6 @@ "max":128, "pattern":"[^\\s]+" }, - "Header":{ - "type":"string", - "max":1024, - "pattern":".*" - }, - "HeadersList":{ - "type":"list", - "member":{"shape":"Header"}, - "max":100 - }, "HttpStatus":{"type":"integer"}, "ImageConfig":{ "type":"structure", @@ 
-3692,35 +3452,6 @@ } } }, - "ListFunctionUrlConfigsRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "location":"uri", - "locationName":"FunctionName" - }, - "Marker":{ - "shape":"String", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"MaxItems", - "location":"querystring", - "locationName":"MaxItems" - } - } - }, - "ListFunctionUrlConfigsResponse":{ - "type":"structure", - "required":["FunctionUrlConfigs"], - "members":{ - "FunctionUrlConfigs":{"shape":"FunctionUrlConfigList"}, - "NextMarker":{"shape":"String"} - } - }, "ListFunctionsByCodeSigningConfigRequest":{ "type":"structure", "required":["CodeSigningConfigArn"], @@ -4004,21 +3735,11 @@ "type":"string", "pattern":"ALL|[a-z]{2}(-gov)?-[a-z]+-\\d{1}" }, - "MaxAge":{ - "type":"integer", - "max":86400, - "min":0 - }, "MaxFunctionEventInvokeConfigListItems":{ "type":"integer", "max":50, "min":1 }, - "MaxItems":{ - "type":"integer", - "max":50, - "min":1 - }, "MaxLayerListItems":{ "type":"integer", "max":50, @@ -4064,11 +3785,6 @@ "max":10240, "min":128 }, - "Method":{ - "type":"string", - "max":6, - "pattern":".*" - }, "NameSpacedFunctionArn":{ "type":"string", "pattern":"arn:(aws[a-zA-Z-]*)?:lambda:[a-z]{2}(-gov)?-[a-z]+-\\d{1}:\\d{12}:function:[a-zA-Z0-9-_\\.]+(:(\\$LATEST|[a-zA-Z0-9-_]+))?" @@ -4114,12 +3830,6 @@ "max":34, "pattern":"o-[a-z0-9]{10,32}" }, - "Origin":{ - "type":"string", - "max":253, - "min":1, - "pattern":".*" - }, "PackageType":{ "type":"string", "enum":[ @@ -4714,7 +4424,7 @@ "members":{ "Type":{ "shape":"SourceAccessType", - "documentation":"

The type of authentication protocol, VPC components, or virtual host for your event source. For example: \"Type\":\"SASL_SCRAM_512_AUTH\".

" + "documentation":"

The type of authentication protocol, VPC components, or virtual host for your event source. For example: \"Type\":\"SASL_SCRAM_512_AUTH\".

" }, "URI":{ "shape":"URI", @@ -5045,7 +4755,7 @@ }, "FilterCriteria":{ "shape":"FilterCriteria", - "documentation":"

(Streams and Amazon SQS) A object that defines the filter criteria used to determine whether Lambda should process an event. For more information, see Lambda event filtering.

" + "documentation":"

(Streams and Amazon SQS) An object that defines the filter criteria that determine whether Lambda should process an event. For more information, see Lambda event filtering.

" }, "MaximumBatchingWindowInSeconds":{ "shape":"MaximumBatchingWindowInSeconds", @@ -5081,7 +4791,7 @@ }, "FunctionResponseTypes":{ "shape":"FunctionResponseTypeList", - "documentation":"

(Streams only) A list of current response type enums applied to the event source mapping.

" + "documentation":"

(Streams and Amazon SQS) A list of current response type enums applied to the event source mapping.

" } } }, @@ -5149,7 +4859,7 @@ }, "Handler":{ "shape":"Handler", - "documentation":"

The name of the method within your code that Lambda calls to execute your function. The format includes the file name. It can also include namespaces and other qualifiers, depending on the runtime. For more information, see Programming Model.

" + "documentation":"

The name of the method within your code that Lambda calls to execute your function. Handler is required if the deployment package is a .zip file archive. The format includes the file name. It can also include namespaces and other qualifiers, depending on the runtime. For more information, see Programming Model.

" }, "Description":{ "shape":"Description", @@ -5173,7 +4883,7 @@ }, "Runtime":{ "shape":"Runtime", - "documentation":"

The identifier of the function's runtime.

" + "documentation":"

The identifier of the function's runtime. Runtime is required if the deployment package is a .zip file archive.

" }, "DeadLetterConfig":{ "shape":"DeadLetterConfig", @@ -5235,42 +4945,6 @@ } } }, - "UpdateFunctionUrlConfigRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"FunctionUrlQualifier", - "location":"querystring", - "locationName":"Qualifier" - }, - "AuthorizationType":{"shape":"AuthorizationType"}, - "Cors":{"shape":"Cors"} - } - }, - "UpdateFunctionUrlConfigResponse":{ - "type":"structure", - "required":[ - "FunctionUrl", - "FunctionArn", - "AuthorizationType", - "CreationTime", - "LastModifiedTime" - ], - "members":{ - "FunctionUrl":{"shape":"FunctionUrl"}, - "FunctionArn":{"shape":"FunctionArn"}, - "AuthorizationType":{"shape":"AuthorizationType"}, - "Cors":{"shape":"Cors"}, - "CreationTime":{"shape":"Timestamp"}, - "LastModifiedTime":{"shape":"Timestamp"} - } - }, "Version":{ "type":"string", "max":1024, diff --git a/botocore/data/proton/2020-07-20/paginators-1.json b/botocore/data/proton/2020-07-20/paginators-1.json index 0551934f0c..9f835e49c5 100644 --- a/botocore/data/proton/2020-07-20/paginators-1.json +++ b/botocore/data/proton/2020-07-20/paginators-1.json @@ -53,6 +53,47 @@ "output_token": "nextToken", "limit_key": "maxResults", "result_key": "tags" + }, + "ListEnvironmentOutputs": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "outputs" + }, + "ListEnvironmentProvisionedResources": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "provisionedResources" + }, + "ListRepositories": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "repositories" + }, + "ListRepositorySyncDefinitions": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "syncDefinitions" + }, + "ListServiceInstanceOutputs": { + "input_token": "nextToken", + "output_token": "nextToken", + 
"result_key": "outputs" + }, + "ListServiceInstanceProvisionedResources": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "provisionedResources" + }, + "ListServicePipelineOutputs": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "outputs" + }, + "ListServicePipelineProvisionedResources": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "provisionedResources" } } } diff --git a/botocore/data/proton/2020-07-20/service-2.json b/botocore/data/proton/2020-07-20/service-2.json index 5c6dfbc7d3..cc40e2fa81 100644 --- a/botocore/data/proton/2020-07-20/service-2.json +++ b/botocore/data/proton/2020-07-20/service-2.json @@ -29,7 +29,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

In a management account, an environment account connection request is accepted. When the environment account connection request is accepted, AWS Proton can use the associated IAM role to provision environment infrastructure resources in the associated environment account.

For more information, see Environment account connections in the AWS Proton Administrator guide.

", + "documentation":"

In a management account, an environment account connection request is accepted. When the environment account connection request is accepted, Proton can use the associated IAM role to provision environment infrastructure resources in the associated environment account.

For more information, see Environment account connections in the Proton Administrator guide.

", "idempotent":true }, "CancelEnvironmentDeployment":{ @@ -48,7 +48,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

Attempts to cancel an environment deployment on an UpdateEnvironment action, if the deployment is IN_PROGRESS. For more information, see Update an environment in the AWS Proton Administrator guide.

The following list includes potential cancellation scenarios.

" + "documentation":"

Attempts to cancel an environment deployment on an UpdateEnvironment action, if the deployment is IN_PROGRESS. For more information, see Update an environment in the Proton Administrator guide.

The following list includes potential cancellation scenarios.

" }, "CancelServiceInstanceDeployment":{ "name":"CancelServiceInstanceDeployment", @@ -66,7 +66,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

Attempts to cancel a service instance deployment on an UpdateServiceInstance action, if the deployment is IN_PROGRESS. For more information, see Update a service instance in the AWS Proton Administrator guide or the AWS Proton User guide.

The following list includes potential cancellation scenarios.

" + "documentation":"

Attempts to cancel a service instance deployment on an UpdateServiceInstance action, if the deployment is IN_PROGRESS. For more information, see Update a service instance in the Proton Administrator guide or the Proton User guide.

The following list includes potential cancellation scenarios.

" }, "CancelServicePipelineDeployment":{ "name":"CancelServicePipelineDeployment", @@ -84,7 +84,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

Attempts to cancel a service pipeline deployment on an UpdateServicePipeline action, if the deployment is IN_PROGRESS. For more information, see Update a service pipeline in the AWS Proton Administrator guide or the AWS Proton User guide.

The following list includes potential cancellation scenarios.

" + "documentation":"

Attempts to cancel a service pipeline deployment on an UpdateServicePipeline action, if the deployment is IN_PROGRESS. For more information, see Update a service pipeline in the Proton Administrator guide or the Proton User guide.

The following list includes potential cancellation scenarios.

" }, "CreateEnvironment":{ "name":"CreateEnvironment", @@ -103,7 +103,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

Deploy a new environment. An AWS Proton environment is created from an environment template that defines infrastructure and resources that can be shared across services. For more information, see the Environments in the AWS Proton Administrator Guide.

", + "documentation":"

Deploy a new environment. An Proton environment is created from an environment template that defines infrastructure and resources that can be shared across services.

You can provision environments using the following methods:

For more information, see the Environments in the Proton Administrator Guide.

", "idempotent":true }, "CreateEnvironmentAccountConnection":{ @@ -122,7 +122,7 @@ {"shape":"ConflictException"}, {"shape":"InternalServerException"} ], - "documentation":"

Create an environment account connection in an environment account so that environment infrastructure resources can be provisioned in the environment account from a management account.

An environment account connection is a secure bi-directional connection between a management account and an environment account that maintains authorization and permissions. For more information, see Environment account connections in the AWS Proton Administrator guide.

", + "documentation":"

Create an environment account connection in an environment account so that environment infrastructure resources can be provisioned in the environment account from a management account.

An environment account connection is a secure bi-directional connection between a management account and an environment account that maintains authorization and permissions. For more information, see Environment account connections in the Proton Administrator guide.

", "idempotent":true }, "CreateEnvironmentTemplate":{ @@ -141,7 +141,7 @@ {"shape":"ConflictException"}, {"shape":"InternalServerException"} ], - "documentation":"

Create an environment template for AWS Proton. For more information, see Environment Templates in the AWS Proton Administrator Guide.

You can create an environment template in one of the two following ways:

", + "documentation":"

Create an environment template for Proton. For more information, see Environment Templates in the Proton Administrator Guide.

You can create an environment template in one of the two following ways:

", "idempotent":true }, "CreateEnvironmentTemplateVersion":{ @@ -164,6 +164,25 @@ "documentation":"

Create a new major or minor version of an environment template. A major version of an environment template is a version that isn't backwards compatible. A minor version of an environment template is a version that's backwards compatible within its major version.

", "idempotent":true }, + "CreateRepository":{ + "name":"CreateRepository", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateRepositoryInput"}, + "output":{"shape":"CreateRepositoryOutput"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Create and register a link to a repository that can be used with pull request provisioning or template sync configurations. For more information, see Template bundles and Template sync configurations in the Proton Administrator Guide.

", + "idempotent":true + }, "CreateService":{ "name":"CreateService", "http":{ @@ -181,7 +200,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

Create an AWS Proton service. An AWS Proton service is an instantiation of a service template and often includes several service instances and pipeline. For more information, see Services in the AWS Proton Administrator Guide and Services in the AWS Proton User Guide.

", + "documentation":"

Create an Proton service. An Proton service is an instantiation of a service template and often includes several service instances and pipeline. For more information, see Services in the Proton Administrator Guide and Services in the Proton User Guide.

", "idempotent":true }, "CreateServiceTemplate":{ @@ -200,7 +219,7 @@ {"shape":"ConflictException"}, {"shape":"InternalServerException"} ], - "documentation":"

Create a service template. The administrator creates a service template to define standardized infrastructure and an optional CICD service pipeline. Developers, in turn, select the service template from AWS Proton. If the selected service template includes a service pipeline definition, they provide a link to their source code repository. AWS Proton then deploys and manages the infrastructure defined by the selected service template. For more information, see Service Templates in the AWS Proton Administrator Guide.

", + "documentation":"

Create a service template. The administrator creates a service template to define standardized infrastructure and an optional CICD service pipeline. Developers, in turn, select the service template from Proton. If the selected service template includes a service pipeline definition, they provide a link to their source code repository. Proton then deploys and manages the infrastructure defined by the selected service template. For more information, see Service Templates in the Proton Administrator Guide.

", "idempotent":true }, "CreateServiceTemplateVersion":{ @@ -220,7 +239,26 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

Create a new major or minor version of a service template. A major version of a service template is a version that isn't backwards compatible. A minor version of a service template is a version that's backwards compatible within its major version.

", + "documentation":"

Create a new major or minor version of a service template. A major version of a service template is a version that isn't backward compatible. A minor version of a service template is a version that's backward compatible within its major version.

", + "idempotent":true + }, + "CreateTemplateSyncConfig":{ + "name":"CreateTemplateSyncConfig", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateTemplateSyncConfigInput"}, + "output":{"shape":"CreateTemplateSyncConfigOutput"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Set up a template for automated template version creation. When a commit is pushed to your registered repository, Proton checks for changes to your repository template bundles. If it detects a template bundle change, a new minor or major version of its template is created, if the version doesn’t already exist. For more information, see Template sync configurations in the Proton Administrator Guide.

", "idempotent":true }, "DeleteEnvironment":{ @@ -258,7 +296,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

In an environment account, delete an environment account connection.

After you delete an environment account connection that’s in use by an AWS Proton environment, AWS Proton can’t manage the environment infrastructure resources until a new environment account connection is accepted for the environment account and associated environment. You're responsible for cleaning up provisioned resources that remain without an environment connection.

For more information, see Environment account connections in the AWS Proton Administrator guide.

", + "documentation":"

In an environment account, delete an environment account connection.

After you delete an environment account connection that’s in use by an Proton environment, Proton can’t manage the environment infrastructure resources until a new environment account connection is accepted for the environment account and associated environment. You're responsible for cleaning up provisioned resources that remain without an environment connection.

For more information, see Environment account connections in the Proton Administrator guide.

", "idempotent":true }, "DeleteEnvironmentTemplate":{ @@ -296,7 +334,26 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

If no other minor versions of an environment template exist, delete a major version of the environment template if it's not the Recommended version. Delete the Recommended version of the environment template if no other major versions or minor versions of the environment template exist. A major version of an environment template is a version that's not backwards compatible.

Delete a minor version of an environment template if it isn't the Recommended version. Delete a Recommended minor version of the environment template if no other minor versions of the environment template exist. A minor version of an environment template is a version that's backwards compatible.

", + "documentation":"

If no other minor versions of an environment template exist, delete a major version of the environment template if it's not the Recommended version. Delete the Recommended version of the environment template if no other major versions or minor versions of the environment template exist. A major version of an environment template is a version that's not backward compatible.

Delete a minor version of an environment template if it isn't the Recommended version. Delete a Recommended minor version of the environment template if no other minor versions of the environment template exist. A minor version of an environment template is a version that's backward compatible.

", + "idempotent":true + }, + "DeleteRepository":{ + "name":"DeleteRepository", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteRepositoryInput"}, + "output":{"shape":"DeleteRepositoryOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

De-register and unlink your repository.

", "idempotent":true }, "DeleteService":{ @@ -356,6 +413,25 @@ "documentation":"

If no other minor versions of a service template exist, delete a major version of the service template if it's not the Recommended version. Delete the Recommended version of the service template if no other major versions or minor versions of the service template exist. A major version of a service template is a version that isn't backwards compatible.

Delete a minor version of a service template if it's not the Recommended version. Delete a Recommended minor version of the service template if no other minor versions of the service template exist. A minor version of a service template is a version that's backwards compatible.

", "idempotent":true }, + "DeleteTemplateSyncConfig":{ + "name":"DeleteTemplateSyncConfig", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteTemplateSyncConfigInput"}, + "output":{"shape":"DeleteTemplateSyncConfigOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Delete a template sync configuration.

", + "idempotent":true + }, "GetAccountSettings":{ "name":"GetAccountSettings", "http":{ @@ -371,7 +447,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

Get detail data for the AWS Proton pipeline service role.

" + "documentation":"

Get detail data for the Proton pipeline service role.

" }, "GetEnvironment":{ "name":"GetEnvironment", @@ -405,7 +481,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

In an environment account, view the detail data for an environment account connection.

For more information, see Environment account connections in the AWS Proton Administrator guide.

" + "documentation":"

In an environment account, view the detail data for an environment account connection.

For more information, see Environment account connections in the Proton Administrator guide.

" }, "GetEnvironmentTemplate":{ "name":"GetEnvironmentTemplate", @@ -441,6 +517,40 @@ ], "documentation":"

View detail data for a major or minor version of an environment template.

" }, + "GetRepository":{ + "name":"GetRepository", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetRepositoryInput"}, + "output":{"shape":"GetRepositoryOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Get detail data for a repository.

" + }, + "GetRepositorySyncStatus":{ + "name":"GetRepositorySyncStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetRepositorySyncStatusInput"}, + "output":{"shape":"GetRepositorySyncStatusOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Get the repository sync status.

" + }, "GetService":{ "name":"GetService", "http":{ @@ -509,6 +619,40 @@ ], "documentation":"

View detail data for a major or minor version of a service template.

" }, + "GetTemplateSyncConfig":{ + "name":"GetTemplateSyncConfig", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetTemplateSyncConfigInput"}, + "output":{"shape":"GetTemplateSyncConfigOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Get detail data for a template sync configuration.

" + }, + "GetTemplateSyncStatus":{ + "name":"GetTemplateSyncStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetTemplateSyncStatusInput"}, + "output":{"shape":"GetTemplateSyncStatusOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Get the status of a template sync.

" + }, "ListEnvironmentAccountConnections":{ "name":"ListEnvironmentAccountConnections", "http":{ @@ -523,7 +667,41 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

View a list of environment account connections.

For more information, see Environment account connections in the AWS Proton Administrator guide.

" + "documentation":"

View a list of environment account connections.

For more information, see Environment account connections in the Proton Administrator guide.

" + }, + "ListEnvironmentOutputs":{ + "name":"ListEnvironmentOutputs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListEnvironmentOutputsInput"}, + "output":{"shape":"ListEnvironmentOutputsOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

List the infrastructure as code outputs for your environment.

" + }, + "ListEnvironmentProvisionedResources":{ + "name":"ListEnvironmentProvisionedResources", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListEnvironmentProvisionedResourcesInput"}, + "output":{"shape":"ListEnvironmentProvisionedResourcesOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

List the provisioned resources for your environment.

" }, "ListEnvironmentTemplateVersions":{ "name":"ListEnvironmentTemplateVersions", @@ -575,6 +753,73 @@ ], "documentation":"

List environments with detail data summaries.

" }, + "ListRepositories":{ + "name":"ListRepositories", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListRepositoriesInput"}, + "output":{"shape":"ListRepositoriesOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

List repositories with detail data.

" + }, + "ListRepositorySyncDefinitions":{ + "name":"ListRepositorySyncDefinitions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListRepositorySyncDefinitionsInput"}, + "output":{"shape":"ListRepositorySyncDefinitionsOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

List repository sync definitions with detail data.

" + }, + "ListServiceInstanceOutputs":{ + "name":"ListServiceInstanceOutputs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListServiceInstanceOutputsInput"}, + "output":{"shape":"ListServiceInstanceOutputsOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

View a list of service instance infrastructure as code outputs with detail data.

" + }, + "ListServiceInstanceProvisionedResources":{ + "name":"ListServiceInstanceProvisionedResources", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListServiceInstanceProvisionedResourcesInput"}, + "output":{"shape":"ListServiceInstanceProvisionedResourcesOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

List provisioned resources for a service instance with details.

" + }, "ListServiceInstances":{ "name":"ListServiceInstances", "http":{ @@ -592,6 +837,40 @@ ], "documentation":"

List service instances with summaries of detail data.

" }, + "ListServicePipelineOutputs":{ + "name":"ListServicePipelineOutputs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListServicePipelineOutputsInput"}, + "output":{"shape":"ListServicePipelineOutputsOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

View a list of service pipeline infrastructure as code outputs with detail.

" + }, + "ListServicePipelineProvisionedResources":{ + "name":"ListServicePipelineProvisionedResources", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListServicePipelineProvisionedResourcesInput"}, + "output":{"shape":"ListServicePipelineProvisionedResourcesOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

List provisioned resources for a service and pipeline with details.

" + }, "ListServiceTemplateVersions":{ "name":"ListServiceTemplateVersions", "http":{ @@ -656,7 +935,26 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

List tags for a resource. For more information, see AWS Proton resources and tagging in the AWS Proton Administrator Guide or AWS Proton User Guide.

" + "documentation":"

List tags for a resource. For more information, see Proton resources and tagging in the Proton Administrator Guide or Proton User Guide.

" + }, + "NotifyResourceDeploymentStatusChange":{ + "name":"NotifyResourceDeploymentStatusChange", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"NotifyResourceDeploymentStatusChangeInput"}, + "output":{"shape":"NotifyResourceDeploymentStatusChangeOutput"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Notify Proton of status changes to a provisioned resource when you use pull request provisioning. For more information, see Template bundles.

Provisioning by pull request is currently in feature preview and is only usable with Terraform based Proton Templates. To learn more about Amazon Web Services Feature Preview terms, see section 2 on Beta and Previews.

" }, "RejectEnvironmentAccountConnection":{ "name":"RejectEnvironmentAccountConnection", @@ -674,7 +972,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

In a management account, reject an environment account connection from another environment account.

After you reject an environment account connection request, you won’t be able to accept or use the rejected environment account connection.

You can’t reject an environment account connection that is connected to an environment.

For more information, see Environment account connections in the AWS Proton Administrator guide.

", + "documentation":"

In a management account, reject an environment account connection from another environment account.

After you reject an environment account connection request, you won’t be able to accept or use the rejected environment account connection.

You can’t reject an environment account connection that is connected to an environment.

For more information, see Environment account connections in the Proton Administrator guide.

", "idempotent":true }, "TagResource":{ @@ -693,7 +991,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

Tag a resource. For more information, see AWS Proton resources and tagging in the AWS Proton Administrator Guide or AWS Proton User Guide.

", + "documentation":"

Tag a resource. For more information, see Proton resources and tagging in the Proton Administrator Guide or Proton User Guide.

", "idempotent":true }, "UntagResource":{ @@ -712,7 +1010,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

Remove a tag from a resource. For more information, see AWS Proton resources and tagging in the AWS Proton Administrator Guide or AWS Proton User Guide.

", + "documentation":"

Remove a tag from a resource. For more information, see Proton resources and tagging in the Proton Administrator Guide or Proton User Guide.

", "idempotent":true }, "UpdateAccountSettings":{ @@ -730,7 +1028,7 @@ {"shape":"ConflictException"}, {"shape":"InternalServerException"} ], - "documentation":"

Update the AWS Proton pipeline service account settings.

" + "documentation":"

Update the Proton service pipeline role or repository settings.

" }, "UpdateEnvironment":{ "name":"UpdateEnvironment", @@ -748,7 +1046,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

Update an environment.

If the environment is associated with an environment account connection, don't update or include the protonServiceRoleArn parameter to update or connect to an environment account connection.

You can only update to a new environment account connection if it was created in the same environment account that the current environment account connection was created in and is associated with the current environment.

If the environment isn't associated with an environment account connection, don't update or include the environmentAccountConnectionId parameter to update or connect to an environment account connection.

You can update either the environmentAccountConnectionId or protonServiceRoleArn parameter and value. You can’t update both.

There are four modes for updating an environment as described in the following. The deploymentType field defines the mode.

NONE

In this mode, a deployment doesn't occur. Only the requested metadata parameters are updated.

CURRENT_VERSION

In this mode, the environment is deployed and updated with the new spec that you provide. Only requested parameters are updated. Don’t include minor or major version parameters when you use this deployment-type.

MINOR_VERSION

In this mode, the environment is deployed and updated with the published, recommended (latest) minor version of the current major version in use, by default. You can also specify a different minor version of the current major version in use.

MAJOR_VERSION

In this mode, the environment is deployed and updated with the published, recommended (latest) major and minor version of the current template, by default. You can also specify a different major version that's higher than the major version in use and a minor version (optional).

" + "documentation":"

Update an environment.

If the environment is associated with an environment account connection, don't update or include the protonServiceRoleArn and provisioningRepository parameter to update or connect to an environment account connection.

You can only update to a new environment account connection if it was created in the same environment account that the current environment account connection was created in and is associated with the current environment.

If the environment isn't associated with an environment account connection, don't update or include the environmentAccountConnectionId parameter to update or connect to an environment account connection.

You can update either the environmentAccountConnectionId or protonServiceRoleArn parameter and value. You can’t update both.

If the environment was provisioned with pull request provisioning, include the provisioningRepository parameter and omit the protonServiceRoleArn and environmentAccountConnectionId parameters.

If the environment wasn't provisioned with pull request provisioning, omit the provisioningRepository parameter.

There are four modes for updating an environment as described in the following. The deploymentType field defines the mode.

NONE

In this mode, a deployment doesn't occur. Only the requested metadata parameters are updated.

CURRENT_VERSION

In this mode, the environment is deployed and updated with the new spec that you provide. Only requested parameters are updated. Don’t include minor or major version parameters when you use this deployment-type.

MINOR_VERSION

In this mode, the environment is deployed and updated with the published, recommended (latest) minor version of the current major version in use, by default. You can also specify a different minor version of the current major version in use.

MAJOR_VERSION

In this mode, the environment is deployed and updated with the published, recommended (latest) major and minor version of the current template, by default. You can also specify a different major version that's higher than the major version in use and a minor version (optional).

" }, "UpdateEnvironmentAccountConnection":{ "name":"UpdateEnvironmentAccountConnection", @@ -766,7 +1064,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

In an environment account, update an environment account connection to use a new IAM role.

For more information, see Environment account connections in the AWS Proton Administrator guide.

", + "documentation":"

In an environment account, update an environment account connection to use a new IAM role.

For more information, see Environment account connections in the Proton Administrator guide.

", "idempotent":true }, "UpdateEnvironmentTemplate":{ @@ -895,6 +1193,24 @@ {"shape":"InternalServerException"} ], "documentation":"

Update a major or minor version of a service template.

" + }, + "UpdateTemplateSyncConfig":{ + "name":"UpdateTemplateSyncConfig", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateTemplateSyncConfigInput"}, + "output":{"shape":"UpdateTemplateSyncConfigOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Update template sync configuration parameters, except for the templateName and templateType.

" } }, "shapes":{ @@ -914,7 +1230,7 @@ "members":{ "environmentAccountConnection":{ "shape":"EnvironmentAccountConnection", - "documentation":"

The environment account connection data that's returned by AWS Proton.

" + "documentation":"

The environment account connection data that's returned by Proton.

" } } }, @@ -930,12 +1246,16 @@ "AccountSettings":{ "type":"structure", "members":{ + "pipelineProvisioningRepository":{ + "shape":"RepositoryBranch", + "documentation":"

The repository that you provide with pull request provisioning.

Provisioning by pull request is currently in feature preview and is only usable with Terraform based Proton Templates. To learn more about Amazon Web Services Feature Preview terms, see section 2 on Beta and Previews.

" + }, "pipelineServiceRoleArn":{ - "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the AWS Proton pipeline service role.

" + "shape":"PipelineRoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the Proton pipeline service role.

" } }, - "documentation":"

The AWS Proton pipeline service role data.

" + "documentation":"

The Proton pipeline service role and repository data.

" }, "Arn":{ "type":"string", @@ -962,7 +1282,7 @@ "members":{ "environment":{ "shape":"Environment", - "documentation":"

The environment summary data that's returned by AWS Proton.

" + "documentation":"

The environment summary data that's returned by Proton.

" } } }, @@ -989,7 +1309,7 @@ "members":{ "serviceInstance":{ "shape":"ServiceInstance", - "documentation":"

The service instance summary data that's returned by AWS Proton.

" + "documentation":"

The service instance summary data that's returned by Proton.

" } } }, @@ -1009,7 +1329,7 @@ "members":{ "pipeline":{ "shape":"ServicePipeline", - "documentation":"

The service pipeline detail data that's returned by AWS Proton.

" + "documentation":"

The service pipeline detail data that's returned by Proton.

" } } }, @@ -1084,20 +1404,24 @@ "members":{ "clientToken":{ "shape":"ClientToken", - "documentation":"

When included, if two identicial requests are made with the same client token, AWS Proton returns the environment account connection that the first request created.

", + "documentation":"

When included, if two identical requests are made with the same client token, Proton returns the environment account connection that the first request created.

", "idempotencyToken":true }, "environmentName":{ "shape":"ResourceName", - "documentation":"

The name of the AWS Proton environment that's created in the associated management account.

" + "documentation":"

The name of the Proton environment that's created in the associated management account.

" }, "managementAccountId":{ "shape":"AwsAccountId", - "documentation":"

The ID of the management account that accepts or rejects the environment account connection. You create an manage the AWS Proton environment in this account. If the management account accepts the environment account connection, AWS Proton can use the associated IAM role to provision environment infrastructure resources in the associated environment account.

" + "documentation":"

The ID of the management account that accepts or rejects the environment account connection. You create and manage the Proton environment in this account. If the management account accepts the environment account connection, Proton can use the associated IAM role to provision environment infrastructure resources in the associated environment account.

" }, "roleArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the IAM service role that's created in the environment account. AWS Proton uses this role to provision infrastructure resources in the associated environment account.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM service role that's created in the environment account. Proton uses this role to provision infrastructure resources in the associated environment account.

" + }, + "tags":{ + "shape":"TagList", + "documentation":"

Tags for your environment account connection. For more information, see Proton resources and tagging in the Proton Administrator Guide.

" } } }, @@ -1107,7 +1431,7 @@ "members":{ "environmentAccountConnection":{ "shape":"EnvironmentAccountConnection", - "documentation":"

The environment account connection detail data that's returned by AWS Proton.

" + "documentation":"

The environment account connection detail data that's returned by Proton.

" } } }, @@ -1126,7 +1450,7 @@ }, "environmentAccountConnectionId":{ "shape":"EnvironmentAccountConnectionId", - "documentation":"

The ID of the environment account connection that you provide if you're provisioning your environment infrastructure resources to an environment account. You must include either the environmentAccountConnectionId or protonServiceRoleArn parameter and value. For more information, see Environment account connections in the AWS Proton Administrator guide.

" + "documentation":"

The ID of the environment account connection that you provide if you're provisioning your environment infrastructure resources to an environment account. You must include either the environmentAccountConnectionId or protonServiceRoleArn parameter and value and omit the provisioningRepository parameter and values. For more information, see Environment account connections in the Proton Administrator guide.

" }, "name":{ "shape":"ResourceName", @@ -1134,27 +1458,31 @@ }, "protonServiceRoleArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the AWS Proton service role that allows AWS Proton to make calls to other services on your behalf. You must include either the environmentAccountConnectionId or protonServiceRoleArn parameter and value.

" + "documentation":"

The Amazon Resource Name (ARN) of the Proton service role that allows Proton to make calls to other services on your behalf. You must include either the environmentAccountConnectionId or protonServiceRoleArn parameter and value and omit the provisioningRepository parameter when you use standard provisioning.

" + }, + "provisioningRepository":{ + "shape":"RepositoryBranchInput", + "documentation":"

The repository that you provide with pull request provisioning. If you provide this parameter, you must omit the environmentAccountConnectionId and protonServiceRoleArn parameters.

Provisioning by pull request is currently in feature preview and is only usable with Terraform based Proton Templates. To learn more about Amazon Web Services Feature Preview terms, see section 2 on Beta and Previews.

" }, "spec":{ "shape":"SpecContents", - "documentation":"

A link to a YAML formatted spec file that provides inputs as defined in the environment template bundle schema file. For more information, see Environments in the AWS Proton Administrator Guide.

" + "documentation":"

A link to a YAML formatted spec file that provides inputs as defined in the environment template bundle schema file. For more information, see Environments in the Proton Administrator Guide.

" }, "tags":{ "shape":"TagList", - "documentation":"

Create tags for your environment. For more information, see AWS Proton resources and tagging in the AWS Proton Administrator Guide or AWS Proton User Guide.

" + "documentation":"

Create tags for your environment. For more information, see Proton resources and tagging in the Proton Administrator Guide or Proton User Guide.

" }, "templateMajorVersion":{ "shape":"TemplateVersionPart", - "documentation":"

The ID of the major version of the environment template.

" + "documentation":"

The major version of the environment template.

" }, "templateMinorVersion":{ "shape":"TemplateVersionPart", - "documentation":"

The ID of the minor version of the environment template.

" + "documentation":"

The minor version of the environment template.

" }, "templateName":{ "shape":"ResourceName", - "documentation":"

The name of the environment template. For more information, see Environment Templates in the AWS Proton Administrator Guide.

" + "documentation":"

The name of the environment template. For more information, see Environment Templates in the Proton Administrator Guide.

" } } }, @@ -1164,7 +1492,7 @@ "members":{ "environment":{ "shape":"Environment", - "documentation":"

The environment detail data that's returned by AWS Proton.

" + "documentation":"

The environment detail data that's returned by Proton.

" } } }, @@ -1182,7 +1510,7 @@ }, "encryptionKey":{ "shape":"Arn", - "documentation":"

A customer provided encryption key that AWS Proton uses to encrypt data.

" + "documentation":"

A customer provided encryption key that Proton uses to encrypt data.

" }, "name":{ "shape":"ResourceName", @@ -1194,7 +1522,7 @@ }, "tags":{ "shape":"TagList", - "documentation":"

Create tags for your environment template. For more information, see AWS Proton resources and tagging in the AWS Proton Administrator Guide or AWS Proton User Guide.

" + "documentation":"

Create tags for your environment template. For more information, see Proton resources and tagging in the Proton Administrator Guide or Proton User Guide.

" } } }, @@ -1204,7 +1532,7 @@ "members":{ "environmentTemplate":{ "shape":"EnvironmentTemplate", - "documentation":"

The environment template detail data that's returned by AWS Proton.

" + "documentation":"

The environment template detail data that's returned by Proton.

" } } }, @@ -1217,7 +1545,7 @@ "members":{ "clientToken":{ "shape":"ClientToken", - "documentation":"

When included, if two identicial requests are made with the same client token, AWS Proton returns the environment template version that the first request created.

", + "documentation":"

When included, if two identical requests are made with the same client token, Proton returns the environment template version that the first request created.

", "idempotencyToken":true }, "description":{ @@ -1226,7 +1554,7 @@ }, "majorVersion":{ "shape":"TemplateVersionPart", - "documentation":"

To create a new minor version of the environment template, include a majorVersion.

To create a new major and minor version of the environment template, exclude majorVersion.

" + "documentation":"

To create a new minor version of the environment template, include a majorVersion.

To create a new major and minor version of the environment template, exclude majorVersion.

" }, "source":{ "shape":"TemplateVersionSourceInput", @@ -1248,7 +1576,43 @@ "members":{ "environmentTemplateVersion":{ "shape":"EnvironmentTemplateVersion", - "documentation":"

The environment template detail data that's returned by AWS Proton.

" + "documentation":"

The environment template detail data that's returned by Proton.

" + } + } + }, + "CreateRepositoryInput":{ + "type":"structure", + "required":[ + "connectionArn", + "name", + "provider" + ], + "members":{ + "connectionArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of your Amazon Web Services CodeStar connection. For more information, see Setting up for Proton in the Proton Administrator Guide.

" + }, + "encryptionKey":{ + "shape":"Arn", + "documentation":"

The ARN of your customer Amazon Web Services Key Management Service (Amazon Web Services KMS) key.

" + }, + "name":{ + "shape":"RepositoryName", + "documentation":"

The repository name, for example myrepos/myrepo.

" + }, + "provider":{ + "shape":"RepositoryProvider", + "documentation":"

The repository provider.

" + } + } + }, + "CreateRepositoryOutput":{ + "type":"structure", + "required":["repository"], + "members":{ + "repository":{ + "shape":"Repository", + "documentation":"

The repository detail data that's returned by Proton.

" } } }, @@ -1263,11 +1627,11 @@ "members":{ "branchName":{ "shape":"GitBranchName", - "documentation":"

The name of the code repository branch that holds the code that's deployed in AWS Proton. Don't include this parameter if your service template doesn't include a service pipeline.

" + "documentation":"

The name of the code repository branch that holds the code that's deployed in Proton. Don't include this parameter if your service template doesn't include a service pipeline.

" }, "description":{ "shape":"Description", - "documentation":"

A description of the AWS Proton service.

" + "documentation":"

A description of the Proton service.

" }, "name":{ "shape":"ResourceName", @@ -1275,7 +1639,7 @@ }, "repositoryConnectionArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the repository connection. For more information, see Set up repository connection in the AWS Proton Administrator Guide and Setting up with AWS Proton in the AWS Proton User Guide. Don't include this parameter if your service template doesn't include a service pipeline.

" + "documentation":"

The Amazon Resource Name (ARN) of the repository connection. For more information, see Set up repository connection in the Proton Administrator Guide and Setting up with Proton in the Proton User Guide. Don't include this parameter if your service template doesn't include a service pipeline.

" }, "repositoryId":{ "shape":"RepositoryId", @@ -1283,19 +1647,19 @@ }, "spec":{ "shape":"SpecContents", - "documentation":"

A link to a spec file that provides inputs as defined in the service template bundle schema file. The spec file is in YAML format. Don’t include pipeline inputs in the spec if your service template doesn’t include a service pipeline. For more information, see Create a service in the AWS Proton Administrator Guide and Create a service in the AWS Proton User Guide.

" + "documentation":"

A link to a spec file that provides inputs as defined in the service template bundle schema file. The spec file is in YAML format. Don’t include pipeline inputs in the spec if your service template doesn’t include a service pipeline. For more information, see Create a service in the Proton Administrator Guide and Create a service in the Proton User Guide.

" }, "tags":{ "shape":"TagList", - "documentation":"

Create tags for your service. For more information, see AWS Proton resources and tagging in the AWS Proton Administrator Guide or AWS Proton User Guide.

" + "documentation":"

Create tags for your service. For more information, see Proton resources and tagging in the Proton Administrator Guide or Proton User Guide.

" }, "templateMajorVersion":{ "shape":"TemplateVersionPart", - "documentation":"

The ID of the major version of the service template that was used to create the service.

" + "documentation":"

The major version of the service template that was used to create the service.

" }, "templateMinorVersion":{ "shape":"TemplateVersionPart", - "documentation":"

The ID of the minor version of the service template that was used to create the service.

" + "documentation":"

The minor version of the service template that was used to create the service.

" }, "templateName":{ "shape":"ResourceName", @@ -1309,7 +1673,7 @@ "members":{ "service":{ "shape":"Service", - "documentation":"

The service detail data that's returned by AWS Proton.

" + "documentation":"

The service detail data that's returned by Proton.

" } } }, @@ -1335,11 +1699,11 @@ }, "pipelineProvisioning":{ "shape":"Provisioning", - "documentation":"

AWS Proton includes a service pipeline for your service by default. When included, this parameter indicates that an AWS Proton service pipeline won't be included for your service. Once specified, this parameter can't be changed. For more information, see Service template bundles in the AWS Proton Administrator Guide.

" + "documentation":"

Proton includes a service pipeline for your service by default. When included, this parameter indicates that an Proton service pipeline won't be included for your service. Once specified, this parameter can't be changed. For more information, see Service template bundles in the Proton Administrator Guide.

" }, "tags":{ "shape":"TagList", - "documentation":"

Create tags for your service template. For more information, see AWS Proton resources and tagging in the AWS Proton Administrator Guide or AWS Proton User Guide.

" + "documentation":"

Create tags for your service template. For more information, see Proton resources and tagging in the Proton Administrator Guide or Proton User Guide.

" } } }, @@ -1349,7 +1713,7 @@ "members":{ "serviceTemplate":{ "shape":"ServiceTemplate", - "documentation":"

The service template detail data that's returned by AWS Proton.

" + "documentation":"

The service template detail data that's returned by Proton.

" } } }, @@ -1363,7 +1727,7 @@ "members":{ "clientToken":{ "shape":"ClientToken", - "documentation":"

When included, if two identicial requests are made with the same client token, AWS Proton returns the service template version that the first request created.

", + "documentation":"

When included, if two identical requests are made with the same client token, Proton returns the service template version that the first request created.

", "idempotencyToken":true }, "compatibleEnvironmentTemplates":{ @@ -1376,7 +1740,7 @@ }, "majorVersion":{ "shape":"TemplateVersionPart", - "documentation":"

To create a new minor version of the service template, include a majorVersion.

To create a new major and minor version of the service template, exclude majorVersion.

" + "documentation":"

To create a new minor version of the service template, include a majorVersion.

To create a new major and minor version of the service template, exclude majorVersion.

" }, "source":{ "shape":"TemplateVersionSourceInput", @@ -1398,13 +1762,58 @@ "members":{ "serviceTemplateVersion":{ "shape":"ServiceTemplateVersion", - "documentation":"

The service template version summary of detail data that's returned by AWS Proton.

" + "documentation":"

The service template version summary of detail data that's returned by Proton.

" } } }, - "DeleteEnvironmentAccountConnectionInput":{ + "CreateTemplateSyncConfigInput":{ "type":"structure", - "required":["id"], + "required":[ + "branch", + "repositoryName", + "repositoryProvider", + "templateName", + "templateType" + ], + "members":{ + "branch":{ + "shape":"GitBranchName", + "documentation":"

The branch of the registered repository for your template.

" + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

The name of your repository, for example myrepos/myrepo.

" + }, + "repositoryProvider":{ + "shape":"RepositoryProvider", + "documentation":"

The provider type for your repository.

" + }, + "subdirectory":{ + "shape":"Subdirectory", + "documentation":"

A repository subdirectory path to your template bundle directory. When included, Proton limits the template bundle search to this repository directory.

" + }, + "templateName":{ + "shape":"ResourceName", + "documentation":"

The name of your registered template.

" + }, + "templateType":{ + "shape":"TemplateType", + "documentation":"

The type of the registered template.

" + } + } + }, + "CreateTemplateSyncConfigOutput":{ + "type":"structure", + "members":{ + "templateSyncConfig":{ + "shape":"TemplateSyncConfig", + "documentation":"

The template sync configuration detail data that's returned by Proton.

" + } + } + }, + "DeleteEnvironmentAccountConnectionInput":{ + "type":"structure", + "required":["id"], "members":{ "id":{ "shape":"EnvironmentAccountConnectionId", @@ -1417,7 +1826,7 @@ "members":{ "environmentAccountConnection":{ "shape":"EnvironmentAccountConnection", - "documentation":"

The environment account connection detail data that's returned by AWS Proton.

" + "documentation":"

The environment account connection detail data that's returned by Proton.

" } } }, @@ -1436,7 +1845,7 @@ "members":{ "environment":{ "shape":"Environment", - "documentation":"

The environment detail data that's returned by AWS Proton.

" + "documentation":"

The environment detail data that's returned by Proton.

" } } }, @@ -1455,7 +1864,7 @@ "members":{ "environmentTemplate":{ "shape":"EnvironmentTemplate", - "documentation":"

The environment template detail data that's returned by AWS Proton.

" + "documentation":"

The environment template detail data that's returned by Proton.

" } } }, @@ -1486,7 +1895,33 @@ "members":{ "environmentTemplateVersion":{ "shape":"EnvironmentTemplateVersion", - "documentation":"

The environment template version detail data that's returned by AWS Proton.

" + "documentation":"

The environment template version detail data that's returned by Proton.

" + } + } + }, + "DeleteRepositoryInput":{ + "type":"structure", + "required":[ + "name", + "provider" + ], + "members":{ + "name":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository.

" + }, + "provider":{ + "shape":"RepositoryProvider", + "documentation":"

The repository provider.

" + } + } + }, + "DeleteRepositoryOutput":{ + "type":"structure", + "members":{ + "repository":{ + "shape":"Repository", + "documentation":"

The repository detail data that's returned by Proton.

" } } }, @@ -1505,7 +1940,7 @@ "members":{ "service":{ "shape":"Service", - "documentation":"

The service detail data that's returned by AWS Proton.

" + "documentation":"

The service detail data that's returned by Proton.

" } } }, @@ -1524,7 +1959,7 @@ "members":{ "serviceTemplate":{ "shape":"ServiceTemplate", - "documentation":"

The service template detail data that's returned by AWS Proton.

" + "documentation":"

The service template detail data that's returned by Proton.

" } } }, @@ -1555,10 +1990,40 @@ "members":{ "serviceTemplateVersion":{ "shape":"ServiceTemplateVersion", - "documentation":"

The service template version detail data that's returned by AWS Proton.

" + "documentation":"

The service template version detail data that's returned by Proton.

" + } + } + }, + "DeleteTemplateSyncConfigInput":{ + "type":"structure", + "required":[ + "templateName", + "templateType" + ], + "members":{ + "templateName":{ + "shape":"ResourceName", + "documentation":"

The template name.

" + }, + "templateType":{ + "shape":"TemplateType", + "documentation":"

The template type.

" + } + } + }, + "DeleteTemplateSyncConfigOutput":{ + "type":"structure", + "members":{ + "templateSyncConfig":{ + "shape":"TemplateSyncConfig", + "documentation":"

The template sync configuration detail data that's returned by Proton.

" } } }, + "DeploymentId":{ + "type":"string", + "pattern":"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$" + }, "DeploymentStatus":{ "type":"string", "enum":[ @@ -1593,6 +2058,11 @@ "min":1, "sensitive":true }, + "EmptyNextToken":{ + "type":"string", + "max":0, + "min":0 + }, "Environment":{ "type":"structure", "required":[ @@ -1649,12 +2119,16 @@ }, "protonServiceRoleArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the AWS Proton service role that allows AWS Proton to make calls to other services on your behalf.

" + "documentation":"

The Amazon Resource Name (ARN) of the Proton service role that allows Proton to make calls to other services on your behalf.

" }, "provisioning":{ "shape":"Provisioning", "documentation":"

When included, indicates that the environment template is for customer provisioned and managed infrastructure.

" }, + "provisioningRepository":{ + "shape":"RepositoryBranch", + "documentation":"

The repository that you provide with pull request provisioning.

Provisioning by pull request is currently in feature preview and is only usable with Terraform based Proton Templates. To learn more about Amazon Web Services Feature Preview terms, see section 2 on Beta and Previews.

" + }, "spec":{ "shape":"SpecContents", "documentation":"

The environment spec.

" @@ -1672,7 +2146,7 @@ "documentation":"

The Amazon Resource Name (ARN) of the environment template.

" } }, - "documentation":"

The environment detail data. An AWS Proton environment is a set resources shared across an AWS Proton service.

" + "documentation":"

The environment detail data. A Proton environment is a set of resources shared across a Proton service.

" }, "EnvironmentAccountConnection":{ "type":"structure", @@ -1865,7 +2339,7 @@ }, "protonServiceRoleArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the AWS Proton service role that allows AWS Proton to make calls to other services on your behalf.

" + "documentation":"

The Amazon Resource Name (ARN) of the Proton service role that allows Proton to make calls to other services on your behalf.

" }, "provisioning":{ "shape":"Provisioning", @@ -1873,11 +2347,11 @@ }, "templateMajorVersion":{ "shape":"TemplateVersionPart", - "documentation":"

The ID of the major version of the environment template.

" + "documentation":"

The major version of the environment template.

" }, "templateMinorVersion":{ "shape":"TemplateVersionPart", - "documentation":"

The ID of the minor version of the environment template.

" + "documentation":"

The minor version of the environment template.

" }, "templateName":{ "shape":"ResourceName", @@ -2000,7 +2474,7 @@ }, "recommendedVersion":{ "shape":"FullTemplateVersionNumber", - "documentation":"

The ID of the recommended version of the environment template.

" + "documentation":"

The recommended version of the environment template.

" } }, "documentation":"

The environment template data.

" @@ -2039,15 +2513,15 @@ }, "majorVersion":{ "shape":"TemplateVersionPart", - "documentation":"

The ID of the latest major version that's associated with the version of an environment template.

" + "documentation":"

The latest major version that's associated with the version of an environment template.

" }, "minorVersion":{ "shape":"TemplateVersionPart", - "documentation":"

The ID of the minor version of an environment template.

" + "documentation":"

The minor version of an environment template.

" }, "recommendedMinorVersion":{ "shape":"TemplateVersionPart", - "documentation":"

The ID of the recommended minor version of the environment template.

" + "documentation":"

The recommended minor version of the environment template.

" }, "schema":{ "shape":"TemplateSchema", @@ -2099,15 +2573,15 @@ }, "majorVersion":{ "shape":"TemplateVersionPart", - "documentation":"

The ID of the latest major version that's associated with the version of an environment template.

" + "documentation":"

The latest major version that's associated with the version of an environment template.

" }, "minorVersion":{ "shape":"TemplateVersionPart", - "documentation":"

The ID of the version of an environment template.

" + "documentation":"

The version of an environment template.

" }, "recommendedMinorVersion":{ "shape":"TemplateVersionPart", - "documentation":"

The ID of the recommended minor version of the environment template.

" + "documentation":"

The recommended minor version of the environment template.

" }, "status":{ "shape":"TemplateVersionStatus", @@ -2148,7 +2622,7 @@ "members":{ "accountSettings":{ "shape":"AccountSettings", - "documentation":"

The AWS Proton pipeline service role detail data that's returned by AWS Proton.

" + "documentation":"

The Proton pipeline service role detail data that's returned by Proton.

" } } }, @@ -2168,7 +2642,7 @@ "members":{ "environmentAccountConnection":{ "shape":"EnvironmentAccountConnection", - "documentation":"

The environment account connection detail data that's returned by AWS Proton.

" + "documentation":"

The environment account connection detail data that's returned by Proton.

" } } }, @@ -2188,7 +2662,7 @@ "members":{ "environment":{ "shape":"Environment", - "documentation":"

The environment detail data that's returned by AWS Proton.

" + "documentation":"

The environment detail data that's returned by Proton.

" } } }, @@ -2208,7 +2682,7 @@ "members":{ "environmentTemplate":{ "shape":"EnvironmentTemplate", - "documentation":"

The environment template detail data that's returned by AWS Proton.

" + "documentation":"

The environment template detail data that's returned by Proton.

" } } }, @@ -2222,7 +2696,7 @@ "members":{ "majorVersion":{ "shape":"TemplateVersionPart", - "documentation":"

To view environment template major version detail data, include majorVersion.

" + "documentation":"

To view environment template major version detail data, include majorVersion.

" }, "minorVersion":{ "shape":"TemplateVersionPart", @@ -2240,7 +2714,70 @@ "members":{ "environmentTemplateVersion":{ "shape":"EnvironmentTemplateVersion", - "documentation":"

The environment template version detail data that's returned by AWS Proton.

" + "documentation":"

The environment template version detail data that's returned by Proton.

" + } + } + }, + "GetRepositoryInput":{ + "type":"structure", + "required":[ + "name", + "provider" + ], + "members":{ + "name":{ + "shape":"RepositoryName", + "documentation":"

The repository name, for example myrepos/myrepo.

" + }, + "provider":{ + "shape":"RepositoryProvider", + "documentation":"

The repository provider.

" + } + } + }, + "GetRepositoryOutput":{ + "type":"structure", + "required":["repository"], + "members":{ + "repository":{ + "shape":"Repository", + "documentation":"

The repository detail data that's returned by Proton.

" + } + } + }, + "GetRepositorySyncStatusInput":{ + "type":"structure", + "required":[ + "branch", + "repositoryName", + "repositoryProvider", + "syncType" + ], + "members":{ + "branch":{ + "shape":"GitBranchName", + "documentation":"

The repository branch.

" + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

The repository name.

" + }, + "repositoryProvider":{ + "shape":"RepositoryProvider", + "documentation":"

The repository provider.

" + }, + "syncType":{ + "shape":"SyncType", + "documentation":"

The repository sync type.

" + } + } + }, + "GetRepositorySyncStatusOutput":{ + "type":"structure", + "members":{ + "latestSync":{ + "shape":"RepositorySyncAttempt", + "documentation":"

The repository sync status detail data that's returned by Proton.

" } } }, @@ -2277,7 +2814,7 @@ "members":{ "serviceInstance":{ "shape":"ServiceInstance", - "documentation":"

The service instance detail data that's returned by AWS Proton.

" + "documentation":"

The service instance detail data that's returned by Proton.

" } } }, @@ -2286,7 +2823,7 @@ "members":{ "service":{ "shape":"Service", - "documentation":"

The service detail data that's returned by AWS Proton.

" + "documentation":"

The service detail data that's returned by Proton.

" } } }, @@ -2306,7 +2843,7 @@ "members":{ "serviceTemplate":{ "shape":"ServiceTemplate", - "documentation":"

The service template detail data that's returned by AWS Proton.

" + "documentation":"

The service template detail data that's returned by Proton.

" } } }, @@ -2320,7 +2857,7 @@ "members":{ "majorVersion":{ "shape":"TemplateVersionPart", - "documentation":"

To view service template major version detail data, include majorVersion.

" + "documentation":"

To view service template major version detail data, include majorVersion.

" }, "minorVersion":{ "shape":"TemplateVersionPart", @@ -2338,7 +2875,72 @@ "members":{ "serviceTemplateVersion":{ "shape":"ServiceTemplateVersion", - "documentation":"

The service template version detail data that's returned by AWS Proton.

" + "documentation":"

The service template version detail data that's returned by Proton.

" + } + } + }, + "GetTemplateSyncConfigInput":{ + "type":"structure", + "required":[ + "templateName", + "templateType" + ], + "members":{ + "templateName":{ + "shape":"ResourceName", + "documentation":"

The template name.

" + }, + "templateType":{ + "shape":"TemplateType", + "documentation":"

The template type.

" + } + } + }, + "GetTemplateSyncConfigOutput":{ + "type":"structure", + "members":{ + "templateSyncConfig":{ + "shape":"TemplateSyncConfig", + "documentation":"

The template sync configuration detail data that's returned by Proton.

" + } + } + }, + "GetTemplateSyncStatusInput":{ + "type":"structure", + "required":[ + "templateName", + "templateType", + "templateVersion" + ], + "members":{ + "templateName":{ + "shape":"ResourceName", + "documentation":"

The template name.

" + }, + "templateType":{ + "shape":"TemplateType", + "documentation":"

The template type.

" + }, + "templateVersion":{ + "shape":"TemplateVersionPart", + "documentation":"

The template version.

" + } + } + }, + "GetTemplateSyncStatusOutput":{ + "type":"structure", + "members":{ + "desiredState":{ + "shape":"Revision", + "documentation":"

The template sync desired state that's returned by Proton.

" + }, + "latestSuccessfulSync":{ + "shape":"ResourceSyncAttempt", + "documentation":"

The details of the last successful sync that's returned by Proton.

" + }, + "latestSync":{ + "shape":"ResourceSyncAttempt", + "documentation":"

The details of the last sync that's returned by Proton.

" } } }, @@ -2390,7 +2992,7 @@ "members":{ "environmentAccountConnections":{ "shape":"EnvironmentAccountConnectionSummaryList", - "documentation":"

An array of environment account connections with details that's returned by AWS Proton.

" + "documentation":"

An array of environment account connections with details that's returned by Proton.

" }, "nextToken":{ "shape":"NextToken", @@ -2398,13 +3000,69 @@ } } }, + "ListEnvironmentOutputsInput":{ + "type":"structure", + "required":["environmentName"], + "members":{ + "environmentName":{ + "shape":"ResourceName", + "documentation":"

The environment name.

" + }, + "nextToken":{ + "shape":"EmptyNextToken", + "documentation":"

A token to indicate the location of the next environment output in the array of environment outputs, after the list of environment outputs that was previously requested.

" + } + } + }, + "ListEnvironmentOutputsOutput":{ + "type":"structure", + "required":["outputs"], + "members":{ + "nextToken":{ + "shape":"EmptyNextToken", + "documentation":"

A token to indicate the location of the next environment output in the array of environment outputs, after the current requested list of environment outputs.

" + }, + "outputs":{ + "shape":"OutputsList", + "documentation":"

An array of environment outputs with detail data.

" + } + } + }, + "ListEnvironmentProvisionedResourcesInput":{ + "type":"structure", + "required":["environmentName"], + "members":{ + "environmentName":{ + "shape":"ResourceName", + "documentation":"

The environment name.

" + }, + "nextToken":{ + "shape":"EmptyNextToken", + "documentation":"

A token to indicate the location of the next environment provisioned resource in the array of environment provisioned resources, after the list of environment provisioned resources that was previously requested.

" + } + } + }, + "ListEnvironmentProvisionedResourcesOutput":{ + "type":"structure", + "required":["provisionedResources"], + "members":{ + "nextToken":{ + "shape":"EmptyNextToken", + "documentation":"

A token to indicate the location of the next environment provisioned resource in the array of provisioned resources, after the current requested list of environment provisioned resources.

" + }, + "provisionedResources":{ + "shape":"ProvisionedResourceList", + "documentation":"

An array of environment provisioned resources.

" + } + } + }, "ListEnvironmentTemplateVersionsInput":{ "type":"structure", "required":["templateName"], "members":{ "majorVersion":{ "shape":"TemplateVersionPart", - "documentation":"

To view a list of minor of versions under a major version of an environment template, include majorVersion.

To view a list of major versions of an environment template, exclude majorVersion.

" + "documentation":"

To view a list of minor versions under a major version of an environment template, include majorVersion.

To view a list of major versions of an environment template, exclude majorVersion.

" }, "maxResults":{ "shape":"MaxPageResults", @@ -2492,138 +3150,331 @@ } } }, - "ListServiceInstancesInput":{ + "ListRepositoriesInput":{ "type":"structure", "members":{ "maxResults":{ "shape":"MaxPageResults", - "documentation":"

The maximum number of service instances to list.

" + "documentation":"

The maximum number of repositories to list.

" }, "nextToken":{ "shape":"NextToken", - "documentation":"

A token to indicate the location of the next service in the array of service instances, after the list of service instances that was previously requested.

" - }, - "serviceName":{ - "shape":"ResourceName", - "documentation":"

The name of the service that the service instance belongs to.

" + "documentation":"

A token to indicate the location of the next repository in the array of repositories, after the list of repositories previously requested.

" } } }, - "ListServiceInstancesOutput":{ + "ListRepositoriesOutput":{ "type":"structure", - "required":["serviceInstances"], + "required":["repositories"], "members":{ "nextToken":{ "shape":"NextToken", - "documentation":"

A token to indicate the location of the next service instance in the array of service instances, after the current requested list of service instances.

" + "documentation":"

A token to indicate the location of the next repository in the array of repositories, after the current requested list of repositories.

" }, - "serviceInstances":{ - "shape":"ServiceInstanceSummaryList", - "documentation":"

An array of service instances with summaries of detail data.

" + "repositories":{ + "shape":"RepositorySummaryList", + "documentation":"

An array of repositories.

" } } }, - "ListServiceTemplateVersionsInput":{ + "ListRepositorySyncDefinitionsInput":{ "type":"structure", - "required":["templateName"], + "required":[ + "repositoryName", + "repositoryProvider", + "syncType" + ], "members":{ - "majorVersion":{ - "shape":"TemplateVersionPart", - "documentation":"

To view a list of minor of versions under a major version of a service template, include majorVersion.

To view a list of major versions of a service template, exclude majorVersion.

" + "nextToken":{ + "shape":"EmptyNextToken", + "documentation":"

A token to indicate the location of the next repository sync definition in the array of repository sync definitions, after the list of repository sync definitions previously requested.

" }, - "maxResults":{ - "shape":"MaxPageResults", - "documentation":"

The maximum number of major or minor versions of a service template to list.

" + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

The repository name.

" }, - "nextToken":{ - "shape":"NextToken", - "documentation":"

A token to indicate the location of the next major or minor version in the array of major or minor versions of a service template, after the list of major or minor versions that was previously requested.

" + "repositoryProvider":{ + "shape":"RepositoryProvider", + "documentation":"

The repository provider.

" }, - "templateName":{ - "shape":"ResourceName", - "documentation":"

The name of the service template.

" + "syncType":{ + "shape":"SyncType", + "documentation":"

The sync type. The only supported value is TEMPLATE_SYNC.

" } } }, - "ListServiceTemplateVersionsOutput":{ + "ListRepositorySyncDefinitionsOutput":{ "type":"structure", - "required":["templateVersions"], + "required":["syncDefinitions"], "members":{ "nextToken":{ - "shape":"NextToken", - "documentation":"

A token to indicate the location of the next major or minor version in the array of major or minor versions of a service template, after the list of major or minor versions that was previously requested.

" + "shape":"EmptyNextToken", + "documentation":"

A token to indicate the location of the next repository sync definition in the array of repository sync definitions, after the current requested list of repository sync definitions.

" }, - "templateVersions":{ - "shape":"ServiceTemplateVersionSummaryList", - "documentation":"

An array of major or minor versions of a service template with detail data.

" + "syncDefinitions":{ + "shape":"RepositorySyncDefinitionList", + "documentation":"

An array of repository sync definitions.

" } } }, - "ListServiceTemplatesInput":{ + "ListServiceInstanceOutputsInput":{ "type":"structure", + "required":[ + "serviceInstanceName", + "serviceName" + ], "members":{ - "maxResults":{ - "shape":"MaxPageResults", - "documentation":"

The maximum number of service templates to list.

" - }, "nextToken":{ - "shape":"NextToken", - "documentation":"

A token to indicate the location of the next service template in the array of service templates, after the list of service templates previously requested.

" + "shape":"EmptyNextToken", + "documentation":"

A token to indicate the location of the next output in the array of outputs, after the list of outputs that was previously requested.

" + }, + "serviceInstanceName":{ + "shape":"ResourceName", + "documentation":"

The service instance name.

" + }, + "serviceName":{ + "shape":"ResourceName", + "documentation":"

The service name.

" } } }, - "ListServiceTemplatesOutput":{ + "ListServiceInstanceOutputsOutput":{ "type":"structure", - "required":["templates"], + "required":["outputs"], "members":{ "nextToken":{ - "shape":"NextToken", - "documentation":"

A token to indicate the location of the next service template in the array of service templates, after the current requested list of service templates.

" + "shape":"EmptyNextToken", + "documentation":"

A token to indicate the location of the next output in the array of outputs, after the current requested list of outputs.

" }, - "templates":{ - "shape":"ServiceTemplateSummaryList", - "documentation":"

An array of service templates with detail data.

" + "outputs":{ + "shape":"OutputsList", + "documentation":"

An array of service instance infrastructure as code outputs.

" } } }, - "ListServicesInput":{ + "ListServiceInstanceProvisionedResourcesInput":{ "type":"structure", + "required":[ + "serviceInstanceName", + "serviceName" + ], "members":{ - "maxResults":{ - "shape":"MaxPageResults", - "documentation":"

The maximum number of services to list.

" - }, "nextToken":{ - "shape":"NextToken", - "documentation":"

A token to indicate the location of the next service in the array of services, after the list of services that was previously requested.

" + "shape":"EmptyNextToken", + "documentation":"

A token to indicate the location of the next provisioned resource in the array of provisioned resources, after the list of provisioned resources that was previously requested.

" + }, + "serviceInstanceName":{ + "shape":"ResourceName", + "documentation":"

The service instance name.

" + }, + "serviceName":{ + "shape":"ResourceName", + "documentation":"

The service name.

" } } }, - "ListServicesOutput":{ + "ListServiceInstanceProvisionedResourcesOutput":{ "type":"structure", - "required":["services"], + "required":["provisionedResources"], "members":{ "nextToken":{ - "shape":"NextToken", - "documentation":"

A token to indicate the location of the next service in the array of services, after the current requested list of services.

" + "shape":"EmptyNextToken", + "documentation":"

A token to indicate the location of the next provisioned resource in the array of provisioned resources, after the current requested list of provisioned resources.

" }, - "services":{ - "shape":"ServiceSummaryList", - "documentation":"

An array of services with summaries of detail data.

" + "provisionedResources":{ + "shape":"ProvisionedResourceList", + "documentation":"

An array of provisioned resources for a service instance.

" } } }, - "ListTagsForResourceInput":{ + "ListServiceInstancesInput":{ "type":"structure", - "required":["resourceArn"], "members":{ "maxResults":{ "shape":"MaxPageResults", - "documentation":"

The maximum number of tags to list.

" + "documentation":"

The maximum number of service instances to list.

" }, "nextToken":{ - "shape":"String", - "documentation":"

A token to indicate the location of the next resource tag in the array of resource tags, after the list of resource tags that was previously requested.

" + "shape":"NextToken", + "documentation":"

A token to indicate the location of the next service in the array of service instances, after the list of service instances that was previously requested.

" + }, + "serviceName":{ + "shape":"ResourceName", + "documentation":"

The name of the service that the service instance belongs to.

" + } + } + }, + "ListServiceInstancesOutput":{ + "type":"structure", + "required":["serviceInstances"], + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

A token to indicate the location of the next service instance in the array of service instances, after the current requested list of service instances.

" + }, + "serviceInstances":{ + "shape":"ServiceInstanceSummaryList", + "documentation":"

An array of service instances with summaries of detail data.

" + } + } + }, + "ListServicePipelineOutputsInput":{ + "type":"structure", + "required":["serviceName"], + "members":{ + "nextToken":{ + "shape":"EmptyNextToken", + "documentation":"

A token to indicate the location of the next output in the array of outputs, after the list of outputs that was previously requested.

" + }, + "serviceName":{ + "shape":"ResourceName", + "documentation":"

The service name.

" + } + } + }, + "ListServicePipelineOutputsOutput":{ + "type":"structure", + "required":["outputs"], + "members":{ + "nextToken":{ + "shape":"EmptyNextToken", + "documentation":"

A token to indicate the location of the next output in the array of outputs, after the current requested list of outputs.

" + }, + "outputs":{ + "shape":"OutputsList", + "documentation":"

An array of outputs.

" + } + } + }, + "ListServicePipelineProvisionedResourcesInput":{ + "type":"structure", + "required":["serviceName"], + "members":{ + "nextToken":{ + "shape":"EmptyNextToken", + "documentation":"

A token to indicate the location of the next provisioned resource in the array of provisioned resources, after the list of provisioned resources that was previously requested.

" + }, + "serviceName":{ + "shape":"ResourceName", + "documentation":"

The service name.

" + } + } + }, + "ListServicePipelineProvisionedResourcesOutput":{ + "type":"structure", + "required":["provisionedResources"], + "members":{ + "nextToken":{ + "shape":"EmptyNextToken", + "documentation":"

A token to indicate the location of the next provisioned resource in the array of provisioned resources, after the current requested list of provisioned resources.

" + }, + "provisionedResources":{ + "shape":"ProvisionedResourceList", + "documentation":"

An array of provisioned resources for a service and pipeline.

" + } + } + }, + "ListServiceTemplateVersionsInput":{ + "type":"structure", + "required":["templateName"], + "members":{ + "majorVersion":{ + "shape":"TemplateVersionPart", + "documentation":"

To view a list of minor versions under a major version of a service template, include majorVersion.

To view a list of major versions of a service template, exclude majorVersion.

" + }, + "maxResults":{ + "shape":"MaxPageResults", + "documentation":"

The maximum number of major or minor versions of a service template to list.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

A token to indicate the location of the next major or minor version in the array of major or minor versions of a service template, after the list of major or minor versions that was previously requested.

" + }, + "templateName":{ + "shape":"ResourceName", + "documentation":"

The name of the service template.

" + } + } + }, + "ListServiceTemplateVersionsOutput":{ + "type":"structure", + "required":["templateVersions"], + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

A token to indicate the location of the next major or minor version in the array of major or minor versions of a service template, after the current requested list of service major or minor versions.

" + }, + "templateVersions":{ + "shape":"ServiceTemplateVersionSummaryList", + "documentation":"

An array of major or minor versions of a service template with detail data.

" + } + } + }, + "ListServiceTemplatesInput":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"MaxPageResults", + "documentation":"

The maximum number of service templates to list.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

A token to indicate the location of the next service template in the array of service templates, after the list of service templates previously requested.

" + } + } + }, + "ListServiceTemplatesOutput":{ + "type":"structure", + "required":["templates"], + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

A token to indicate the location of the next service template in the array of service templates, after the current requested list of service templates.

" + }, + "templates":{ + "shape":"ServiceTemplateSummaryList", + "documentation":"

An array of service templates with detail data.

" + } + } + }, + "ListServicesInput":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"MaxPageResults", + "documentation":"

The maximum number of services to list.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

A token to indicate the location of the next service in the array of services, after the list of services that was previously requested.

" + } + } + }, + "ListServicesOutput":{ + "type":"structure", + "required":["services"], + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

A token to indicate the location of the next service in the array of services, after the current requested list of services.

" + }, + "services":{ + "shape":"ServiceSummaryList", + "documentation":"

An array of services with summaries of detail data.

" + } + } + }, + "ListTagsForResourceInput":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "maxResults":{ + "shape":"MaxPageResults", + "documentation":"

The maximum number of tags to list.

" + }, + "nextToken":{ + "shape":"String", + "documentation":"

A token to indicate the location of the next resource tag in the array of resource tags, after the list of resource tags that was previously requested.

" }, "resourceArn":{ "shape":"Arn", @@ -2655,6 +3506,121 @@ "type":"string", "pattern":"^[A-Za-z0-9+=/]+$" }, + "NotifyResourceDeploymentStatusChangeInput":{ + "type":"structure", + "required":[ + "resourceArn", + "status" + ], + "members":{ + "deploymentId":{ + "shape":"DeploymentId", + "documentation":"

The deployment ID for your provisioned resource.

" + }, + "outputs":{ + "shape":"NotifyResourceDeploymentStatusChangeInputOutputsList", + "documentation":"

The provisioned resource state change detail data that's returned by Proton.

" + }, + "resourceArn":{ + "shape":"Arn", + "documentation":"

The provisioned resource Amazon Resource Name (ARN).

" + }, + "status":{ + "shape":"ResourceDeploymentStatus", + "documentation":"

The status of your provisioned resource.

" + }, + "statusMessage":{ + "shape":"SyntheticNotifyResourceDeploymentStatusChangeInputString", + "documentation":"

The deployment status message for your provisioned resource.

" + } + } + }, + "NotifyResourceDeploymentStatusChangeInputOutputsList":{ + "type":"list", + "member":{"shape":"Output"}, + "max":50, + "min":0 + }, + "NotifyResourceDeploymentStatusChangeOutput":{ + "type":"structure", + "members":{ + } + }, + "Output":{ + "type":"structure", + "members":{ + "key":{ + "shape":"OutputKey", + "documentation":"

The output key.

" + }, + "valueString":{ + "shape":"OutputValueString", + "documentation":"

The output value.

" + } + }, + "documentation":"

An infrastructure as code defined resource output.

", + "sensitive":true + }, + "OutputKey":{ + "type":"string", + "max":1024, + "min":1 + }, + "OutputValueString":{ + "type":"string", + "max":1024, + "min":1 + }, + "OutputsList":{ + "type":"list", + "member":{"shape":"Output"} + }, + "PipelineRoleArn":{ + "type":"string", + "max":2048, + "min":0, + "pattern":"(^$)|(^arn:[a-zA-Z-]+:[a-zA-Z0-9-]+:[a-zA-Z0-9-]*:\\d*:[\\w+=\\/:,\\.@-]*)" + }, + "ProvisionedResource":{ + "type":"structure", + "members":{ + "identifier":{ + "shape":"ProvisionedResourceIdentifier", + "documentation":"

The provisioned resource identifier.

" + }, + "name":{ + "shape":"ProvisionedResourceName", + "documentation":"

The provisioned resource name.

" + }, + "provisioningEngine":{ + "shape":"ProvisionedResourceEngine", + "documentation":"

The resource provisioning engine.

Provisioning by pull request is currently in feature preview and is only usable with Terraform based Proton Templates. To learn more about Amazon Web Services Feature Preview terms, see section 2 on Beta and Previews.

" + } + }, + "documentation":"

Detail data for a provisioned resource.

" + }, + "ProvisionedResourceEngine":{ + "type":"string", + "documentation":"

List of provisioning engines

", + "enum":[ + "CLOUDFORMATION", + "TERRAFORM" + ] + }, + "ProvisionedResourceIdentifier":{ + "type":"string", + "max":200, + "min":1 + }, + "ProvisionedResourceList":{ + "type":"list", + "member":{"shape":"ProvisionedResource"} + }, + "ProvisionedResourceName":{ + "type":"string", + "max":200, + "min":1 + }, "Provisioning":{ "type":"string", "enum":["CUSTOMER_MANAGED"] @@ -2675,15 +3641,245 @@ "members":{ "environmentAccountConnection":{ "shape":"EnvironmentAccountConnection", - "documentation":"

The environment connection account detail data that's returned by AWS Proton.

" + "documentation":"

The environment connection account detail data that's returned by Proton.

" } } }, + "Repository":{ + "type":"structure", + "required":[ + "arn", + "connectionArn", + "name", + "provider" + ], + "members":{ + "arn":{ + "shape":"RepositoryArn", + "documentation":"

The repository Amazon Resource Name (ARN).

" + }, + "connectionArn":{ + "shape":"Arn", + "documentation":"

The repository Amazon Web Services CodeStar connection that connects Proton to your repository.

" + }, + "encryptionKey":{ + "shape":"Arn", + "documentation":"

Your customer Amazon Web Services KMS encryption key.

" + }, + "name":{ + "shape":"RepositoryName", + "documentation":"

The repository name.

" + }, + "provider":{ + "shape":"RepositoryProvider", + "documentation":"

The repository provider.

" + } + }, + "documentation":"

Detail data for a repository that has been registered with Proton.

" + }, + "RepositoryArn":{"type":"string"}, + "RepositoryBranch":{ + "type":"structure", + "required":[ + "arn", + "branch", + "name", + "provider" + ], + "members":{ + "arn":{ + "shape":"RepositoryArn", + "documentation":"

The Amazon Resource Name (ARN) of the repository branch.

" + }, + "branch":{ + "shape":"GitBranchName", + "documentation":"

The repository branch.

" + }, + "name":{ + "shape":"RepositoryName", + "documentation":"

The repository name.

" + }, + "provider":{ + "shape":"RepositoryProvider", + "documentation":"

The repository provider.

" + } + }, + "documentation":"

Detail data for a repository branch.

Provisioning by pull request is currently in feature preview and is only usable with Terraform based Proton Templates. To learn more about Amazon Web Services Feature Preview terms, see section 2 on Beta and Previews.

" + }, + "RepositoryBranchInput":{ + "type":"structure", + "required":[ + "branch", + "name", + "provider" + ], + "members":{ + "branch":{ + "shape":"GitBranchName", + "documentation":"

The repository branch.

" + }, + "name":{ + "shape":"RepositoryName", + "documentation":"

The repository name.

" + }, + "provider":{ + "shape":"RepositoryProvider", + "documentation":"

The repository provider.

" + } + }, + "documentation":"

Detail input data for a repository branch.

Provisioning by pull request is currently in feature preview and is only usable with Terraform based Proton Templates. To learn more about Amazon Web Services Feature Preview terms, see section 2 on Beta and Previews.

" + }, "RepositoryId":{ "type":"string", "max":200, "min":1 }, + "RepositoryName":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[A-Za-z0-9_.-].*/[A-Za-z0-9_.-].*" + }, + "RepositoryProvider":{ + "type":"string", + "enum":[ + "GITHUB", + "GITHUB_ENTERPRISE", + "BITBUCKET" + ] + }, + "RepositorySummary":{ + "type":"structure", + "required":[ + "arn", + "name", + "provider" + ], + "members":{ + "arn":{ + "shape":"RepositoryArn", + "documentation":"

The Amazon Resource Name (ARN) for a repository.

" + }, + "name":{ + "shape":"RepositoryName", + "documentation":"

The repository name.

" + }, + "provider":{ + "shape":"RepositoryProvider", + "documentation":"

The repository provider.

" + } + }, + "documentation":"

A summary of detail data for a registered repository.

" + }, + "RepositorySummaryList":{ + "type":"list", + "member":{"shape":"RepositorySummary"} + }, + "RepositorySyncAttempt":{ + "type":"structure", + "required":[ + "events", + "startedAt", + "status" + ], + "members":{ + "events":{ + "shape":"RepositorySyncEvents", + "documentation":"

Detail data for sync attempt events.

" + }, + "startedAt":{ + "shape":"Timestamp", + "documentation":"

The time when the sync attempt started.

" + }, + "status":{ + "shape":"RepositorySyncStatus", + "documentation":"

The sync attempt status.

" + } + }, + "documentation":"

Detail data for a repository sync attempt activated by a push to a repository.

" + }, + "RepositorySyncDefinition":{ + "type":"structure", + "required":[ + "branch", + "directory", + "parent", + "target" + ], + "members":{ + "branch":{ + "shape":"GitBranchName", + "documentation":"

The repository branch.

" + }, + "directory":{ + "shape":"String", + "documentation":"

The directory in the repository.

" + }, + "parent":{ + "shape":"String", + "documentation":"

The resource that is synced from.

" + }, + "target":{ + "shape":"String", + "documentation":"

The resource that is synced to.

" + } + }, + "documentation":"

The repository sync definition.

" + }, + "RepositorySyncDefinitionList":{ + "type":"list", + "member":{"shape":"RepositorySyncDefinition"} + }, + "RepositorySyncEvent":{ + "type":"structure", + "required":[ + "event", + "time", + "type" + ], + "members":{ + "event":{ + "shape":"String", + "documentation":"

Event detail for a repository sync attempt.

" + }, + "externalId":{ + "shape":"String", + "documentation":"

The external ID of the sync event.

" + }, + "time":{ + "shape":"Timestamp", + "documentation":"

The time that the sync event occurred.

" + }, + "type":{ + "shape":"String", + "documentation":"

The type of event.

" + } + }, + "documentation":"

Repository sync event detail data for a sync attempt.

" + }, + "RepositorySyncEvents":{ + "type":"list", + "member":{"shape":"RepositorySyncEvent"} + }, + "RepositorySyncStatus":{ + "type":"string", + "enum":[ + "INITIATED", + "IN_PROGRESS", + "SUCCEEDED", + "FAILED", + "QUEUED" + ] + }, + "ResourceDeploymentStatus":{ + "type":"string", + "documentation":"

The state that a PR-based deployment can be updated to.

", + "enum":[ + "IN_PROGRESS", + "FAILED", + "SUCCEEDED" + ] + }, "ResourceName":{ "type":"string", "max":100, @@ -2699,6 +3895,117 @@ "documentation":"

The requested resource wasn't found.

", "exception":true }, + "ResourceSyncAttempt":{ + "type":"structure", + "required":[ + "events", + "initialRevision", + "startedAt", + "status", + "target", + "targetRevision" + ], + "members":{ + "events":{ + "shape":"ResourceSyncEvents", + "documentation":"

An array of events with detail data.

" + }, + "initialRevision":{ + "shape":"Revision", + "documentation":"

Detail data for the initial repository commit, path and push.

" + }, + "startedAt":{ + "shape":"Timestamp", + "documentation":"

The time when the sync attempt started.

" + }, + "status":{ + "shape":"ResourceSyncStatus", + "documentation":"

The status of the sync attempt.

" + }, + "target":{ + "shape":"String", + "documentation":"

The resource that is synced to.

" + }, + "targetRevision":{ + "shape":"Revision", + "documentation":"

Detail data for the target revision.

" + } + }, + "documentation":"

Detail data for a resource sync attempt activated by a push to a repository.

" + }, + "ResourceSyncEvent":{ + "type":"structure", + "required":[ + "event", + "time", + "type" + ], + "members":{ + "event":{ + "shape":"String", + "documentation":"

A resource sync event.

" + }, + "externalId":{ + "shape":"String", + "documentation":"

The external ID for the event.

" + }, + "time":{ + "shape":"Timestamp", + "documentation":"

The time when the event occurred.

" + }, + "type":{ + "shape":"String", + "documentation":"

The type of event.

" + } + }, + "documentation":"

Detail data for a resource sync event.

" + }, + "ResourceSyncEvents":{ + "type":"list", + "member":{"shape":"ResourceSyncEvent"} + }, + "ResourceSyncStatus":{ + "type":"string", + "enum":[ + "INITIATED", + "IN_PROGRESS", + "SUCCEEDED", + "FAILED" + ] + }, + "Revision":{ + "type":"structure", + "required":[ + "branch", + "directory", + "repositoryName", + "repositoryProvider", + "sha" + ], + "members":{ + "branch":{ + "shape":"GitBranchName", + "documentation":"

The repository branch.

" + }, + "directory":{ + "shape":"String", + "documentation":"

The repository directory changed by a commit and push that activated the sync attempt.

" + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

The repository name.

" + }, + "repositoryProvider":{ + "shape":"RepositoryProvider", + "documentation":"

The repository provider.

" + }, + "sha":{ + "shape":"SHA", + "documentation":"

The secure hash algorithm (SHA) hash for the revision.

" + } + }, + "documentation":"

Revision detail data for a commit and push that activates a sync attempt.

" + }, "S3Bucket":{ "type":"string", "max":63, @@ -2728,6 +4035,11 @@ }, "documentation":"

Template bundle S3 bucket data.

" }, + "SHA":{ + "type":"string", + "max":255, + "min":1 + }, "Service":{ "type":"structure", "required":[ @@ -2746,7 +4058,7 @@ }, "branchName":{ "shape":"GitBranchName", - "documentation":"

The name of the code repository branch that holds the code that's deployed in AWS Proton.

" + "documentation":"

The name of the code repository branch that holds the code that's deployed in Proton.

" }, "createdAt":{ "shape":"Timestamp", @@ -2770,11 +4082,11 @@ }, "repositoryConnectionArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the repository connection. For more information, see Set up a repository connection in the AWS Proton Administrator Guide and Setting up with AWS Proton in the AWS Proton User Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the repository connection. For more information, see Set up a repository connection in the Proton Administrator Guide and Setting up with Proton in the Proton User Guide.

" }, "repositoryId":{ "shape":"RepositoryId", - "documentation":"

The ID of the code repository.

" + "documentation":"

The ID of the source code repository.

" }, "spec":{ "shape":"SpecContents", @@ -2854,11 +4166,11 @@ }, "templateMajorVersion":{ "shape":"TemplateVersionPart", - "documentation":"

The ID of the major version of the service template that was used to create the service instance.

" + "documentation":"

The major version of the service template that was used to create the service instance.

" }, "templateMinorVersion":{ "shape":"TemplateVersionPart", - "documentation":"

The ID of the minor version of the service template that was used to create the service instance.

" + "documentation":"

The minor version of the service template that was used to create the service instance.

" }, "templateName":{ "shape":"ResourceName", @@ -2922,11 +4234,11 @@ }, "templateMajorVersion":{ "shape":"TemplateVersionPart", - "documentation":"

The ID of the major version of a service template.

" + "documentation":"

The service instance template major version.

" }, "templateMinorVersion":{ "shape":"TemplateVersionPart", - "documentation":"

The ID of the minor version of a service template.

" + "documentation":"

The service instance template minor version.

" }, "templateName":{ "shape":"ResourceName", @@ -2982,11 +4294,11 @@ }, "templateMajorVersion":{ "shape":"TemplateVersionPart", - "documentation":"

The ID of the major version of the service template that was used to create the service pipeline.

" + "documentation":"

The major version of the service template that was used to create the service pipeline.

" }, "templateMinorVersion":{ "shape":"TemplateVersionPart", - "documentation":"

The ID of the minor version of the service template that was used to create the service pipeline.

" + "documentation":"

The minor version of the service template that was used to create the service pipeline.

" }, "templateName":{ "shape":"ResourceName", @@ -3001,7 +4313,7 @@ "members":{ "message":{"shape":"ErrorMessage"} }, - "documentation":"

A quota was exceeded. For more information, see AWS Proton Quotas in the AWS Proton Administrator Guide.

", + "documentation":"

A quota was exceeded. For more information, see Proton Quotas in the Proton Administrator Guide.

", "exception":true }, "ServiceStatus":{ @@ -3116,7 +4428,7 @@ }, "recommendedVersion":{ "shape":"FullTemplateVersionNumber", - "documentation":"

The ID of the recommended version of the service template.

" + "documentation":"

The recommended version of the service template.

" } }, "documentation":"

The service template detail data.

" @@ -3161,7 +4473,7 @@ }, "recommendedVersion":{ "shape":"FullTemplateVersionNumber", - "documentation":"

The ID of the recommended version of the service template.

" + "documentation":"

The recommended version of the service template.

" } }, "documentation":"

The service template summary data.

" @@ -3205,15 +4517,15 @@ }, "majorVersion":{ "shape":"TemplateVersionPart", - "documentation":"

The ID of the latest major version that's associated with the version of a service template.

" + "documentation":"

The latest major version that's associated with the version of a service template.

" }, "minorVersion":{ "shape":"TemplateVersionPart", - "documentation":"

The ID of the minor version of a service template.

" + "documentation":"

The minor version of a service template.

" }, "recommendedMinorVersion":{ "shape":"TemplateVersionPart", - "documentation":"

The ID of the recommended minor version of the service template.

" + "documentation":"

The recommended minor version of the service template.

" }, "schema":{ "shape":"TemplateSchema", @@ -3265,15 +4577,15 @@ }, "majorVersion":{ "shape":"TemplateVersionPart", - "documentation":"

The ID of the latest major version that's associated with the version of a service template.

" + "documentation":"

The latest major version that's associated with the version of a service template.

" }, "minorVersion":{ "shape":"TemplateVersionPart", - "documentation":"

The ID of the minor version of a service template.

" + "documentation":"

The minor version of a service template.

" }, "recommendedMinorVersion":{ "shape":"TemplateVersionPart", - "documentation":"

The ID of the recommended minor version of the service template.

" + "documentation":"

The recommended minor version of the service template.

" }, "status":{ "shape":"TemplateVersionStatus", @@ -3305,6 +4617,21 @@ "sensitive":true }, "String":{"type":"string"}, + "Subdirectory":{ + "type":"string", + "max":4096, + "min":1 + }, + "SyncType":{ + "type":"string", + "enum":["TEMPLATE_SYNC"] + }, + "SyntheticNotifyResourceDeploymentStatusChangeInputString":{ + "type":"string", + "max":5000, + "min":0, + "sensitive":true + }, "Tag":{ "type":"structure", "required":[ @@ -3373,6 +4700,50 @@ "min":1, "sensitive":true }, + "TemplateSyncConfig":{ + "type":"structure", + "required":[ + "branch", + "repositoryName", + "repositoryProvider", + "templateName", + "templateType" + ], + "members":{ + "branch":{ + "shape":"GitBranchName", + "documentation":"

The repository branch.

" + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository, for example myrepos/myrepo.

" + }, + "repositoryProvider":{ + "shape":"RepositoryProvider", + "documentation":"

The repository provider.

" + }, + "subdirectory":{ + "shape":"Subdirectory", + "documentation":"

A subdirectory path to your template bundle version.

" + }, + "templateName":{ + "shape":"ResourceName", + "documentation":"

The template name.

" + }, + "templateType":{ + "shape":"TemplateType", + "documentation":"

The template type.

" + } + }, + "documentation":"

The detail data for a template sync configuration.

" + }, + "TemplateType":{ + "type":"string", + "enum":[ + "ENVIRONMENT", + "SERVICE" + ] + }, "TemplateVersionPart":{ "type":"string", "max":20, @@ -3435,9 +4806,13 @@ "UpdateAccountSettingsInput":{ "type":"structure", "members":{ + "pipelineProvisioningRepository":{ + "shape":"RepositoryBranchInput", + "documentation":"

The repository that you provide with pull request provisioning.

Provisioning by pull request is currently in feature preview and is only usable with Terraform based Proton Templates. To learn more about Amazon Web Services Feature Preview terms, see section 2 on Beta and Previews.

" + }, "pipelineServiceRoleArn":{ - "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the AWS Proton pipeline service role.

" + "shape":"PipelineRoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the Proton pipeline service role.

Provisioning by pull request is currently in feature preview and is only usable with Terraform based Proton Templates. To learn more about Amazon Web Services Feature Preview terms, see section 2 on Beta and Previews.

" } } }, @@ -3447,7 +4822,7 @@ "members":{ "accountSettings":{ "shape":"AccountSettings", - "documentation":"

The AWS Proton pipeline service role detail data that's returned by AWS Proton.

" + "documentation":"

The Proton pipeline service role repository detail data that's returned by Proton.

" } } }, @@ -3474,7 +4849,7 @@ "members":{ "environmentAccountConnection":{ "shape":"EnvironmentAccountConnection", - "documentation":"

The environment account connection detail data that's returned by AWS Proton.

" + "documentation":"

The environment account connection detail data that's returned by Proton.

" } } }, @@ -3503,7 +4878,11 @@ }, "protonServiceRoleArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the AWS Proton service role that allows AWS Proton to make API calls to other services your behalf.

" + "documentation":"

The Amazon Resource Name (ARN) of the Proton service role that allows Proton to make API calls to other services on your behalf.

" + }, + "provisioningRepository":{ + "shape":"RepositoryBranchInput", + "documentation":"

The repository that you provide with pull request provisioning.

Provisioning by pull request is currently in feature preview and is only usable with Terraform based Proton Templates. To learn more about Amazon Web Services Feature Preview terms, see section 2 on Beta and Previews.

" }, "spec":{ "shape":"SpecContents", @@ -3511,11 +4890,11 @@ }, "templateMajorVersion":{ "shape":"TemplateVersionPart", - "documentation":"

The ID of the major version of the environment to update.

" + "documentation":"

The major version of the environment to update.

" }, "templateMinorVersion":{ "shape":"TemplateVersionPart", - "documentation":"

The ID of the minor version of the environment to update.

" + "documentation":"

The minor version of the environment to update.

" } } }, @@ -3525,7 +4904,7 @@ "members":{ "environment":{ "shape":"Environment", - "documentation":"

The environment detail data that's returned by AWS Proton.

" + "documentation":"

The environment detail data that's returned by Proton.

" } } }, @@ -3553,7 +4932,7 @@ "members":{ "environmentTemplate":{ "shape":"EnvironmentTemplate", - "documentation":"

The environment template detail data that's returned by AWS Proton.

" + "documentation":"

The environment template detail data that's returned by Proton.

" } } }, @@ -3571,7 +4950,7 @@ }, "majorVersion":{ "shape":"TemplateVersionPart", - "documentation":"

To update a major version of an environment template, include majorVersion.

" + "documentation":"

To update a major version of an environment template, include majorVersion.

" }, "minorVersion":{ "shape":"TemplateVersionPart", @@ -3593,7 +4972,7 @@ "members":{ "environmentTemplateVersion":{ "shape":"EnvironmentTemplateVersion", - "documentation":"

The environment template version detail data that's returned by AWS Proton.

" + "documentation":"

The environment template version detail data that's returned by Proton.

" } } }, @@ -3611,7 +4990,7 @@ }, "spec":{ "shape":"SpecContents", - "documentation":"

Lists the service instances to add and the existing service instances to remain. Omit the existing service instances to delete from the list. Don't include edits to the existing service instances or pipeline. For more information, see Edit a service in the AWS Proton Administrator Guide or the AWS Proton User Guide.

" + "documentation":"

Lists the service instances to add and the existing service instances to remain. Omit the existing service instances to delete from the list. Don't include edits to the existing service instances or pipeline. For more information, see Edit a service in the Proton Administrator Guide or the Proton User Guide.

" } } }, @@ -3655,7 +5034,7 @@ "members":{ "serviceInstance":{ "shape":"ServiceInstance", - "documentation":"

The service instance summary data returned by AWS Proton.

" + "documentation":"

The service instance summary data returned by Proton.

" } } }, @@ -3665,7 +5044,7 @@ "members":{ "service":{ "shape":"Service", - "documentation":"

The service detail data that's returned by AWS Proton.

" + "documentation":"

The service detail data that's returned by Proton.

" } } }, @@ -3705,7 +5084,7 @@ "members":{ "pipeline":{ "shape":"ServicePipeline", - "documentation":"

The pipeline details returned by AWS Proton.

" + "documentation":"

The pipeline details returned by Proton.

" } } }, @@ -3733,7 +5112,7 @@ "members":{ "serviceTemplate":{ "shape":"ServiceTemplate", - "documentation":"

The service template detail data that's returned by AWS Proton.

" + "documentation":"

The service template detail data that's returned by Proton.

" } } }, @@ -3755,7 +5134,7 @@ }, "majorVersion":{ "shape":"TemplateVersionPart", - "documentation":"

To update a major version of a service template, include majorVersion.

" + "documentation":"

To update a major version of a service template, include majorVersion.

" }, "minorVersion":{ "shape":"TemplateVersionPart", @@ -3777,7 +5156,52 @@ "members":{ "serviceTemplateVersion":{ "shape":"ServiceTemplateVersion", - "documentation":"

The service template version detail data that's returned by AWS Proton.

" + "documentation":"

The service template version detail data that's returned by Proton.

" + } + } + }, + "UpdateTemplateSyncConfigInput":{ + "type":"structure", + "required":[ + "branch", + "repositoryName", + "repositoryProvider", + "templateName", + "templateType" + ], + "members":{ + "branch":{ + "shape":"GitBranchName", + "documentation":"

The repository branch.

" + }, + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"

The name of the repository, for example myrepos/myrepo.

" + }, + "repositoryProvider":{ + "shape":"RepositoryProvider", + "documentation":"

The repository provider.

" + }, + "subdirectory":{ + "shape":"Subdirectory", + "documentation":"

A subdirectory path to your template bundle version. When included, limits the template bundle search to this repository directory.

" + }, + "templateName":{ + "shape":"ResourceName", + "documentation":"

The synced template name.

" + }, + "templateType":{ + "shape":"TemplateType", + "documentation":"

The synced template type.

" + } + } + }, + "UpdateTemplateSyncConfigOutput":{ + "type":"structure", + "members":{ + "templateSyncConfig":{ + "shape":"TemplateSyncConfig", + "documentation":"

The template sync configuration detail data that's returned by Proton.

" } } }, @@ -3791,5 +5215,5 @@ "exception":true } }, - "documentation":"

This is the AWS Proton Service API Reference. It provides descriptions, syntax and usage examples for each of the actions and data types for the AWS Proton service.

The documentation for each action shows the Query API request parameters and the XML response.

Alternatively, you can use the AWS CLI to access an API. For more information, see the AWS Command Line Interface User Guide.

The AWS Proton service is a two-pronged automation framework. Administrators create service templates to provide standardized infrastructure and deployment tooling for serverless and container based applications. Developers, in turn, select from the available service templates to automate their application or service deployments.

Because administrators define the infrastructure and tooling that AWS Proton deploys and manages, they need permissions to use all of the listed API operations.

When developers select a specific infrastructure and tooling set, AWS Proton deploys their applications. To monitor their applications that are running on AWS Proton, developers need permissions to the service create, list, update and delete API operations and the service instance list and update API operations.

To learn more about AWS Proton administration, see the AWS Proton Administrator Guide.

To learn more about deploying serverless and containerized applications on AWS Proton, see the AWS Proton User Guide.

Ensuring Idempotency

When you make a mutating API request, the request typically returns a result before the asynchronous workflows of the operation are complete. Operations might also time out or encounter other server issues before they're complete, even if the request already returned a result. This might make it difficult to determine whether the request succeeded. Moreover, you might need to retry the request multiple times to ensure that the operation completes successfully. However, if the original request and the subsequent retries are successful, the operation occurs multiple times. This means that you might create more resources than you intended.

Idempotency ensures that an API request action completes no more than one time. With an idempotent request, if the original request action completes successfully, any subsequent retries complete successfully without performing any further actions. However, the result might contain updated information, such as the current creation status.

The following lists of APIs are grouped according to methods that ensure idempotency.

Idempotent create APIs with a client token

The API actions in this list support idempotency with the use of a client token. The corresponding AWS CLI commands also support idempotency using a client token. A client token is a unique, case-sensitive string of up to 64 ASCII characters. To make an idempotent API request using one of these actions, specify a client token in the request. We recommend that you don't reuse the same client token for other API requests. If you don’t provide a client token for these APIs, a default client token is automatically provided by SDKs.

Given a request action that has succeeded:

If you retry the request using the same client token and the same parameters, the retry succeeds without performing any further actions other than returning the original resource detail data in the response.

If you retry the request using the same client token, but one or more of the parameters are different, the retry throws a ValidationException with an IdempotentParameterMismatch error.

Client tokens expire eight hours after a request is made. If you retry the request with the expired token, a new resource is created.

If the original resource is deleted and you retry the request, a new resource is created.

Idempotent create APIs with a client token:

Idempotent create APIs

Given a request action that has succeeded:

If you retry the request with an API from this group, and the original resource hasn't been modified, the retry succeeds without performing any further actions other than returning the original resource detail data in the response.

If the original resource has been modified, the retry throws a ConflictException.

If you retry with different input parameters, the retry throws a ValidationException with an IdempotentParameterMismatch error.

Idempotent create APIs:

Idempotent delete APIs

Given a request action that has succeeded:

When you retry the request with an API from this group and the resource was deleted, its metadata is returned in the response.

If you retry and the resource doesn't exist, the response is empty.

In both cases, the retry succeeds.

Idempotent delete APIs:

Asynchronous idempotent delete APIs

Given a request action that has succeeded:

If you retry the request with an API from this group, if the original request delete operation status is DELETE_IN_PROGRESS, the retry returns the resource detail data in the response without performing any further actions.

If the original request delete operation is complete, a retry returns an empty response.

Asynchronous idempotent delete APIs:

" + "documentation":"

This is the Proton Service API Reference. It provides descriptions, syntax and usage examples for each of the actions and data types for the Proton service.

The documentation for each action shows the Query API request parameters and the XML response.

Alternatively, you can use the Amazon Web Services CLI to access an API. For more information, see the Amazon Web Services Command Line Interface User Guide.

The Proton service is a two-pronged automation framework. Administrators create service templates to provide standardized infrastructure and deployment tooling for serverless and container based applications. Developers, in turn, select from the available service templates to automate their application or service deployments.

Because administrators define the infrastructure and tooling that Proton deploys and manages, they need permissions to use all of the listed API operations.

When developers select a specific infrastructure and tooling set, Proton deploys their applications. To monitor their applications that are running on Proton, developers need permissions to the service create, list, update and delete API operations and the service instance list and update API operations.

To learn more about Proton administration, see the Proton Administrator Guide.

To learn more about deploying serverless and containerized applications on Proton, see the Proton User Guide.

Ensuring Idempotency

When you make a mutating API request, the request typically returns a result before the asynchronous workflows of the operation are complete. Operations might also time out or encounter other server issues before they're complete, even if the request already returned a result. This might make it difficult to determine whether the request succeeded. Moreover, you might need to retry the request multiple times to ensure that the operation completes successfully. However, if the original request and the subsequent retries are successful, the operation occurs multiple times. This means that you might create more resources than you intended.

Idempotency ensures that an API request action completes no more than one time. With an idempotent request, if the original request action completes successfully, any subsequent retries complete successfully without performing any further actions. However, the result might contain updated information, such as the current creation status.

The following lists of APIs are grouped according to methods that ensure idempotency.

Idempotent create APIs with a client token

The API actions in this list support idempotency with the use of a client token. The corresponding Amazon Web Services CLI commands also support idempotency using a client token. A client token is a unique, case-sensitive string of up to 64 ASCII characters. To make an idempotent API request using one of these actions, specify a client token in the request. We recommend that you don't reuse the same client token for other API requests. If you don’t provide a client token for these APIs, a default client token is automatically provided by SDKs.

Given a request action that has succeeded:

If you retry the request using the same client token and the same parameters, the retry succeeds without performing any further actions other than returning the original resource detail data in the response.

If you retry the request using the same client token, but one or more of the parameters are different, the retry throws a ValidationException with an IdempotentParameterMismatch error.

Client tokens expire eight hours after a request is made. If you retry the request with the expired token, a new resource is created.

If the original resource is deleted and you retry the request, a new resource is created.

Idempotent create APIs with a client token:

Idempotent create APIs

Given a request action that has succeeded:

If you retry the request with an API from this group, and the original resource hasn't been modified, the retry succeeds without performing any further actions other than returning the original resource detail data in the response.

If the original resource has been modified, the retry throws a ConflictException.

If you retry with different input parameters, the retry throws a ValidationException with an IdempotentParameterMismatch error.

Idempotent create APIs:

Idempotent delete APIs

Given a request action that has succeeded:

When you retry the request with an API from this group and the resource was deleted, its metadata is returned in the response.

If you retry and the resource doesn't exist, the response is empty.

In both cases, the retry succeeds.

Idempotent delete APIs:

Asynchronous idempotent delete APIs

Given a request action that has succeeded:

If you retry the request with an API from this group, if the original request delete operation status is DELETE_IN_PROGRESS, the retry returns the resource detail data in the response without performing any further actions.

If the original request delete operation is complete, a retry returns an empty response.

Asynchronous idempotent delete APIs:

" } diff --git a/botocore/data/timestream-query/2018-11-01/paginators-1.json b/botocore/data/timestream-query/2018-11-01/paginators-1.json index cff408061f..c497694235 100644 --- a/botocore/data/timestream-query/2018-11-01/paginators-1.json +++ b/botocore/data/timestream-query/2018-11-01/paginators-1.json @@ -10,6 +10,18 @@ ], "output_token": "NextToken", "result_key": "Rows" + }, + "ListScheduledQueries": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "ScheduledQueries" + }, + "ListTagsForResource": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Tags" } } } diff --git a/botocore/data/timestream-query/2018-11-01/service-2.json b/botocore/data/timestream-query/2018-11-01/service-2.json index fd388619dc..1fb30ad505 100644 --- a/botocore/data/timestream-query/2018-11-01/service-2.json +++ b/botocore/data/timestream-query/2018-11-01/service-2.json @@ -29,7 +29,47 @@ {"shape":"ValidationException"}, {"shape":"InvalidEndpointException"} ], - "documentation":"

Cancels a query that has been issued. Cancellation is guaranteed only if the query has not completed execution before the cancellation request was issued. Because cancellation is an idempotent operation, subsequent cancellation requests will return a CancellationMessage, indicating that the query has already been canceled.

", + "documentation":"

Cancels a query that has been issued. Cancellation is provided only if the query has not completed running before the cancellation request was issued. Because cancellation is an idempotent operation, subsequent cancellation requests will return a CancellationMessage, indicating that the query has already been canceled. See code sample for details.

", + "endpointdiscovery":{"required":true}, + "idempotent":true + }, + "CreateScheduledQuery":{ + "name":"CreateScheduledQuery", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateScheduledQueryRequest"}, + "output":{"shape":"CreateScheduledQueryResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"InvalidEndpointException"} + ], + "documentation":"

Create a scheduled query that will be run on your behalf at the configured schedule. Timestream assumes the execution role provided as part of the ScheduledQueryExecutionRoleArn parameter to run the query. You can use the NotificationConfiguration parameter to configure notification for your scheduled query operations.

", + "endpointdiscovery":{"required":true}, + "idempotent":true + }, + "DeleteScheduledQuery":{ + "name":"DeleteScheduledQuery", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteScheduledQueryRequest"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"InvalidEndpointException"} + ], + "documentation":"

Deletes a given scheduled query. This is an irreversible operation.

", "endpointdiscovery":{"required":true}, "idempotent":true }, @@ -46,9 +86,101 @@ {"shape":"ValidationException"}, {"shape":"ThrottlingException"} ], - "documentation":"

DescribeEndpoints returns a list of available endpoints to make Timestream API calls against. This API is available through both Write and Query.

Because Timestream’s SDKs are designed to transparently work with the service’s architecture, including the management and mapping of the service endpoints, it is not recommended that you use this API unless:

For detailed information on how to use DescribeEndpoints, see The Endpoint Discovery Pattern and REST APIs.

", + "documentation":"

DescribeEndpoints returns a list of available endpoints to make Timestream API calls against. This API is available through both Write and Query.

Because the Timestream SDKs are designed to transparently work with the service’s architecture, including the management and mapping of the service endpoints, it is not recommended that you use this API unless:

For detailed information on how and when to use and implement DescribeEndpoints, see The Endpoint Discovery Pattern.

", "endpointoperation":true }, + "DescribeScheduledQuery":{ + "name":"DescribeScheduledQuery", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeScheduledQueryRequest"}, + "output":{"shape":"DescribeScheduledQueryResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"InvalidEndpointException"} + ], + "documentation":"

Provides detailed information about a scheduled query.

", + "endpointdiscovery":{"required":true} + }, + "ExecuteScheduledQuery":{ + "name":"ExecuteScheduledQuery", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ExecuteScheduledQueryRequest"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"InvalidEndpointException"} + ], + "documentation":"

You can use this API to run a scheduled query manually.

", + "endpointdiscovery":{"required":true}, + "idempotent":true + }, + "ListScheduledQueries":{ + "name":"ListScheduledQueries", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListScheduledQueriesRequest"}, + "output":{"shape":"ListScheduledQueriesResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"InvalidEndpointException"} + ], + "documentation":"

Gets a list of all scheduled queries in the caller's Amazon account and Region. ListScheduledQueries is eventually consistent.

", + "endpointdiscovery":{"required":true} + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"InvalidEndpointException"} + ], + "documentation":"

List all tags on a Timestream query resource.

", + "endpointdiscovery":{"required":true} + }, + "PrepareQuery":{ + "name":"PrepareQuery", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PrepareQueryRequest"}, + "output":{"shape":"PrepareQueryResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"InvalidEndpointException"} + ], + "documentation":"

A synchronous operation that allows you to submit a query with parameters to be stored by Timestream for later running. Timestream only supports using this operation with the PrepareQueryRequest$ValidateOnly set to true.

", + "endpointdiscovery":{"required":true}, + "idempotent":true + }, "Query":{ "name":"Query", "http":{ @@ -66,9 +198,62 @@ {"shape":"ValidationException"}, {"shape":"InvalidEndpointException"} ], - "documentation":"

Query is a synchronous operation that enables you to execute a query. Query will timeout after 60 seconds. You must update the default timeout in the SDK to support a timeout of 60 seconds. The result set will be truncated to 1MB. Service quotas apply. For more information, see Quotas in the Timestream Developer Guide.

", + "documentation":"

Query is a synchronous operation that enables you to run a query against your Amazon Timestream data. Query will time out after 60 seconds. You must update the default timeout in the SDK to support a timeout of 60 seconds. See the code sample for details.

Your query request will fail in the following cases:

", "endpointdiscovery":{"required":true}, "idempotent":true + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"InvalidEndpointException"} + ], + "documentation":"

Associate a set of tags with a Timestream resource. You can then activate these user-defined tags so that they appear on the Billing and Cost Management console for cost allocation tracking.

", + "endpointdiscovery":{"required":true} + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidEndpointException"} + ], + "documentation":"

Removes the association of tags from a Timestream query resource.

", + "endpointdiscovery":{"required":true} + }, + "UpdateScheduledQuery":{ + "name":"UpdateScheduledQuery", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateScheduledQueryRequest"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"InvalidEndpointException"} + ], + "documentation":"

Update a scheduled query.

", + "endpointdiscovery":{"required":true} } }, "shapes":{ @@ -81,13 +266,18 @@ "exception":true, "synthetic":true }, + "AmazonResourceName":{ + "type":"string", + "max":2048, + "min":1 + }, "CancelQueryRequest":{ "type":"structure", "required":["QueryId"], "members":{ "QueryId":{ "shape":"QueryId", - "documentation":"

The id of the query that needs to be cancelled. QueryID is returned as part of QueryResult.

" + "documentation":"

The ID of the query that needs to be cancelled. QueryID is returned as part of the query result.

" } } }, @@ -106,6 +296,12 @@ "min":32, "sensitive":true }, + "ClientToken":{ + "type":"string", + "max":128, + "min":32, + "sensitive":true + }, "ColumnInfo":{ "type":"structure", "required":["Type"], @@ -116,10 +312,10 @@ }, "Type":{ "shape":"Type", - "documentation":"

The data type of the result set column. The data type can be a scalar or complex. Scalar data types are integers, strings, doubles, booleans, and others. Complex data types are types such as arrays, rows, and others.

" + "documentation":"

The data type of the result set column. The data type can be a scalar or complex. Scalar data types are integers, strings, doubles, Booleans, and others. Complex data types are types such as arrays, rows, and others.

" } }, - "documentation":"

Contains the meta data for query results such as the column names, data types, and other attributes.

" + "documentation":"

Contains the metadata for query results such as the column names, data types, and other attributes.

" }, "ColumnInfoList":{ "type":"list", @@ -133,16 +329,80 @@ "documentation":"

Unable to poll results for a cancelled query.

", "exception":true }, + "CreateScheduledQueryRequest":{ + "type":"structure", + "required":[ + "Name", + "QueryString", + "ScheduleConfiguration", + "NotificationConfiguration", + "ScheduledQueryExecutionRoleArn", + "ErrorReportConfiguration" + ], + "members":{ + "Name":{ + "shape":"ScheduledQueryName", + "documentation":"

Name of the scheduled query.

" + }, + "QueryString":{ + "shape":"QueryString", + "documentation":"

The query string to run. Parameter names can be specified in the query string by using the @ character followed by an identifier. The named parameter @scheduled_runtime is reserved and can be used in the query to get the time at which the query is scheduled to run.

The timestamp calculated according to the ScheduleConfiguration parameter will be the value of the @scheduled_runtime parameter for each query run. For example, consider an instance of a scheduled query executing on 2021-12-01 00:00:00. For this instance, the @scheduled_runtime parameter is initialized to the timestamp 2021-12-01 00:00:00 when invoking the query.

" + }, + "ScheduleConfiguration":{ + "shape":"ScheduleConfiguration", + "documentation":"

The schedule configuration for the query.

" + }, + "NotificationConfiguration":{ + "shape":"NotificationConfiguration", + "documentation":"

Notification configuration for the scheduled query. A notification is sent by Timestream when a query run finishes, when the state is updated or when you delete it.

" + }, + "TargetConfiguration":{ + "shape":"TargetConfiguration", + "documentation":"

Configuration used for writing the result of a query.

" + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

Using a ClientToken makes the call to CreateScheduledQuery idempotent; in other words, making the same request repeatedly will produce the same result. Making multiple identical CreateScheduledQuery requests has the same effect as making a single request.

", + "idempotencyToken":true + }, + "ScheduledQueryExecutionRoleArn":{ + "shape":"AmazonResourceName", + "documentation":"

The ARN for the IAM role that Timestream will assume when running the scheduled query.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

A list of key-value pairs to label the scheduled query.

" + }, + "KmsKeyId":{ + "shape":"StringValue2048", + "documentation":"

The Amazon KMS key used to encrypt the scheduled query resource, at-rest. If the Amazon KMS key is not specified, the scheduled query resource will be encrypted with a Timestream owned Amazon KMS key. To specify a KMS key, use the key ID, key ARN, alias name, or alias ARN. When using an alias name, prefix the name with alias/

If ErrorReportConfiguration uses SSE_KMS as encryption type, the same KmsKeyId is used to encrypt the error report at rest.

" + }, + "ErrorReportConfiguration":{ + "shape":"ErrorReportConfiguration", + "documentation":"

Configuration for error reporting. Error reports will be generated when a problem is encountered when writing the query results.

" + } + } + }, + "CreateScheduledQueryResponse":{ + "type":"structure", + "required":["Arn"], + "members":{ + "Arn":{ + "shape":"AmazonResourceName", + "documentation":"

ARN for the created scheduled query.

" + } + } + }, "Datum":{ "type":"structure", "members":{ "ScalarValue":{ "shape":"ScalarValue", - "documentation":"

Indicates if the data point is a scalar value such as integer, string, double, or boolean.

" + "documentation":"

Indicates if the data point is a scalar value such as integer, string, double, or Boolean.

" }, "TimeSeriesValue":{ "shape":"TimeSeriesDataPointList", - "documentation":"

Indicates if the data point is of timeseries data type.

" + "documentation":"

Indicates if the data point is a timeseries data type.

" }, "ArrayValue":{ "shape":"DatumList", @@ -163,6 +423,16 @@ "type":"list", "member":{"shape":"Datum"} }, + "DeleteScheduledQueryRequest":{ + "type":"structure", + "required":["ScheduledQueryArn"], + "members":{ + "ScheduledQueryArn":{ + "shape":"AmazonResourceName", + "documentation":"

The ARN of the scheduled query.

" + } + } + }, "DescribeEndpointsRequest":{ "type":"structure", "members":{ @@ -178,6 +448,52 @@ } } }, + "DescribeScheduledQueryRequest":{ + "type":"structure", + "required":["ScheduledQueryArn"], + "members":{ + "ScheduledQueryArn":{ + "shape":"AmazonResourceName", + "documentation":"

The ARN of the scheduled query.

" + } + } + }, + "DescribeScheduledQueryResponse":{ + "type":"structure", + "required":["ScheduledQuery"], + "members":{ + "ScheduledQuery":{ + "shape":"ScheduledQueryDescription", + "documentation":"

The scheduled query.

" + } + } + }, + "DimensionMapping":{ + "type":"structure", + "required":[ + "Name", + "DimensionValueType" + ], + "members":{ + "Name":{ + "shape":"SchemaName", + "documentation":"

Column name from query result.

" + }, + "DimensionValueType":{ + "shape":"DimensionValueType", + "documentation":"

Type for the dimension.

" + } + }, + "documentation":"

This type is used to map column(s) from the query result to a dimension in the destination table.

" + }, + "DimensionMappingList":{ + "type":"list", + "member":{"shape":"DimensionMapping"} + }, + "DimensionValueType":{ + "type":"string", + "enum":["VARCHAR"] + }, "Double":{"type":"double"}, "Endpoint":{ "type":"structure", @@ -195,81 +511,378 @@ "documentation":"

The TTL for the endpoint, in minutes.

" } }, - "documentation":"

Represents an available endpoint against which to make API calls agaisnt, as well as the TTL for that endpoint.

" + "documentation":"

Represents an available endpoint against which to make API calls, as well as the TTL for that endpoint.

" }, "Endpoints":{ "type":"list", "member":{"shape":"Endpoint"} }, "ErrorMessage":{"type":"string"}, - "InternalServerException":{ + "ErrorReportConfiguration":{ "type":"structure", + "required":["S3Configuration"], "members":{ - "Message":{"shape":"ErrorMessage"} + "S3Configuration":{ + "shape":"S3Configuration", + "documentation":"

The S3 configuration for the error reports.

" + } }, - "documentation":"

Timestream was unable to fully process this request because of an internal server error.

", - "exception":true + "documentation":"

Configuration required for error reporting.

" }, - "InvalidEndpointException":{ + "ErrorReportLocation":{ "type":"structure", "members":{ - "Message":{"shape":"ErrorMessage"} + "S3ReportLocation":{ + "shape":"S3ReportLocation", + "documentation":"

The S3 location where error reports are written.

" + } }, - "documentation":"

The requested endpoint was invalid.

", - "exception":true + "documentation":"

This contains the location of the error report for a single scheduled query call.

" }, - "Long":{"type":"long"}, - "MaxQueryResults":{ - "type":"integer", - "box":true, - "max":1000, - "min":1 + "ExecuteScheduledQueryRequest":{ + "type":"structure", + "required":[ + "ScheduledQueryArn", + "InvocationTime" + ], + "members":{ + "ScheduledQueryArn":{ + "shape":"AmazonResourceName", + "documentation":"

ARN of the scheduled query.

" + }, + "InvocationTime":{ + "shape":"Time", + "documentation":"

The timestamp in UTC. The query will be run as if it were invoked at this timestamp.

" + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

Not used.

", + "idempotencyToken":true + } + } }, - "NullableBoolean":{ - "type":"boolean", - "box":true + "ExecutionStats":{ + "type":"structure", + "members":{ + "ExecutionTimeInMillis":{ + "shape":"Long", + "documentation":"

Total time, measured in milliseconds, that was needed for the scheduled query run to complete.

" + }, + "DataWrites":{ + "shape":"Long", + "documentation":"

Data writes metered for records ingested in a single scheduled query run.

" + }, + "BytesMetered":{ + "shape":"Long", + "documentation":"

Bytes metered for a single scheduled query run.

" + }, + "RecordsIngested":{ + "shape":"Long", + "documentation":"

The number of records ingested for a single scheduled query run.

" + }, + "QueryResultRows":{ + "shape":"Long", + "documentation":"

Number of rows present in the output from running a query before ingestion to destination data source.

" + } + }, + "documentation":"

Statistics for a single scheduled query run.

" }, - "QueryExecutionException":{ + "InternalServerException":{ "type":"structure", "members":{ "Message":{"shape":"ErrorMessage"} }, - "documentation":"

Timestream was unable to run the query successfully.

", + "documentation":"

Timestream was unable to fully process this request because of an internal server error.

", "exception":true }, - "QueryId":{ - "type":"string", - "max":64, - "min":1, - "pattern":"[a-zA-Z0-9]+" + "InvalidEndpointException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The requested endpoint was not valid.

", + "exception":true }, - "QueryRequest":{ + "ListScheduledQueriesRequest":{ "type":"structure", - "required":["QueryString"], "members":{ - "QueryString":{ - "shape":"QueryString", - "documentation":"

The query to be executed by Timestream.

" - }, - "ClientToken":{ - "shape":"ClientRequestToken", - "documentation":"

Unique, case-sensitive string of up to 64 ASCII characters that you specify when you make a Query request. Providing a ClientToken makes the call to Query idempotent, meaning that multiple identical calls have the same effect as one single call.

Your query request will fail in the following cases:

After 4 hours, any request with the same client token is treated as a new request.

", - "idempotencyToken":true + "MaxResults":{ + "shape":"MaxScheduledQueriesResults", + "documentation":"

The maximum number of items to return in the output. If the total number of items available is more than the value specified, a NextToken is provided in the output. To resume pagination, provide the NextToken value as the argument to the subsequent call to ListScheduledQueriesRequest.

" }, "NextToken":{ - "shape":"String", - "documentation":"

A pagination token passed to get a set of results.

" - }, - "MaxRows":{ - "shape":"MaxQueryResults", - "documentation":"

The total number of rows to return in the output. If the total number of rows available is more than the value specified, a NextToken is provided in the command's output. To resume pagination, provide the NextToken value in the starting-token argument of a subsequent command.

" + "shape":"NextScheduledQueriesResultsToken", + "documentation":"

A pagination token to resume pagination.

" } } }, - "QueryResponse":{ + "ListScheduledQueriesResponse":{ "type":"structure", - "required":[ - "QueryId", + "required":["ScheduledQueries"], + "members":{ + "ScheduledQueries":{ + "shape":"ScheduledQueryList", + "documentation":"

A list of scheduled queries.

" + }, + "NextToken":{ + "shape":"NextScheduledQueriesResultsToken", + "documentation":"

A token to specify where to start paginating. This is the NextToken from a previously truncated response.

" + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceARN"], + "members":{ + "ResourceARN":{ + "shape":"AmazonResourceName", + "documentation":"

The Timestream resource with tags to be listed. This value is an Amazon Resource Name (ARN).

" + }, + "MaxResults":{ + "shape":"MaxTagsForResourceResult", + "documentation":"

The maximum number of tags to return.

" + }, + "NextToken":{ + "shape":"NextTagsForResourceResultsToken", + "documentation":"

A pagination token to resume pagination.

" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "required":["Tags"], + "members":{ + "Tags":{ + "shape":"TagList", + "documentation":"

The tags currently associated with the Timestream resource.

" + }, + "NextToken":{ + "shape":"NextTagsForResourceResultsToken", + "documentation":"

A pagination token to resume pagination with a subsequent call to ListTagsForResourceResponse.

" + } + } + }, + "Long":{"type":"long"}, + "MaxQueryResults":{ + "type":"integer", + "box":true, + "max":1000, + "min":1 + }, + "MaxScheduledQueriesResults":{ + "type":"integer", + "box":true, + "max":1000, + "min":1 + }, + "MaxTagsForResourceResult":{ + "type":"integer", + "box":true, + "max":200, + "min":1 + }, + "MeasureValueType":{ + "type":"string", + "enum":[ + "BIGINT", + "BOOLEAN", + "DOUBLE", + "VARCHAR", + "MULTI" + ] + }, + "MixedMeasureMapping":{ + "type":"structure", + "required":["MeasureValueType"], + "members":{ + "MeasureName":{ + "shape":"SchemaName", + "documentation":"

Refers to the value of measure_name in a result row. This field is required if MeasureNameColumn is provided.

" + }, + "SourceColumn":{ + "shape":"SchemaName", + "documentation":"

This field refers to the source column from which measure-value is to be read for result materialization.

" + }, + "TargetMeasureName":{ + "shape":"SchemaName", + "documentation":"

Target measure name to be used. If not provided, the target measure name by default would be measure-name if provided, or sourceColumn otherwise.

" + }, + "MeasureValueType":{ + "shape":"MeasureValueType", + "documentation":"

Type of the value that is to be read from sourceColumn. If the mapping is for MULTI, use MeasureValueType.MULTI.

" + }, + "MultiMeasureAttributeMappings":{ + "shape":"MultiMeasureAttributeMappingList", + "documentation":"

Required when measureValueType is MULTI. Attribute mappings for MULTI value measures.

" + } + }, + "documentation":"

MixedMeasureMappings are mappings that can be used to ingest data into a mixture of narrow and multi measures in the derived table.

" + }, + "MixedMeasureMappingList":{ + "type":"list", + "member":{"shape":"MixedMeasureMapping"}, + "min":1 + }, + "MultiMeasureAttributeMapping":{ + "type":"structure", + "required":[ + "SourceColumn", + "MeasureValueType" + ], + "members":{ + "SourceColumn":{ + "shape":"SchemaName", + "documentation":"

Source column from where the attribute value is to be read.

" + }, + "TargetMultiMeasureAttributeName":{ + "shape":"SchemaName", + "documentation":"

Custom name to be used for attribute name in derived table. If not provided, source column name would be used.

" + }, + "MeasureValueType":{ + "shape":"ScalarMeasureValueType", + "documentation":"

Type of the attribute to be read from the source column.

" + } + }, + "documentation":"

Attribute mapping for MULTI value measures.

" + }, + "MultiMeasureAttributeMappingList":{ + "type":"list", + "member":{"shape":"MultiMeasureAttributeMapping"}, + "min":1 + }, + "MultiMeasureMappings":{ + "type":"structure", + "required":["MultiMeasureAttributeMappings"], + "members":{ + "TargetMultiMeasureName":{ + "shape":"SchemaName", + "documentation":"

The name of the target multi-measure in the derived table. This input is required when measureNameColumn is not provided. If MeasureNameColumn is provided, then the value from that column will be used as the multi-measure name.

" + }, + "MultiMeasureAttributeMappings":{ + "shape":"MultiMeasureAttributeMappingList", + "documentation":"

Required. Attribute mappings to be used for mapping query results to ingest data for multi-measure attributes.

" + } + }, + "documentation":"

Only one of MixedMeasureMappings or MultiMeasureMappings is to be provided. MultiMeasureMappings can be used to ingest data as multi measures in the derived table.

" + }, + "NextScheduledQueriesResultsToken":{"type":"string"}, + "NextTagsForResourceResultsToken":{"type":"string"}, + "NotificationConfiguration":{ + "type":"structure", + "required":["SnsConfiguration"], + "members":{ + "SnsConfiguration":{ + "shape":"SnsConfiguration", + "documentation":"

Details on SNS configuration.

" + } + }, + "documentation":"

Notification configuration for a scheduled query. A notification is sent by Timestream when a scheduled query is created, its state is updated or when it is deleted.

" + }, + "NullableBoolean":{ + "type":"boolean", + "box":true + }, + "PaginationToken":{ + "type":"string", + "max":2048, + "min":1 + }, + "ParameterMapping":{ + "type":"structure", + "required":[ + "Name", + "Type" + ], + "members":{ + "Name":{ + "shape":"String", + "documentation":"

Parameter name.

" + }, + "Type":{"shape":"Type"} + }, + "documentation":"

Mapping for named parameters.

" + }, + "ParameterMappingList":{ + "type":"list", + "member":{"shape":"ParameterMapping"} + }, + "PrepareQueryRequest":{ + "type":"structure", + "required":["QueryString"], + "members":{ + "QueryString":{ + "shape":"QueryString", + "documentation":"

The Timestream query string that you want to use as a prepared statement. Parameter names can be specified in the query string by using the @ character followed by an identifier.

" + }, + "ValidateOnly":{ + "shape":"NullableBoolean", + "documentation":"

By setting this value to true, Timestream will only validate that the query string is a valid Timestream query, and not store the prepared query for later use.

" + } + } + }, + "PrepareQueryResponse":{ + "type":"structure", + "required":[ + "QueryString", + "Columns", + "Parameters" + ], + "members":{ + "QueryString":{ + "shape":"QueryString", + "documentation":"

The query string that you want to prepare.

" + }, + "Columns":{ + "shape":"SelectColumnList", + "documentation":"

A list of SELECT clause columns of the submitted query string.

" + }, + "Parameters":{ + "shape":"ParameterMappingList", + "documentation":"

A list of parameters used in the submitted query string.

" + } + } + }, + "QueryExecutionException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

Timestream was unable to run the query successfully.

", + "exception":true + }, + "QueryId":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[a-zA-Z0-9]+" + }, + "QueryRequest":{ + "type":"structure", + "required":["QueryString"], + "members":{ + "QueryString":{ + "shape":"QueryString", + "documentation":"

The query to be run by Timestream.

" + }, + "ClientToken":{ + "shape":"ClientRequestToken", + "documentation":"

Unique, case-sensitive string of up to 64 ASCII characters specified when a Query request is made. Providing a ClientToken makes the call to Query idempotent. This means that running the same query repeatedly will produce the same result. In other words, making multiple identical Query requests has the same effect as making a single request. When using ClientToken in a query, note the following:

", + "idempotencyToken":true + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

A pagination token used to return a set of results. When the Query API is invoked using NextToken, that particular invocation is assumed to be a subsequent invocation of a prior call to Query, and a result set is returned. However, if the Query invocation only contains the ClientToken, that invocation of Query is assumed to be a new query run.

Note the following when using NextToken in a query:

" + }, + "MaxRows":{ + "shape":"MaxQueryResults", + "documentation":"

The total number of rows to be returned in the Query output. The initial run of Query with a MaxRows value specified will return the result set of the query in two cases:

Otherwise, the initial invocation of Query only returns a NextToken, which can then be used in subsequent calls to fetch the result set. To resume pagination, provide the NextToken value in the subsequent command.

If the row size is large (e.g. a row has many columns), Timestream may return fewer rows to keep the response size from exceeding the 1 MB limit. If MaxRows is not provided, Timestream will send the necessary number of rows to meet the 1 MB limit.

" + } + } + }, + "QueryResponse":{ + "type":"structure", + "required":[ + "QueryId", "Rows", "ColumnInfo" ], @@ -279,7 +892,7 @@ "documentation":"

A unique ID for the given query.

" }, "NextToken":{ - "shape":"String", + "shape":"PaginationToken", "documentation":"

A pagination token that can be used again on a Query call to get the next set of results.

" }, "Rows":{ @@ -292,7 +905,7 @@ }, "QueryStatus":{ "shape":"QueryStatus", - "documentation":"

Information about the status of the query, including progress and bytes scannned.

" + "documentation":"

Information about the status of the query, including progress and bytes scanned.

" } } }, @@ -309,15 +922,30 @@ }, "CumulativeBytesMetered":{ "shape":"Long", - "documentation":"

The amount of data scanned by the query in bytes that you will be charged for. This is a cumulative sum and represents the total amount of data that you will be charged for since the query was started. The charge is applied only once and is either applied when the query completes execution or when the query is cancelled.

" + "documentation":"

The amount of data scanned by the query in bytes that you will be charged for. This is a cumulative sum and represents the total amount of data that you will be charged for since the query was started. The charge is applied only once and is either applied when the query completes running or when the query is cancelled.

" } }, - "documentation":"

Information about the status of the query, including progress and bytes scannned.

" + "documentation":"

Information about the status of the query, including progress and bytes scanned.

" }, "QueryString":{ "type":"string", + "max":262144, + "min":1, "sensitive":true }, + "ResourceName":{"type":"string"}, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"}, + "ScheduledQueryArn":{ + "shape":"AmazonResourceName", + "documentation":"

The ARN of the scheduled query.

" + } + }, + "documentation":"

The requested resource could not be found.

", + "exception":true + }, "Row":{ "type":"structure", "required":["Data"], @@ -333,6 +961,68 @@ "type":"list", "member":{"shape":"Row"} }, + "S3BucketName":{ + "type":"string", + "max":63, + "min":3, + "pattern":"[a-z0-9][\\.\\-a-z0-9]{1,61}[a-z0-9]" + }, + "S3Configuration":{ + "type":"structure", + "required":["BucketName"], + "members":{ + "BucketName":{ + "shape":"S3BucketName", + "documentation":"

Name of the S3 bucket under which error reports will be created.

" + }, + "ObjectKeyPrefix":{ + "shape":"S3ObjectKeyPrefix", + "documentation":"

Prefix for the error report key. Timestream by default adds the following prefix to the error report path.

" + }, + "EncryptionOption":{ + "shape":"S3EncryptionOption", + "documentation":"

Encryption at rest options for the error reports. If no encryption option is specified, Timestream will choose SSE_S3 as default.

" + } + }, + "documentation":"

Details on S3 location for error reports that result from running a query.

" + }, + "S3EncryptionOption":{ + "type":"string", + "enum":[ + "SSE_S3", + "SSE_KMS" + ] + }, + "S3ObjectKey":{"type":"string"}, + "S3ObjectKeyPrefix":{ + "type":"string", + "max":896, + "min":1, + "pattern":"[a-zA-Z0-9|!\\-_*'\\(\\)]([a-zA-Z0-9]|[!\\-_*'\\(\\)\\/.])+" + }, + "S3ReportLocation":{ + "type":"structure", + "members":{ + "BucketName":{ + "shape":"S3BucketName", + "documentation":"

S3 bucket name.

" + }, + "ObjectKey":{ + "shape":"S3ObjectKey", + "documentation":"

S3 key.

" + } + }, + "documentation":"

S3 report location for the scheduled query run.

" + }, + "ScalarMeasureValueType":{ + "type":"string", + "enum":[ + "BIGINT", + "BOOLEAN", + "DOUBLE", + "VARCHAR" + ] + }, "ScalarType":{ "type":"string", "enum":[ @@ -350,8 +1040,340 @@ ] }, "ScalarValue":{"type":"string"}, + "ScheduleConfiguration":{ + "type":"structure", + "required":["ScheduleExpression"], + "members":{ + "ScheduleExpression":{ + "shape":"ScheduleExpression", + "documentation":"

An expression that denotes when to trigger the scheduled query run. This can be a cron expression or a rate expression.

" + } + }, + "documentation":"

Configuration of the schedule of the query.

" + }, + "ScheduleExpression":{ + "type":"string", + "max":256, + "min":1 + }, + "ScheduledQuery":{ + "type":"structure", + "required":[ + "Arn", + "Name", + "State" + ], + "members":{ + "Arn":{ + "shape":"AmazonResourceName", + "documentation":"

The Amazon Resource Name.

" + }, + "Name":{ + "shape":"ScheduledQueryName", + "documentation":"

The name of the scheduled query.

" + }, + "CreationTime":{ + "shape":"Time", + "documentation":"

The creation time of the scheduled query.

" + }, + "State":{ + "shape":"ScheduledQueryState", + "documentation":"

State of scheduled query.

" + }, + "PreviousInvocationTime":{ + "shape":"Time", + "documentation":"

The last time the scheduled query was run.

" + }, + "NextInvocationTime":{ + "shape":"Time", + "documentation":"

The next time the scheduled query is to be run.

" + }, + "ErrorReportConfiguration":{ + "shape":"ErrorReportConfiguration", + "documentation":"

Configuration for scheduled query error reporting.

" + }, + "TargetDestination":{ + "shape":"TargetDestination", + "documentation":"

Target data source where final scheduled query result will be written.

" + }, + "LastRunStatus":{ + "shape":"ScheduledQueryRunStatus", + "documentation":"

Status of the last scheduled query run.

" + } + }, + "documentation":"

Scheduled Query

" + }, + "ScheduledQueryDescription":{ + "type":"structure", + "required":[ + "Arn", + "Name", + "QueryString", + "State", + "ScheduleConfiguration", + "NotificationConfiguration" + ], + "members":{ + "Arn":{ + "shape":"AmazonResourceName", + "documentation":"

Scheduled query ARN.

" + }, + "Name":{ + "shape":"ScheduledQueryName", + "documentation":"

Name of the scheduled query.

" + }, + "QueryString":{ + "shape":"QueryString", + "documentation":"

The query to be run.

" + }, + "CreationTime":{ + "shape":"Time", + "documentation":"

Creation time of the scheduled query.

" + }, + "State":{ + "shape":"ScheduledQueryState", + "documentation":"

State of the scheduled query.

" + }, + "PreviousInvocationTime":{ + "shape":"Time", + "documentation":"

Last time the query was run.

" + }, + "NextInvocationTime":{ + "shape":"Time", + "documentation":"

The next time the scheduled query is scheduled to run.

" + }, + "ScheduleConfiguration":{ + "shape":"ScheduleConfiguration", + "documentation":"

Schedule configuration.

" + }, + "NotificationConfiguration":{ + "shape":"NotificationConfiguration", + "documentation":"

Notification configuration.

" + }, + "TargetConfiguration":{ + "shape":"TargetConfiguration", + "documentation":"

Scheduled query target store configuration.

" + }, + "ScheduledQueryExecutionRoleArn":{ + "shape":"AmazonResourceName", + "documentation":"

IAM role that Timestream uses to run the scheduled query.

" + }, + "KmsKeyId":{ + "shape":"StringValue2048", + "documentation":"

A customer provided KMS key used to encrypt the scheduled query resource.

" + }, + "ErrorReportConfiguration":{ + "shape":"ErrorReportConfiguration", + "documentation":"

Error-reporting configuration for the scheduled query.

" + }, + "LastRunSummary":{ + "shape":"ScheduledQueryRunSummary", + "documentation":"

Runtime summary for the last scheduled query run.

" + }, + "RecentlyFailedRuns":{ + "shape":"ScheduledQueryRunSummaryList", + "documentation":"

Runtime summary for the last five failed scheduled query runs.

" + } + }, + "documentation":"

Structure that describes scheduled query.

" + }, + "ScheduledQueryList":{ + "type":"list", + "member":{"shape":"ScheduledQuery"} + }, + "ScheduledQueryName":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[a-zA-Z0-9_.-]+" + }, + "ScheduledQueryRunStatus":{ + "type":"string", + "enum":[ + "AUTO_TRIGGER_SUCCESS", + "AUTO_TRIGGER_FAILURE", + "MANUAL_TRIGGER_SUCCESS", + "MANUAL_TRIGGER_FAILURE" + ] + }, + "ScheduledQueryRunSummary":{ + "type":"structure", + "members":{ + "InvocationTime":{ + "shape":"Time", + "documentation":"

InvocationTime for this run. This is the time at which the query is scheduled to run. Parameter @scheduled_runtime can be used in the query to get the value.

" + }, + "TriggerTime":{ + "shape":"Time", + "documentation":"

The actual time when the query was run.

" + }, + "RunStatus":{ + "shape":"ScheduledQueryRunStatus", + "documentation":"

The status of a scheduled query run.

" + }, + "ExecutionStats":{ + "shape":"ExecutionStats", + "documentation":"

Runtime statistics for a scheduled run.

" + }, + "ErrorReportLocation":{ + "shape":"ErrorReportLocation", + "documentation":"

S3 location for error report.

" + }, + "FailureReason":{ + "shape":"ErrorMessage", + "documentation":"

Error message for the scheduled query in case of failure. You might have to look at the error report to get more detailed error reasons.

" + } + }, + "documentation":"

Run summary for the scheduled query

" + }, + "ScheduledQueryRunSummaryList":{ + "type":"list", + "member":{"shape":"ScheduledQueryRunSummary"} + }, + "ScheduledQueryState":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, + "SchemaName":{"type":"string"}, + "SelectColumn":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"String", + "documentation":"

Name of the column.

" + }, + "Type":{"shape":"Type"}, + "DatabaseName":{ + "shape":"ResourceName", + "documentation":"

Database that has this column.

" + }, + "TableName":{ + "shape":"ResourceName", + "documentation":"

Table within the database that has this column.

" + }, + "Aliased":{ + "shape":"NullableBoolean", + "documentation":"

True, if the column name was aliased by the query. False otherwise.

" + } + }, + "documentation":"

Details of the column that is returned by the query.

" + }, + "SelectColumnList":{ + "type":"list", + "member":{"shape":"SelectColumn"} + }, "ServiceErrorMessage":{"type":"string"}, + "ServiceQuotaExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

You have exceeded the service quota.

", + "exception":true + }, + "SnsConfiguration":{ + "type":"structure", + "required":["TopicArn"], + "members":{ + "TopicArn":{ + "shape":"AmazonResourceName", + "documentation":"

SNS topic ARN that the scheduled query status notifications will be sent to.

" + } + }, + "documentation":"

Details on SNS that are required to send the notification.

" + }, "String":{"type":"string"}, + "StringValue2048":{ + "type":"string", + "max":2048, + "min":1 + }, + "Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{ + "shape":"TagKey", + "documentation":"

The key of the tag. Tag keys are case sensitive.

" + }, + "Value":{ + "shape":"TagValue", + "documentation":"

The value of the tag. Tag values are case sensitive and can be null.

" + } + }, + "documentation":"

A tag is a label that you assign to a Timestream database and/or table. Each tag consists of a key and an optional value, both of which you define. Tags enable you to categorize databases and/or tables, for example, by purpose, owner, or environment.

" + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":0 + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":200, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceARN", + "Tags" + ], + "members":{ + "ResourceARN":{ + "shape":"AmazonResourceName", + "documentation":"

Identifies the Timestream resource to which tags should be added. This value is an Amazon Resource Name (ARN).

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags to be assigned to the Timestream resource.

" + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0 + }, + "TargetConfiguration":{ + "type":"structure", + "required":["TimestreamConfiguration"], + "members":{ + "TimestreamConfiguration":{ + "shape":"TimestreamConfiguration", + "documentation":"

Configuration needed to write data into the Timestream database and table.

" + } + }, + "documentation":"

Configuration used for writing the output of a query.

" + }, + "TargetDestination":{ + "type":"structure", + "members":{ + "TimestreamDestination":{ + "shape":"TimestreamDestination", + "documentation":"

Query result destination details for Timestream data source.

" + } + }, + "documentation":"

Destination details to write data for a target data source. Current supported data source is Timestream.

" + }, "ThrottlingException":{ "type":"structure", "members":{ @@ -360,6 +1382,7 @@ "documentation":"

The request was denied due to request throttling.

", "exception":true }, + "Time":{"type":"timestamp"}, "TimeSeriesDataPoint":{ "type":"structure", "required":[ @@ -376,19 +1399,73 @@ "documentation":"

The measure value for the data point.

" } }, - "documentation":"

The timeseries datatype represents the values of a measure over time. A time series is an array of rows of timestamps and measure values, with rows sorted in ascending order of time. A TimeSeriesDataPoint is a single data point in the timeseries. It represents a tuple of (time, measure value) in a timeseries.

" + "documentation":"

The timeseries data type represents the values of a measure over time. A time series is an array of rows of timestamps and measure values, with rows sorted in ascending order of time. A TimeSeriesDataPoint is a single data point in the time series. It represents a tuple of (time, measure value) in a time series.

" }, "TimeSeriesDataPointList":{ "type":"list", "member":{"shape":"TimeSeriesDataPoint"} }, "Timestamp":{"type":"string"}, + "TimestreamConfiguration":{ + "type":"structure", + "required":[ + "DatabaseName", + "TableName", + "TimeColumn", + "DimensionMappings" + ], + "members":{ + "DatabaseName":{ + "shape":"ResourceName", + "documentation":"

Name of Timestream database to which the query result will be written.

" + }, + "TableName":{ + "shape":"ResourceName", + "documentation":"

Name of Timestream table that the query result will be written to. The table should be within the same database that is provided in Timestream configuration.

" + }, + "TimeColumn":{ + "shape":"SchemaName", + "documentation":"

Column from query result that should be used as the time column in destination table. Column type for this should be TIMESTAMP.

" + }, + "DimensionMappings":{ + "shape":"DimensionMappingList", + "documentation":"

This is to allow mapping column(s) from the query result to the dimension in the destination table.

" + }, + "MultiMeasureMappings":{ + "shape":"MultiMeasureMappings", + "documentation":"

Multi-measure mappings.

" + }, + "MixedMeasureMappings":{ + "shape":"MixedMeasureMappingList", + "documentation":"

Specifies how to map measures to multi-measure records.

" + }, + "MeasureNameColumn":{ + "shape":"SchemaName", + "documentation":"

Name of the measure column.

" + } + }, + "documentation":"

Configuration to write data into Timestream database and table. This configuration allows the user to map the query result select columns into the destination table columns.

" + }, + "TimestreamDestination":{ + "type":"structure", + "members":{ + "DatabaseName":{ + "shape":"ResourceName", + "documentation":"

Timestream database name.

" + }, + "TableName":{ + "shape":"ResourceName", + "documentation":"

Timestream table name.

" + } + }, + "documentation":"

Destination for scheduled query.

" + }, "Type":{ "type":"structure", "members":{ "ScalarType":{ "shape":"ScalarType", - "documentation":"

Indicates if the column is of type string, integer, boolean, double, timestamp, date, time.

" + "documentation":"

Indicates if the column is of type string, integer, Boolean, double, timestamp, date, time.

" }, "ArrayColumnInfo":{ "shape":"ColumnInfo", @@ -403,7 +1480,46 @@ "documentation":"

Indicates if the column is a row.

" } }, - "documentation":"

Contains the data type of a column in a query result set. The data type can be scalar or complex. The supported scalar data types are integers, boolean, string, double, timestamp, date, time, and intervals. The supported complex data types are arrays, rows, and timeseries.

" + "documentation":"

Contains the data type of a column in a query result set. The data type can be scalar or complex. The supported scalar data types are integers, Boolean, string, double, timestamp, date, time, and intervals. The supported complex data types are arrays, rows, and timeseries.

" + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceARN", + "TagKeys" + ], + "members":{ + "ResourceARN":{ + "shape":"AmazonResourceName", + "documentation":"

The Timestream resource that the tags will be removed from. This value is an Amazon Resource Name (ARN).

" + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

A list of tags keys. Existing tags of the resource whose keys are members of this list will be removed from the Timestream resource.

" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateScheduledQueryRequest":{ + "type":"structure", + "required":[ + "ScheduledQueryArn", + "State" + ], + "members":{ + "ScheduledQueryArn":{ + "shape":"AmazonResourceName", + "documentation":"

ARN of the scheduled query.

" + }, + "State":{ + "shape":"ScheduledQueryState", + "documentation":"

State of the scheduled query.

" + } + } }, "ValidationException":{ "type":"structure", @@ -414,5 +1530,5 @@ "exception":true } }, - "documentation":"

" + "documentation":"Amazon Timestream Query

" } diff --git a/botocore/data/timestream-write/2018-11-01/service-2.json b/botocore/data/timestream-write/2018-11-01/service-2.json index 9ced833c86..64a6866b42 100644 --- a/botocore/data/timestream-write/2018-11-01/service-2.json +++ b/botocore/data/timestream-write/2018-11-01/service-2.json @@ -29,9 +29,10 @@ {"shape":"ServiceQuotaExceededException"}, {"shape":"ThrottlingException"}, {"shape":"InvalidEndpointException"}, - {"shape":"InternalServerException"} + {"shape":"InternalServerException"}, + {"shape":"InvalidEndpointException"} ], - "documentation":"

Creates a new Timestream database. If the KMS key is not specified, the database will be encrypted with a Timestream managed KMS key located in your account. Refer to AWS managed KMS keys for more info. Service quotas apply. For more information, see Access Management in the Timestream Developer Guide.

", + "documentation":"

Creates a new Timestream database. If the KMS key is not specified, the database will be encrypted with a Timestream managed KMS key located in your account. Refer to Amazon Web Services managed KMS keys for more info. Service quotas apply. See code sample for details.

", "endpointdiscovery":{"required":true} }, "CreateTable":{ @@ -49,10 +50,11 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ServiceQuotaExceededException"}, {"shape":"ThrottlingException"}, + {"shape":"InvalidEndpointException"}, {"shape":"InternalServerException"}, {"shape":"InvalidEndpointException"} ], - "documentation":"

The CreateTable operation adds a new table to an existing database in your account. In an AWS account, table names must be at least unique within each Region if they are in the same database. You may have identical table names in the same Region if the tables are in seperate databases. While creating the table, you must specify the table name, database name, and the retention properties. Service quotas apply. For more information, see Access Management in the Timestream Developer Guide.

", + "documentation":"

The CreateTable operation adds a new table to an existing database in your account. In an Amazon Web Services account, table names must be at least unique within each Region if they are in the same database. You may have identical table names in the same Region if the tables are in separate databases. While creating the table, you must specify the table name, database name, and the retention properties. Service quotas apply. See code sample for details.

", "endpointdiscovery":{"required":true} }, "DeleteDatabase":{ @@ -70,7 +72,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InvalidEndpointException"} ], - "documentation":"

Deletes a given Timestream database. This is an irreversible operation. After a database is deleted, the time series data from its tables cannot be recovered.

All tables in the database must be deleted first, or a ValidationException error will be thrown.

Due to the nature of distributed retries, the operation can return either success or a ResourceNotFoundException. Clients should consider them equivalent.

", + "documentation":"

Deletes a given Timestream database. This is an irreversible operation. After a database is deleted, the time series data from its tables cannot be recovered.

All tables in the database must be deleted first, or a ValidationException error will be thrown.

Due to the nature of distributed retries, the operation can return either success or a ResourceNotFoundException. Clients should consider them equivalent.

See code sample for details.

", "endpointdiscovery":{"required":true} }, "DeleteTable":{ @@ -88,7 +90,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InvalidEndpointException"} ], - "documentation":"

Deletes a given Timestream table. This is an irreversible operation. After a Timestream database table is deleted, the time series data stored in the table cannot be recovered.

Due to the nature of distributed retries, the operation can return either success or a ResourceNotFoundException. Clients should consider them equivalent.

", + "documentation":"

Deletes a given Timestream table. This is an irreversible operation. After a Timestream database table is deleted, the time series data stored in the table cannot be recovered.

Due to the nature of distributed retries, the operation can return either success or a ResourceNotFoundException. Clients should consider them equivalent.

See code sample for details.

", "endpointdiscovery":{"required":true} }, "DescribeDatabase":{ @@ -107,7 +109,7 @@ {"shape":"InternalServerException"}, {"shape":"InvalidEndpointException"} ], - "documentation":"

Returns information about the database, including the database name, time that the database was created, and the total number of tables found within the database. Service quotas apply. For more information, see Access Management in the Timestream Developer Guide.

", + "documentation":"

Returns information about the database, including the database name, time that the database was created, and the total number of tables found within the database. Service quotas apply. See code sample for details.

", "endpointdiscovery":{"required":true} }, "DescribeEndpoints":{ @@ -123,7 +125,7 @@ {"shape":"ValidationException"}, {"shape":"ThrottlingException"} ], - "documentation":"

DescribeEndpoints returns a list of available endpoints to make Timestream API calls against. This API is available through both Write and Query.

Because Timestream’s SDKs are designed to transparently work with the service’s architecture, including the management and mapping of the service endpoints, it is not recommended that you use this API unless:

For detailed information on how to use DescribeEndpoints, see The Endpoint Discovery Pattern and REST APIs.

", + "documentation":"

DescribeEndpoints returns a list of available endpoints to make Timestream API calls against. This API is available through both Write and Query.

Because the Timestream SDKs are designed to transparently work with the service’s architecture, including the management and mapping of the service endpoints, it is not recommended that you use this API unless:

For detailed information on how and when to use and implement DescribeEndpoints, see The Endpoint Discovery Pattern.

", "endpointoperation":true }, "DescribeTable":{ @@ -142,7 +144,7 @@ {"shape":"InternalServerException"}, {"shape":"InvalidEndpointException"} ], - "documentation":"

Returns information about the table, including the table name, database name, retention duration of the memory store and the magnetic store. Service quotas apply. For more information, see Access Management in the Timestream Developer Guide.

", + "documentation":"

Returns information about the table, including the table name, database name, retention duration of the memory store and the magnetic store. Service quotas apply. See code sample for details.

", "endpointdiscovery":{"required":true} }, "ListDatabases":{ @@ -160,7 +162,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InvalidEndpointException"} ], - "documentation":"

Returns a list of your Timestream databases. Service quotas apply. For more information, see Access Management in the Timestream Developer Guide.

", + "documentation":"

Returns a list of your Timestream databases. Service quotas apply. See code sample for details.

", "endpointdiscovery":{"required":true} }, "ListTables":{ @@ -179,7 +181,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InvalidEndpointException"} ], - "documentation":"

A list of tables, along with the name, status and retention properties of each table.

", + "documentation":"

A list of tables, along with the name, status and retention properties of each table. See code sample for details.

", "endpointdiscovery":{"required":true} }, "ListTagsForResource":{ @@ -252,7 +254,7 @@ {"shape":"InternalServerException"}, {"shape":"InvalidEndpointException"} ], - "documentation":"

Modifies the KMS key for an existing database. While updating the database, you must specify the database name and the identifier of the new KMS key to be used (KmsKeyId). If there are any concurrent UpdateDatabase requests, first writer wins.

", + "documentation":"

Modifies the KMS key for an existing database. While updating the database, you must specify the database name and the identifier of the new KMS key to be used (KmsKeyId). If there are any concurrent UpdateDatabase requests, first writer wins.

See code sample for details.

", "endpointdiscovery":{"required":true} }, "UpdateTable":{ @@ -271,7 +273,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InvalidEndpointException"} ], - "documentation":"

Modifies the retention duration of the memory store and magnetic store for your Timestream table. Note that the change in retention duration takes effect immediately. For example, if the retention period of the memory store was initially set to 2 hours and then changed to 24 hours, the memory store will be capable of holding 24 hours of data, but will be populated with 24 hours of data 22 hours after this change was made. Timestream does not retrieve data from the magnetic store to populate the memory store.

Service quotas apply. For more information, see Access Management in the Timestream Developer Guide.

", + "documentation":"

Modifies the retention duration of the memory store and magnetic store for your Timestream table. Note that the change in retention duration takes effect immediately. For example, if the retention period of the memory store was initially set to 2 hours and then changed to 24 hours, the memory store will be capable of holding 24 hours of data, but will be populated with 24 hours of data 22 hours after this change was made. Timestream does not retrieve data from the magnetic store to populate the memory store.

See code sample for details.

", "endpointdiscovery":{"required":true} }, "WriteRecords":{ @@ -281,6 +283,7 @@ "requestUri":"/" }, "input":{"shape":"WriteRecordsRequest"}, + "output":{"shape":"WriteRecordsResponse"}, "errors":[ {"shape":"InternalServerException"}, {"shape":"ThrottlingException"}, @@ -290,7 +293,7 @@ {"shape":"RejectedRecordsException"}, {"shape":"InvalidEndpointException"} ], - "documentation":"

The WriteRecords operation enables you to write your time series data into Timestream. You can specify a single data point or a batch of data points to be inserted into the system. Timestream offers you with a flexible schema that auto detects the column names and data types for your Timestream tables based on the dimension names and data types of the data points you specify when invoking writes into the database. Timestream support eventual consistency read semantics. This means that when you query data immediately after writing a batch of data into Timestream, the query results might not reflect the results of a recently completed write operation. The results may also include some stale data. If you repeat the query request after a short time, the results should return the latest data. Service quotas apply. For more information, see Access Management in the Timestream Developer Guide.

", + "documentation":"

The WriteRecords operation enables you to write your time series data into Timestream. You can specify a single data point or a batch of data points to be inserted into the system. Timestream offers you a flexible schema that auto detects the column names and data types for your Timestream tables based on the dimension names and data types of the data points you specify when invoking writes into the database. Timestream supports eventual consistency read semantics. This means that when you query data immediately after writing a batch of data into Timestream, the query results might not reflect the results of a recently completed write operation. The results may also include some stale data. If you repeat the query request after a short time, the results should return the latest data. Service quotas apply.

See code sample for details.

Upserts

You can use the Version parameter in a WriteRecords request to update data points. Timestream tracks a version number with each record. Version defaults to 1 when not specified for the record in the request. Timestream will update an existing record’s measure value along with its Version upon receiving a write request with a higher Version number for that record. Upon receiving an update request where the measure value is the same as that of the existing record, Timestream still updates Version, if it is greater than the existing value of Version. You can update a data point as many times as desired, as long as the value of Version continuously increases.

For example, suppose you write a new record without indicating Version in the request. Timestream will store this record, and set Version to 1. Now, suppose you try to update this record with a WriteRecords request of the same record with a different measure value but, like before, do not provide Version. In this case, Timestream will reject this update with a RejectedRecordsException since the updated record’s version is not greater than the existing value of Version. However, if you were to resend the update request with Version set to 2, Timestream would then succeed in updating the record’s value, and the Version would be set to 2. Next, suppose you sent a WriteRecords request with this same record and an identical measure value, but with Version set to 3. In this case, Timestream would only update Version to 3. Any further updates would need to send a version number greater than 3, or the update requests would receive a RejectedRecordsException.

", "endpointdiscovery":{"required":true} } }, @@ -309,6 +312,7 @@ "max":1011, "min":1 }, + "Boolean":{"type":"boolean"}, "ConflictException":{ "type":"structure", "required":["Message"], @@ -323,12 +327,12 @@ "required":["DatabaseName"], "members":{ "DatabaseName":{ - "shape":"ResourceName", + "shape":"ResourceCreateAPIName", "documentation":"

The name of the Timestream database.

" }, "KmsKeyId":{ "shape":"StringValue2048", - "documentation":"

The KMS key for the database. If the KMS key is not specified, the database will be encrypted with a Timestream managed KMS key located in your account. Refer to AWS managed KMS keys for more info.

" + "documentation":"

The KMS key for the database. If the KMS key is not specified, the database will be encrypted with a Timestream managed KMS key located in your account. Refer to Amazon Web Services managed KMS keys for more info.

" }, "Tags":{ "shape":"TagList", @@ -353,11 +357,11 @@ ], "members":{ "DatabaseName":{ - "shape":"ResourceName", + "shape":"ResourceCreateAPIName", "documentation":"

The name of the Timestream database.

" }, "TableName":{ - "shape":"ResourceName", + "shape":"ResourceCreateAPIName", "documentation":"

The name of the Timestream table.

" }, "RetentionProperties":{ @@ -367,6 +371,10 @@ "Tags":{ "shape":"TagList", "documentation":"

A list of key-value pairs to label the table.

" + }, + "MagneticStoreWriteProperties":{ + "shape":"MagneticStoreWriteProperties", + "documentation":"

Contains properties to set on the table when enabling magnetic store writes.

" } } }, @@ -509,11 +517,11 @@ ], "members":{ "Name":{ - "shape":"StringValue256", + "shape":"SchemaName", "documentation":"

Dimension represents the meta data attributes of the time series. For example, the name and availability zone of an EC2 instance or the name of the manufacturer of a wind turbine are dimensions.

For constraints on Dimension names, see Naming Constraints.

" }, "Value":{ - "shape":"StringValue2048", + "shape":"SchemaValue", "documentation":"

The value of the dimension.

" }, "DimensionValueType":{ @@ -555,6 +563,7 @@ "member":{"shape":"Endpoint"} }, "ErrorMessage":{"type":"string"}, + "Integer":{"type":"integer"}, "InternalServerException":{ "type":"structure", "required":["Message"], @@ -649,20 +658,74 @@ } }, "Long":{"type":"long"}, + "MagneticStoreRejectedDataLocation":{ + "type":"structure", + "members":{ + "S3Configuration":{ + "shape":"S3Configuration", + "documentation":"

Configuration of an S3 location to write error reports for records rejected, asynchronously, during magnetic store writes.

" + } + }, + "documentation":"

The location to write error reports for records rejected, asynchronously, during magnetic store writes.

" + }, "MagneticStoreRetentionPeriodInDays":{ "type":"long", "max":73000, "min":1 }, + "MagneticStoreWriteProperties":{ + "type":"structure", + "required":["EnableMagneticStoreWrites"], + "members":{ + "EnableMagneticStoreWrites":{ + "shape":"Boolean", + "documentation":"

A flag to enable magnetic store writes.

" + }, + "MagneticStoreRejectedDataLocation":{ + "shape":"MagneticStoreRejectedDataLocation", + "documentation":"

The location to write error reports for records rejected asynchronously during magnetic store writes.

" + } + }, + "documentation":"

The set of properties on a table for configuring magnetic store writes.

" + }, + "MeasureValue":{ + "type":"structure", + "required":[ + "Name", + "Value", + "Type" + ], + "members":{ + "Name":{ + "shape":"SchemaName", + "documentation":"

Name of the MeasureValue.

For constraints on MeasureValue names, refer to Naming Constraints in the Timestream developer guide.

" + }, + "Value":{ + "shape":"StringValue2048", + "documentation":"

Value for the MeasureValue.

" + }, + "Type":{ + "shape":"MeasureValueType", + "documentation":"

Contains the data type of the MeasureValue for the time series data point.

" + } + }, + "documentation":"

MeasureValue represents the data attribute of the time series. For example, the CPU utilization of an EC2 instance or the RPM of a wind turbine are measures. MeasureValue has both name and value.

MeasureValue is only allowed for type MULTI. Using MULTI type, you can pass multiple data attributes associated with the same time series in a single record

" + }, "MeasureValueType":{ "type":"string", "enum":[ "DOUBLE", "BIGINT", "VARCHAR", - "BOOLEAN" + "BOOLEAN", + "TIMESTAMP", + "MULTI" ] }, + "MeasureValues":{ + "type":"list", + "member":{"shape":"MeasureValue"} + }, "MemoryStoreRetentionPeriodInHours":{ "type":"long", "max":8766, @@ -682,7 +745,7 @@ "documentation":"

Contains the list of dimensions for time series data points.

" }, "MeasureName":{ - "shape":"StringValue256", + "shape":"SchemaName", "documentation":"

Measure represents the data attribute of the time series. For example, the CPU utilization of an EC2 instance or the RPM of a wind turbine are measures.

" }, "MeasureValue":{ @@ -691,7 +754,7 @@ }, "MeasureValueType":{ "shape":"MeasureValueType", - "documentation":"

Contains the data type of the measure value for the time series data point.

" + "documentation":"

Contains the data type of the measure value for the time series data point. Default type is DOUBLE.

" }, "Time":{ "shape":"StringValue256", @@ -699,15 +762,19 @@ }, "TimeUnit":{ "shape":"TimeUnit", - "documentation":"

The granularity of the timestamp unit. It indicates if the time value is in seconds, milliseconds, nanoseconds or other supported values.

" + "documentation":"

The granularity of the timestamp unit. It indicates if the time value is in seconds, milliseconds, nanoseconds or other supported values. Default is MILLISECONDS.

" }, "Version":{ "shape":"RecordVersion", - "documentation":"

64-bit attribute used for record updates. Write requests for duplicate data with a higher version number will update the existing measure value and version. In cases where the measure value is the same, Version will still be updated . Default value is to 1.

", + "documentation":"

64-bit attribute used for record updates. Write requests for duplicate data with a higher version number will update the existing measure value and version. In cases where the measure value is the same, Version will still be updated. Default value is 1.

Version must be 1 or greater, or you will receive a ValidationException error.

", "box":true + }, + "MeasureValues":{ + "shape":"MeasureValues", + "documentation":"

Contains the list of MeasureValue for time series data points.

This is only allowed for type MULTI. For scalar values, use MeasureValue attribute of the Record directly.

" } }, - "documentation":"

Record represents a time series data point being written into Timestream. Each record contains an array of dimensions. Dimensions represent the meta data attributes of a time series data point such as the instance name or availability zone of an EC2 instance. A record also contains the measure name which is the name of the measure being collected for example the CPU utilization of an EC2 instance. A record also contains the measure value and the value type which is the data type of the measure value. In addition, the record contains the timestamp when the measure was collected that the timestamp unit which represents the granularity of the timestamp.

" + "documentation":"

Record represents a time series data point being written into Timestream. Each record contains an array of dimensions. Dimensions represent the meta data attributes of a time series data point such as the instance name or availability zone of an EC2 instance. A record also contains the measure name which is the name of the measure being collected for example the CPU utilization of an EC2 instance. A record also contains the measure value and the value type which is the data type of the measure value. In addition, the record contains the timestamp when the measure was collected that the timestamp unit which represents the granularity of the timestamp.

Records have a Version field, which is a 64-bit long that you can use for updating data points. Writes of a duplicate record with the same dimension, timestamp, and measure name but different measure value will only succeed if the Version attribute of the record in the write request is higher than that of the existing record. Timestream defaults to a Version of 1 for records without the Version field.

" }, "RecordIndex":{"type":"integer"}, "RecordVersion":{"type":"long"}, @@ -717,6 +784,24 @@ "max":100, "min":1 }, + "RecordsIngested":{ + "type":"structure", + "members":{ + "Total":{ + "shape":"Integer", + "documentation":"

Total count of successfully ingested records.

" + }, + "MemoryStore":{ + "shape":"Integer", + "documentation":"

Count of records ingested into the memory store.

" + }, + "MagneticStore":{ + "shape":"Integer", + "documentation":"

Count of records ingested into the magnetic store.

" + } + }, + "documentation":"

Information on the records ingested by this request.

" + }, "RejectedRecord":{ "type":"structure", "members":{ @@ -726,7 +811,7 @@ }, "Reason":{ "shape":"ErrorMessage", - "documentation":"

The reason why a record was not successfully inserted into Timestream. Possible causes of failure include:

For more information, see Access Management in the Timestream Developer Guide.

" + "documentation":"

The reason why a record was not successfully inserted into Timestream. Possible causes of failure include:

For more information, see Access Management in the Timestream Developer Guide.

" }, "ExistingVersion":{ "shape":"RecordVersion", @@ -746,15 +831,14 @@ "Message":{"shape":"ErrorMessage"}, "RejectedRecords":{"shape":"RejectedRecords"} }, - "documentation":"

WriteRecords would throw this exception in the following cases:

For more information, see Access Management in the Timestream Developer Guide.

", + "documentation":"

WriteRecords would throw this exception in the following cases:

For more information, see Quotas in the Timestream Developer Guide.

", "exception":true }, - "ResourceName":{ + "ResourceCreateAPIName":{ "type":"string", - "max":64, - "min":3, "pattern":"[a-zA-Z0-9_.-]+" }, + "ResourceName":{"type":"string"}, "ResourceNotFoundException":{ "type":"structure", "members":{ @@ -781,6 +865,52 @@ }, "documentation":"

Retention properties contain the duration for which your time series data must be stored in the magnetic store and the memory store.

" }, + "S3BucketName":{ + "type":"string", + "max":63, + "min":3, + "pattern":"[a-z0-9][\\.\\-a-z0-9]{1,61}[a-z0-9]" + }, + "S3Configuration":{ + "type":"structure", + "members":{ + "BucketName":{ + "shape":"S3BucketName", + "documentation":"

Bucket name of the customer S3 bucket.

" + }, + "ObjectKeyPrefix":{ + "shape":"S3ObjectKeyPrefix", + "documentation":"

Object key prefix for the customer S3 location.

" + }, + "EncryptionOption":{ + "shape":"S3EncryptionOption", + "documentation":"

Encryption option for the customer S3 location. Options are S3 server side encryption with an S3-managed key or KMS managed key.

" + }, + "KmsKeyId":{ + "shape":"StringValue2048", + "documentation":"

KMS key ID for the customer S3 location when encrypting with a KMS managed key.

" + } + }, + "documentation":"

Configuration specifying an S3 location.

" + }, + "S3EncryptionOption":{ + "type":"string", + "enum":[ + "SSE_S3", + "SSE_KMS" + ] + }, + "S3ObjectKeyPrefix":{ + "type":"string", + "max":928, + "min":1, + "pattern":"[a-zA-Z0-9|!\\-_*'\\(\\)]([a-zA-Z0-9]|[!\\-_*'\\(\\)\\/.])+" + }, + "SchemaName":{ + "type":"string", + "min":1 + }, + "SchemaValue":{"type":"string"}, "ServiceQuotaExceededException":{ "type":"structure", "members":{ @@ -830,6 +960,10 @@ "LastUpdatedTime":{ "shape":"Date", "documentation":"

The time when the Timestream table was last updated.

" + }, + "MagneticStoreWriteProperties":{ + "shape":"MagneticStoreWriteProperties", + "documentation":"

Contains properties to set on the table when enabling magnetic store writes.

" } }, "documentation":"

Table represents a database table in Timestream. Tables contain one or more related time series. You can modify the retention duration of the memory store and the magnetic store for a table.

" @@ -974,8 +1108,7 @@ "type":"structure", "required":[ "DatabaseName", - "TableName", - "RetentionProperties" + "TableName" ], "members":{ "DatabaseName":{ @@ -984,11 +1117,15 @@ }, "TableName":{ "shape":"ResourceName", - "documentation":"

The name of the Timesream table.

" + "documentation":"

The name of the Timestream table.

" }, "RetentionProperties":{ "shape":"RetentionProperties", "documentation":"

The retention duration of the memory store and the magnetic store.

" + }, + "MagneticStoreWriteProperties":{ + "shape":"MagneticStoreWriteProperties", + "documentation":"

Contains properties to set on the table when enabling magnetic store writes.

" } } }, @@ -1024,18 +1161,27 @@ }, "TableName":{ "shape":"ResourceName", - "documentation":"

The name of the Timesream table.

" + "documentation":"

The name of the Timestream table.

" }, "CommonAttributes":{ "shape":"Record", - "documentation":"

A record containing the common measure and dimension attributes shared across all the records in the request. The measure and dimension attributes specified in here will be merged with the measure and dimension attributes in the records object when the data is written into Timestream.

" + "documentation":"

A record containing the common measure, dimension, time, and version attributes shared across all the records in the request. The measure and dimension attributes specified will be merged with the measure and dimension attributes in the records object when the data is written into Timestream. Dimensions may not overlap, or a ValidationException will be thrown. In other words, a record must contain dimensions with unique names.

" }, "Records":{ "shape":"Records", - "documentation":"

An array of records containing the unique dimension and measure attributes for each time series data point.

" + "documentation":"

An array of records containing the unique measure, dimension, time, and version attributes for each time series data point.

" + } + } + }, + "WriteRecordsResponse":{ + "type":"structure", + "members":{ + "RecordsIngested":{ + "shape":"RecordsIngested", + "documentation":"

Information on the records ingested by this request.

" } } } }, - "documentation":"

Amazon Timestream is a fast, scalable, fully managed time series database service that makes it easy to store and analyze trillions of time series data points per day. With Timestream, you can easily store and analyze IoT sensor data to derive insights from your IoT applications. You can analyze industrial telemetry to streamline equipment management and maintenance. You can also store and analyze log data and metrics to improve the performance and availability of your applications. Timestream is built from the ground up to effectively ingest, process, and store time series data. It organizes data to optimize query processing. It automatically scales based on the volume of data ingested and on the query volume to ensure you receive optimal performance while inserting and querying data. As your data grows over time, Timestream’s adaptive query processing engine spans across storage tiers to provide fast analysis while reducing costs.

" + "documentation":"Amazon Timestream Write

Amazon Timestream is a fast, scalable, fully managed time series database service that makes it easy to store and analyze trillions of time series data points per day. With Timestream, you can easily store and analyze IoT sensor data to derive insights from your IoT applications. You can analyze industrial telemetry to streamline equipment management and maintenance. You can also store and analyze log data and metrics to improve the performance and availability of your applications. Timestream is built from the ground up to effectively ingest, process, and store time series data. It organizes data to optimize query processing. It automatically scales based on the volume of data ingested and on the query volume to ensure you receive optimal performance while inserting and querying data. As your data grows over time, Timestream’s adaptive query processing engine spans across storage tiers to provide fast analysis while reducing costs.

" } diff --git a/botocore/data/translate/2017-07-01/service-2.json b/botocore/data/translate/2017-07-01/service-2.json index 081789a96c..f4ed2144df 100644 --- a/botocore/data/translate/2017-07-01/service-2.json +++ b/botocore/data/translate/2017-07-01/service-2.json @@ -423,11 +423,11 @@ "members":{ "Type":{ "shape":"EncryptionKeyType", - "documentation":"

The type of encryption key used by Amazon Translate to encrypt custom terminologies.

" + "documentation":"

The type of encryption key used by Amazon Translate to encrypt this object.

" }, "Id":{ "shape":"EncryptionKeyID", - "documentation":"

The Amazon Resource Name (ARN) of the encryption key being used to encrypt the custom terminology.

" + "documentation":"

The Amazon Resource Name (ARN) of the encryption key being used to encrypt this object.

" } }, "documentation":"

The encryption key used to encrypt this object.

" @@ -461,7 +461,7 @@ }, "DataLocation":{ "shape":"ParallelDataDataLocation", - "documentation":"

The Amazon S3 location of the most recent parallel data input file that was successfully imported into Amazon Translate. The location is returned as a presigned URL that has a 30 minute expiration.

Amazon Translate doesn't scan parallel data input files for the risk of CSV injection attacks.

CSV injection occurs when a .csv or .tsv file is altered so that a record contains malicious code. The record begins with a special character, such as =, +, -, or @. When the file is opened in a spreadsheet program, the program might interpret the record as a formula and run the code within it.

Before you download a parallel data input file from Amazon S3, ensure that you recognize the file and trust its creator.

" + "documentation":"

The Amazon S3 location of the most recent parallel data input file that was successfully imported into Amazon Translate. The location is returned as a presigned URL that has a 30 minute expiration.

Amazon Translate doesn't scan all input files for the risk of CSV injection attacks.

CSV injection occurs when a .csv or .tsv file is altered so that a record contains malicious code. The record begins with a special character, such as =, +, -, or @. When the file is opened in a spreadsheet program, the program might interpret the record as a formula and run the code within it.

Before you download an input file from Amazon S3, ensure that you recognize the file and trust its creator.

" }, "AuxiliaryDataLocation":{ "shape":"ParallelDataDataLocation", @@ -496,7 +496,7 @@ }, "TerminologyDataLocation":{ "shape":"TerminologyDataLocation", - "documentation":"

The data location of the custom terminology being retrieved. The custom terminology file is returned in a presigned url that has a 30 minute expiration.

" + "documentation":"

The Amazon S3 location of the most recent custom terminology input file that was successfully imported into Amazon Translate. The location is returned as a presigned URL that has a 30 minute expiration.

Amazon Translate doesn't scan all input files for the risk of CSV injection attacks.

CSV injection occurs when a .csv or .tsv file is altered so that a record contains malicious code. The record begins with a special character, such as =, +, -, or @. When the file is opened in a spreadsheet program, the program might interpret the record as a formula and run the code within it.

Before you download an input file from Amazon S3, ensure that you recognize the file and trust its creator.

" }, "AuxiliaryDataLocation":{ "shape":"TerminologyDataLocation", @@ -809,7 +809,7 @@ }, "Location":{ "shape":"String", - "documentation":"

The Amazon S3 location of the parallel data input file. The location is returned as a presigned URL to that has a 30 minute expiration.

Amazon Translate doesn't scan parallel data input files for the risk of CSV injection attacks.

CSV injection occurs when a .csv or .tsv file is altered so that a record contains malicious code. The record begins with a special character, such as =, +, -, or @. When the file is opened in a spreadsheet program, the program might interpret the record as a formula and run the code within it.

Before you download a parallel data input file from Amazon S3, ensure that you recognize the file and trust its creator.

" + "documentation":"

The Amazon S3 location of the parallel data input file. The location is returned as a presigned URL that has a 30 minute expiration.

Amazon Translate doesn't scan all input files for the risk of CSV injection attacks.

CSV injection occurs when a .csv or .tsv file is altered so that a record contains malicious code. The record begins with a special character, such as =, +, -, or @. When the file is opened in a spreadsheet program, the program might interpret the record as a formula and run the code within it.

Before you download an input file from Amazon S3, ensure that you recognize the file and trust its creator.

" } }, "documentation":"

The location of the most recent parallel data input file that was successfully imported into Amazon Translate.

" @@ -907,6 +907,10 @@ "FAILED" ] }, + "Profanity":{ + "type":"string", + "enum":["MASK"] + }, "ResourceName":{ "type":"string", "max":256, @@ -986,6 +990,10 @@ "shape":"ClientTokenString", "documentation":"

A unique identifier for the request. This token is auto-generated when using the Amazon Translate SDK.

", "idempotencyToken":true + }, + "Settings":{ + "shape":"TranslationSettings", + "documentation":"

Settings to configure your translation output, including the option to mask profane words and phrases.

" } } }, @@ -1102,7 +1110,7 @@ }, "Location":{ "shape":"String", - "documentation":"

The location of the custom terminology data.

" + "documentation":"

The Amazon S3 location of the most recent custom terminology input file that was successfully imported into Amazon Translate. The location is returned as a presigned URL that has a 30 minute expiration.

Amazon Translate doesn't scan all input files for the risk of CSV injection attacks.

CSV injection occurs when a .csv or .tsv file is altered so that a record contains malicious code. The record begins with a special character, such as =, +, -, or @. When the file is opened in a spreadsheet program, the program might interpret the record as a formula and run the code within it.

Before you download an input file from Amazon S3, ensure that you recognize the file and trust its creator.

" } }, "documentation":"

The location of the custom terminology data.

" @@ -1266,6 +1274,10 @@ "DataAccessRoleArn":{ "shape":"IamRoleArn", "documentation":"

The Amazon Resource Name (ARN) of an AWS Identity Access and Management (IAM) role that granted Amazon Translate read access to the job's input data.

" + }, + "Settings":{ + "shape":"TranslationSettings", + "documentation":"

Settings that configure the translation output.

" } }, "documentation":"

Provides information about a translation job.

" @@ -1306,6 +1318,10 @@ "TargetLanguageCode":{ "shape":"LanguageCodeString", "documentation":"

The language code requested for the language of the target text. The language must be a language supported by Amazon Translate.

" + }, + "Settings":{ + "shape":"TranslationSettings", + "documentation":"

Settings to configure your translation output, including the option to mask profane words and phrases.

" } } }, @@ -1332,9 +1348,23 @@ "AppliedTerminologies":{ "shape":"AppliedTerminologyList", "documentation":"

The names of the custom terminologies applied to the input text by Amazon Translate for the translated text response.

" + }, + "AppliedSettings":{ + "shape":"TranslationSettings", + "documentation":"

Settings that configure the translation output.

" } } }, + "TranslationSettings":{ + "type":"structure", + "members":{ + "Profanity":{ + "shape":"Profanity", + "documentation":"

Enable the profanity setting if you want Amazon Translate to mask profane words and phrases in your translation output.

To mask profane words and phrases, Amazon Translate replaces them with the grawlix string “?$#@$”. This 5-character sequence is used for each profane word or phrase, regardless of the length or number of words.

Amazon Translate does not detect profanity in all of its supported languages. For languages that support profanity detection, see Supported Languages and Language Codes in the Amazon Translate Developer Guide.

" + } + }, + "documentation":"

Settings that configure the translation output.

" + }, "UnboundedLengthString":{"type":"string"}, "UnsupportedLanguagePairException":{ "type":"structure",