diff --git a/.changes/1.11.8.json b/.changes/1.11.8.json new file mode 100644 index 0000000000..3f4b5de63f --- /dev/null +++ b/.changes/1.11.8.json @@ -0,0 +1,27 @@ +[ + { + "category": "``rds``", + "description": "Update rds client to latest version", + "type": "api-change" + }, + { + "category": "``s3``", + "description": "Update s3 client to latest version", + "type": "api-change" + }, + { + "category": "``appstream``", + "description": "Update appstream client to latest version", + "type": "api-change" + }, + { + "category": "``dynamodb``", + "description": "Update dynamodb client to latest version", + "type": "api-change" + }, + { + "category": "``elb``", + "description": "Update elb client to latest version", + "type": "api-change" + } +] \ No newline at end of file diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 9e75cf0304..a74bb4541d 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,16 @@ CHANGELOG ========= +1.11.8 +====== + +* api-change:``rds``: Update rds client to latest version +* api-change:``s3``: Update s3 client to latest version +* api-change:``appstream``: Update appstream client to latest version +* api-change:``dynamodb``: Update dynamodb client to latest version +* api-change:``elb``: Update elb client to latest version + + 1.11.7 ====== diff --git a/botocore/__init__.py b/botocore/__init__.py index fcb2e505c5..8a570e3062 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import re import logging -__version__ = '1.11.7' +__version__ = '1.11.8' class NullHandler(logging.Handler): diff --git a/botocore/data/appstream/2016-12-01/service-2.json b/botocore/data/appstream/2016-12-01/service-2.json index 5bc8b0affd..062f32e07e 100644 --- a/botocore/data/appstream/2016-12-01/service-2.json +++ b/botocore/data/appstream/2016-12-01/service-2.json @@ -296,7 +296,7 @@ "errors":[ {"shape":"ResourceNotFoundException"} ], - "documentation":"
Retrieves a list that describes the permissions for a private image that you own.
" + "documentation":"
Retrieves a list that describes the permissions for shared AWS account IDs on a private image that you own.
" }, "DescribeImages":{ "name":"DescribeImages", @@ -310,7 +310,7 @@ {"shape":"InvalidParameterCombinationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"
Retrieves a list that describes one or more specified images, if the image names are provided. Otherwise, all images in the account are described.
" + "documentation":"
Retrieves a list that describes one or more specified images, if the image names or image ARNs are provided. Otherwise, all images in the account are described.
" }, "DescribeSessions":{ "name":"DescribeSessions", @@ -616,6 +616,39 @@ }, "documentation":"
Describes an application in the application catalog.
" }, + "ApplicationSettings":{ + "type":"structure", + "required":["Enabled"], + "members":{ + "Enabled":{ + "shape":"Boolean", + "documentation":"
Enables or disables persistent application settings for users during their streaming sessions.
" + }, + "SettingsGroup":{ + "shape":"SettingsGroup", + "documentation":"
The path prefix for the S3 bucket where users’ persistent application settings are stored. You can allow the same persistent application settings to be used across multiple stacks by specifying the same settings group for each stack.
" + } + }, + "documentation":"
The persistent application settings for users of a stack.
" + }, + "ApplicationSettingsResponse":{ + "type":"structure", + "members":{ + "Enabled":{ + "shape":"Boolean", + "documentation":"
Specifies whether persistent application settings are enabled for users during their streaming sessions.
" + }, + "SettingsGroup":{ + "shape":"SettingsGroup", + "documentation":"
The path prefix for the S3 bucket where users’ persistent application settings are stored.
" + }, + "S3BucketName":{ + "shape":"String", + "documentation":"
The S3 bucket where users’ persistent application settings are stored. When persistent application settings are enabled for the first time for an account in an AWS Region, an S3 bucket is created. The bucket is unique to the AWS account and the Region.
" + } + }, + "documentation":"
Describes the persistent application settings for users of a stack.
" + }, "Applications":{ "type":"list", "member":{"shape":"Application"} @@ -970,6 +1003,10 @@ "UserSettings":{ "shape":"UserSettingList", "documentation":"
The actions that are enabled or disabled for users during their streaming sessions. By default, these actions are enabled.
" + }, + "ApplicationSettings":{ + "shape":"ApplicationSettings", + "documentation":"
The persistent application settings for users of a stack. When these settings are enabled, changes that users make to applications and Windows settings are automatically saved after each session and applied to the next session.
" } } }, @@ -1270,7 +1307,7 @@ "members":{ "Names":{ "shape":"StringList", - "documentation":"
The names of the images to describe.
" + "documentation":"
The names of the public or private images to describe.
" }, "Arns":{ "shape":"ArnList", @@ -2190,6 +2227,10 @@ "EXPIRED" ] }, + "SettingsGroup":{ + "type":"string", + "max":100 + }, "SharedImagePermissions":{ "type":"structure", "required":[ @@ -2255,6 +2296,10 @@ "UserSettings":{ "shape":"UserSettingList", "documentation":"
The actions that are enabled or disabled for users during their streaming sessions. By default these actions are enabled.
" + }, + "ApplicationSettings":{ + "shape":"ApplicationSettingsResponse", + "documentation":"
The persistent application settings for users of the stack.
" } }, "documentation":"
Describes a stack.
" @@ -2666,6 +2711,10 @@ "UserSettings":{ "shape":"UserSettingList", "documentation":"
The actions that are enabled or disabled for users during their streaming sessions. By default, these actions are enabled.
" + }, + "ApplicationSettings":{ + "shape":"ApplicationSettings", + "documentation":"
The persistent application settings for users of a stack. When these settings are enabled, changes that users make to applications and Windows settings are automatically saved after each session and applied to the next session.
" } } }, diff --git a/botocore/data/dynamodb/2012-08-10/service-2.json b/botocore/data/dynamodb/2012-08-10/service-2.json index 0d55b6635e..cc8d44a66d 100644 --- a/botocore/data/dynamodb/2012-08-10/service-2.json +++ b/botocore/data/dynamodb/2012-08-10/service-2.json @@ -26,7 +26,9 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerError"} ], - "documentation":"
The BatchGetItem operation returns the attributes of one or more items from one or more tables. You identify requested items by primary key.
A single operation can retrieve up to 16 MB of data, which can contain as many as 100 items. BatchGetItem will return a partial result if the response size limit is exceeded, the table's provisioned throughput is exceeded, or an internal processing failure occurs. If a partial result is returned, the operation returns a value for UnprocessedKeys. You can use this value to retry the operation starting with the next item to get.
If you request more than 100 items BatchGetItem will return a ValidationException with the message \"Too many items requested for the BatchGetItem call\".
For example, if you ask to retrieve 100 items, but each individual item is 300 KB in size, the system returns 52 items (so as not to exceed the 16 MB limit). It also returns an appropriate UnprocessedKeys value so you can get the next page of results. If desired, your application can include its own logic to assemble the pages of results into one data set.
If none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchGetItem will return a ProvisionedThroughputExceededException. If at least one of the items is successfully processed, then BatchGetItem completes successfully, while returning the keys of the unread items in UnprocessedKeys.
If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately, the underlying read or write requests can still fail due to throttling on the individual tables. If you delay the batch operation using exponential backoff, the individual requests in the batch are much more likely to succeed.
For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide.
By default, BatchGetItem performs eventually consistent reads on every table in the request. If you want strongly consistent reads instead, you can set ConsistentRead to true for any or all tables.
In order to minimize response latency, BatchGetItem retrieves items in parallel.
When designing your application, keep in mind that DynamoDB does not return items in any particular order. To help parse the response by item, include the primary key values for the items in your request in the ProjectionExpression parameter.
If a requested item does not exist, it is not returned in the result. Requests for nonexistent items consume the minimum read capacity units according to the type of read. For more information, see Capacity Units Calculations in the Amazon DynamoDB Developer Guide.
" + "documentation":"
The BatchGetItem operation returns the attributes of one or more items from one or more tables. You identify requested items by primary key.
A single operation can retrieve up to 16 MB of data, which can contain as many as 100 items. BatchGetItem will return a partial result if the response size limit is exceeded, the table's provisioned throughput is exceeded, or an internal processing failure occurs. If a partial result is returned, the operation returns a value for UnprocessedKeys. You can use this value to retry the operation starting with the next item to get.
If you request more than 100 items BatchGetItem will return a ValidationException with the message \"Too many items requested for the BatchGetItem call\".
For example, if you ask to retrieve 100 items, but each individual item is 300 KB in size, the system returns 52 items (so as not to exceed the 16 MB limit). It also returns an appropriate UnprocessedKeys value so you can get the next page of results. If desired, your application can include its own logic to assemble the pages of results into one data set.
If none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchGetItem will return a ProvisionedThroughputExceededException. If at least one of the items is successfully processed, then BatchGetItem completes successfully, while returning the keys of the unread items in UnprocessedKeys.
If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately, the underlying read or write requests can still fail due to throttling on the individual tables. If you delay the batch operation using exponential backoff, the individual requests in the batch are much more likely to succeed.
For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide.
By default, BatchGetItem performs eventually consistent reads on every table in the request. If you want strongly consistent reads instead, you can set ConsistentRead to true for any or all tables.
In order to minimize response latency, BatchGetItem retrieves items in parallel.
When designing your application, keep in mind that DynamoDB does not return items in any particular order. To help parse the response by item, include the primary key values for the items in your request in the ProjectionExpression parameter.
If a requested item does not exist, it is not returned in the result. Requests for nonexistent items consume the minimum read capacity units according to the type of read. For more information, see Capacity Units Calculations in the Amazon DynamoDB Developer Guide.
", + "endpointdiscovery":{ + } }, "BatchWriteItem":{ "name":"BatchWriteItem", @@ -42,7 +44,9 @@ {"shape":"ItemCollectionSizeLimitExceededException"}, {"shape":"InternalServerError"} ], - "documentation":"
The BatchWriteItem operation puts or deletes multiple items in one or more tables. A single call to BatchWriteItem can write up to 16 MB of data, which can comprise as many as 25 put or delete requests. Individual items to be written can be as large as 400 KB.
BatchWriteItem cannot update items. To update items, use the UpdateItem action.
The individual PutItem and DeleteItem operations specified in BatchWriteItem are atomic; however BatchWriteItem as a whole is not. If any requested operations fail because the table's provisioned throughput is exceeded or an internal processing failure occurs, the failed operations are returned in the UnprocessedItems response parameter. You can investigate and optionally resend the requests. Typically, you would call BatchWriteItem in a loop. Each iteration would check for unprocessed items and submit a new BatchWriteItem request with those unprocessed items until all items have been processed.
Note that if none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchWriteItem will return a ProvisionedThroughputExceededException.
If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately, the underlying read or write requests can still fail due to throttling on the individual tables. If you delay the batch operation using exponential backoff, the individual requests in the batch are much more likely to succeed.
For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide.
With BatchWriteItem, you can efficiently write or delete large amounts of data, such as from Amazon Elastic MapReduce (EMR), or copy data from another database into DynamoDB. In order to improve performance with these large-scale operations, BatchWriteItem does not behave in the same way as individual PutItem and DeleteItem calls would. For example, you cannot specify conditions on individual put and delete requests, and BatchWriteItem does not return deleted items in the response.
If you use a programming language that supports concurrency, you can use threads to write items in parallel. Your application must include the necessary logic to manage the threads. With languages that don't support threading, you must update or delete the specified items one at a time. In both situations, BatchWriteItem performs the specified put and delete operations in parallel, giving you the power of the thread pool approach without having to introduce complexity into your application.
Parallel processing reduces latency, but each specified put and delete request consumes the same number of write capacity units whether it is processed in parallel or not. Delete operations on nonexistent items consume one write capacity unit.
If one or more of the following is true, DynamoDB rejects the entire batch write operation:
" + "documentation":"
The BatchWriteItem operation puts or deletes multiple items in one or more tables. A single call to BatchWriteItem can write up to 16 MB of data, which can comprise as many as 25 put or delete requests. Individual items to be written can be as large as 400 KB.
BatchWriteItem cannot update items. To update items, use the UpdateItem action.
The individual PutItem and DeleteItem operations specified in BatchWriteItem are atomic; however BatchWriteItem as a whole is not. If any requested operations fail because the table's provisioned throughput is exceeded or an internal processing failure occurs, the failed operations are returned in the UnprocessedItems response parameter. You can investigate and optionally resend the requests. Typically, you would call BatchWriteItem in a loop. Each iteration would check for unprocessed items and submit a new BatchWriteItem request with those unprocessed items until all items have been processed.
Note that if none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchWriteItem will return a ProvisionedThroughputExceededException.
If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately, the underlying read or write requests can still fail due to throttling on the individual tables. If you delay the batch operation using exponential backoff, the individual requests in the batch are much more likely to succeed.
For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide.
With BatchWriteItem, you can efficiently write or delete large amounts of data, such as from Amazon Elastic MapReduce (EMR), or copy data from another database into DynamoDB. In order to improve performance with these large-scale operations, BatchWriteItem does not behave in the same way as individual PutItem and DeleteItem calls would. For example, you cannot specify conditions on individual put and delete requests, and BatchWriteItem does not return deleted items in the response.
If you use a programming language that supports concurrency, you can use threads to write items in parallel. Your application must include the necessary logic to manage the threads. With languages that don't support threading, you must update or delete the specified items one at a time. In both situations, BatchWriteItem performs the specified put and delete operations in parallel, giving you the power of the thread pool approach without having to introduce complexity into your application.
Parallel processing reduces latency, but each specified put and delete request consumes the same number of write capacity units whether it is processed in parallel or not. Delete operations on nonexistent items consume one write capacity unit.
If one or more of the following is true, DynamoDB rejects the entire batch write operation:
", + "endpointdiscovery":{ + } }, "CreateBackup":{ "name":"CreateBackup", @@ -60,7 +64,9 @@ {"shape":"LimitExceededException"}, {"shape":"InternalServerError"} ], - "documentation":"
Creates a backup for an existing table.
Each time you create an On-Demand Backup, the entire table data is backed up. There is no limit to the number of on-demand backups that can be taken.
When you create an On-Demand Backup, a time marker of the request is cataloged, and the backup is created asynchronously, by applying all changes until the time of the request to the last full table snapshot. Backup requests are processed instantaneously and become available for restore within minutes.
You can call CreateBackup at a maximum rate of 50 times per second.
All backups in DynamoDB work without consuming any provisioned throughput on the table.
If you submit a backup request on 2018-12-14 at 14:25:00, the backup is guaranteed to contain all data committed to the table up to 14:24:00, and data committed after 14:26:00 will not be. The backup may or may not contain data modifications made between 14:24:00 and 14:26:00. On-Demand Backup does not support causal consistency.
Along with data, the following are also included on the backups:
" + "documentation":"
Creates a backup for an existing table.
Each time you create an On-Demand Backup, the entire table data is backed up. There is no limit to the number of on-demand backups that can be taken.
When you create an On-Demand Backup, a time marker of the request is cataloged, and the backup is created asynchronously, by applying all changes until the time of the request to the last full table snapshot. Backup requests are processed instantaneously and become available for restore within minutes.
You can call CreateBackup at a maximum rate of 50 times per second.
All backups in DynamoDB work without consuming any provisioned throughput on the table.
If you submit a backup request on 2018-12-14 at 14:25:00, the backup is guaranteed to contain all data committed to the table up to 14:24:00, and data committed after 14:26:00 will not be. The backup may or may not contain data modifications made between 14:24:00 and 14:26:00. On-Demand Backup does not support causal consistency.
Along with data, the following are also included on the backups:
", + "endpointdiscovery":{ + } }, "CreateGlobalTable":{ "name":"CreateGlobalTable", @@ -76,7 +82,9 @@ {"shape":"GlobalTableAlreadyExistsException"}, {"shape":"TableNotFoundException"} ], - "documentation":"
Creates a global table from an existing table. A global table creates a replication relationship between two or more DynamoDB tables with the same table name in the provided regions.
If you want to add a new replica table to a global table, each of the following conditions must be true:
If global secondary indexes are specified, then the following conditions must also be met:
Write capacity settings should be set consistently across your replica tables and secondary indexes. DynamoDB strongly recommends enabling auto scaling to manage the write capacity settings for all of your global tables replicas and indexes.
If you prefer to manage write capacity settings manually, you should provision equal replicated write capacity units to your replica tables. You should also provision equal replicated write capacity units to matching secondary indexes across your global table.
" + "documentation":"
Creates a global table from an existing table. A global table creates a replication relationship between two or more DynamoDB tables with the same table name in the provided regions.
If you want to add a new replica table to a global table, each of the following conditions must be true:
If global secondary indexes are specified, then the following conditions must also be met:
Write capacity settings should be set consistently across your replica tables and secondary indexes. DynamoDB strongly recommends enabling auto scaling to manage the write capacity settings for all of your global tables replicas and indexes.
If you prefer to manage write capacity settings manually, you should provision equal replicated write capacity units to your replica tables. You should also provision equal replicated write capacity units to matching secondary indexes across your global table.
", + "endpointdiscovery":{ + } }, "CreateTable":{ "name":"CreateTable", @@ -91,7 +99,9 @@ {"shape":"LimitExceededException"}, {"shape":"InternalServerError"} ], - "documentation":"
The CreateTable operation adds a new table to your account. In an AWS account, table names must be unique within each region. That is, you can have two tables with same name if you create the tables in different regions.
CreateTable is an asynchronous operation. Upon receiving a CreateTable request, DynamoDB immediately returns a response with a TableStatus of CREATING. After the table is created, DynamoDB sets the TableStatus to ACTIVE. You can perform read and write operations only on an ACTIVE table.
You can optionally define secondary indexes on the new table, as part of the CreateTable operation. If you want to create multiple tables with secondary indexes on them, you must create the tables sequentially. Only one table with secondary indexes can be in the CREATING state at any given time.
You can use the DescribeTable action to check the table status.
" + "documentation":"
The CreateTable operation adds a new table to your account. In an AWS account, table names must be unique within each region. That is, you can have two tables with same name if you create the tables in different regions.
CreateTable is an asynchronous operation. Upon receiving a CreateTable request, DynamoDB immediately returns a response with a TableStatus of CREATING. After the table is created, DynamoDB sets the TableStatus to ACTIVE. You can perform read and write operations only on an ACTIVE table.
You can optionally define secondary indexes on the new table, as part of the CreateTable operation. If you want to create multiple tables with secondary indexes on them, you must create the tables sequentially. Only one table with secondary indexes can be in the CREATING state at any given time.
You can use the DescribeTable action to check the table status.
", + "endpointdiscovery":{ + } }, "DeleteBackup":{ "name":"DeleteBackup", @@ -107,7 +117,9 @@ {"shape":"LimitExceededException"}, {"shape":"InternalServerError"} ], - "documentation":"
Deletes an existing backup of a table.
You can call DeleteBackup at a maximum rate of 10 times per second.
" + "documentation":"
Deletes an existing backup of a table.
You can call DeleteBackup at a maximum rate of 10 times per second.
", + "endpointdiscovery":{ + } }, "DeleteItem":{ "name":"DeleteItem", @@ -124,7 +136,9 @@ {"shape":"ItemCollectionSizeLimitExceededException"}, {"shape":"InternalServerError"} ], - "documentation":"
Deletes a single item in a table by primary key. You can perform a conditional delete operation that deletes the item if it exists, or if it has an expected attribute value.
In addition to deleting an item, you can also return the item's attribute values in the same operation, using the ReturnValues parameter.
Unless you specify conditions, the DeleteItem is an idempotent operation; running it multiple times on the same item or attribute does not result in an error response.
Conditional deletes are useful for deleting items only if specific conditions are met. If those conditions are met, DynamoDB performs the delete. Otherwise, the item is not deleted.
" + "documentation":"
Deletes a single item in a table by primary key. You can perform a conditional delete operation that deletes the item if it exists, or if it has an expected attribute value.
In addition to deleting an item, you can also return the item's attribute values in the same operation, using the ReturnValues parameter.
Unless you specify conditions, the DeleteItem is an idempotent operation; running it multiple times on the same item or attribute does not result in an error response.
Conditional deletes are useful for deleting items only if specific conditions are met. If those conditions are met, DynamoDB performs the delete. Otherwise, the item is not deleted.
", + "endpointdiscovery":{ + } }, "DeleteTable":{ "name":"DeleteTable", @@ -140,7 +154,9 @@ {"shape":"LimitExceededException"}, {"shape":"InternalServerError"} ], - "documentation":"
The DeleteTable operation deletes a table and all of its items. After a DeleteTable request, the specified table is in the DELETING state until DynamoDB completes the deletion. If the table is in the ACTIVE state, you can delete it. If a table is in CREATING or UPDATING states, then DynamoDB returns a ResourceInUseException. If the specified table does not exist, DynamoDB returns a ResourceNotFoundException. If table is already in the DELETING state, no error is returned.
DynamoDB might continue to accept data read and write operations, such as GetItem and PutItem, on a table in the DELETING state until the table deletion is complete.
When you delete a table, any indexes on that table are also deleted.
If you have DynamoDB Streams enabled on the table, then the corresponding stream on that table goes into the DISABLED state, and the stream is automatically deleted after 24 hours.
Use the DescribeTable action to check the status of the table.
" + "documentation":"
The DeleteTable operation deletes a table and all of its items. After a DeleteTable request, the specified table is in the DELETING state until DynamoDB completes the deletion. If the table is in the ACTIVE state, you can delete it. If a table is in CREATING or UPDATING states, then DynamoDB returns a ResourceInUseException. If the specified table does not exist, DynamoDB returns a ResourceNotFoundException. If table is already in the DELETING state, no error is returned.
DynamoDB might continue to accept data read and write operations, such as GetItem and PutItem, on a table in the DELETING state until the table deletion is complete.
When you delete a table, any indexes on that table are also deleted.
If you have DynamoDB Streams enabled on the table, then the corresponding stream on that table goes into the DISABLED state, and the stream is automatically deleted after 24 hours.
Use the DescribeTable action to check the status of the table.
", + "endpointdiscovery":{ + } }, "DescribeBackup":{ "name":"DescribeBackup", @@ -154,7 +170,9 @@ {"shape":"BackupNotFoundException"}, {"shape":"InternalServerError"} ], - "documentation":"
Describes an existing backup of a table.
You can call DescribeBackup at a maximum rate of 10 times per second.
" + "documentation":"
Describes an existing backup of a table.
You can call DescribeBackup at a maximum rate of 10 times per second.
", + "endpointdiscovery":{ + } }, "DescribeContinuousBackups":{ "name":"DescribeContinuousBackups", @@ -168,7 +186,19 @@ {"shape":"TableNotFoundException"}, {"shape":"InternalServerError"} ], - "documentation":"
Checks the status of continuous backups and point in time recovery on the specified table. Continuous backups are ENABLED on all tables at table creation. If point in time recovery is enabled, PointInTimeRecoveryStatus will be set to ENABLED.
Once continuous backups and point in time recovery are enabled, you can restore to any point in time within EarliestRestorableDateTime and LatestRestorableDateTime.
LatestRestorableDateTime is typically 5 minutes before the current time. You can restore your table to any point in time during the last 35 days.
You can call DescribeContinuousBackups at a maximum rate of 10 times per second.
" + "documentation":"
Checks the status of continuous backups and point in time recovery on the specified table. Continuous backups are ENABLED on all tables at table creation. If point in time recovery is enabled, PointInTimeRecoveryStatus will be set to ENABLED.
Once continuous backups and point in time recovery are enabled, you can restore to any point in time within EarliestRestorableDateTime and LatestRestorableDateTime.
LatestRestorableDateTime is typically 5 minutes before the current time. You can restore your table to any point in time during the last 35 days.
You can call DescribeContinuousBackups at a maximum rate of 10 times per second.
", + "endpointdiscovery":{ + } + }, + "DescribeEndpoints":{ + "name":"DescribeEndpoints", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeEndpointsRequest"}, + "output":{"shape":"DescribeEndpointsResponse"}, + "endpointoperation":true }, "DescribeGlobalTable":{ "name":"DescribeGlobalTable", @@ -182,7 +212,9 @@ {"shape":"InternalServerError"}, {"shape":"GlobalTableNotFoundException"} ], - "documentation":"
Returns information about the specified global table.
" + "documentation":"
Returns information about the specified global table.
", + "endpointdiscovery":{ + } }, "DescribeGlobalTableSettings":{ "name":"DescribeGlobalTableSettings", @@ -196,7 +228,9 @@ {"shape":"GlobalTableNotFoundException"}, {"shape":"InternalServerError"} ], - "documentation":"
Describes region specific settings for a global table.
" + "documentation":"
Describes region specific settings for a global table.
", + "endpointdiscovery":{ + } }, "DescribeLimits":{ "name":"DescribeLimits", @@ -209,7 +243,9 @@ "errors":[ {"shape":"InternalServerError"} ], - "documentation":"
Returns the current provisioned-capacity limits for your AWS account in a region, both for the region as a whole and for any one DynamoDB table that you create there.
When you establish an AWS account, the account has initial limits on the maximum read capacity units and write capacity units that you can provision across all of your DynamoDB tables in a given region. Also, there are per-table limits that apply when you create a table there. For more information, see Limits page in the Amazon DynamoDB Developer Guide.
Although you can increase these limits by filing a case at AWS Support Center, obtaining the increase is not instantaneous. The DescribeLimits action lets you write code to compare the capacity you are currently using to those limits imposed by your account so that you have enough time to apply for an increase before you hit a limit.
For example, you could use one of the AWS SDKs to do the following:
  1. Call DescribeLimits for a particular region to obtain your current account limits on provisioned capacity there.
  2. Create a variable to hold the aggregate read capacity units provisioned for all your tables in that region, and one to hold the aggregate write capacity units. Zero them both.
  3. Call ListTables to obtain a list of all your DynamoDB tables.
  4. For each table name listed by ListTables, do the following:
  5. Report the account limits for that region returned by DescribeLimits, along with the total current provisioned capacity levels you have calculated.
This will let you see whether you are getting close to your account-level limits.
The per-table limits apply only when you are creating a new table. They restrict the sum of the provisioned capacity of the new table itself and all its global secondary indexes.
For existing tables and their GSIs, DynamoDB will not let you increase provisioned capacity extremely rapidly, but the only upper limit that applies is that the aggregate provisioned capacity over all your tables and GSIs cannot exceed either of the per-account limits.
DescribeLimits should only be called periodically. You can expect throttling errors if you call it more than once in a minute.
The DescribeLimits Request element has no content.
" + "documentation":"
Returns the current provisioned-capacity limits for your AWS account in a region, both for the region as a whole and for any one DynamoDB table that you create there.
When you establish an AWS account, the account has initial limits on the maximum read capacity units and write capacity units that you can provision across all of your DynamoDB tables in a given region. Also, there are per-table limits that apply when you create a table there. For more information, see Limits page in the Amazon DynamoDB Developer Guide.
Although you can increase these limits by filing a case at AWS Support Center, obtaining the increase is not instantaneous. The DescribeLimits action lets you write code to compare the capacity you are currently using to those limits imposed by your account so that you have enough time to apply for an increase before you hit a limit.
For example, you could use one of the AWS SDKs to do the following:
  1. Call DescribeLimits for a particular region to obtain your current account limits on provisioned capacity there.
  2. Create a variable to hold the aggregate read capacity units provisioned for all your tables in that region, and one to hold the aggregate write capacity units. Zero them both.
  3. Call ListTables to obtain a list of all your DynamoDB tables.
  4. For each table name listed by ListTables, do the following:
  5. Report the account limits for that region returned by DescribeLimits, along with the total current provisioned capacity levels you have calculated.
This will let you see whether you are getting close to your account-level limits.
The per-table limits apply only when you are creating a new table. They restrict the sum of the provisioned capacity of the new table itself and all its global secondary indexes.
For existing tables and their GSIs, DynamoDB will not let you increase provisioned capacity extremely rapidly, but the only upper limit that applies is that the aggregate provisioned capacity over all your tables and GSIs cannot exceed either of the per-account limits.
DescribeLimits should only be called periodically. You can expect throttling errors if you call it more than once in a minute.
The DescribeLimits Request element has no content.
", + "endpointdiscovery":{ + } }, "DescribeTable":{ "name":"DescribeTable", @@ -223,7 +259,9 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerError"} ], - "documentation":"
Returns information about the table, including the current status of the table, when it was created, the primary key schema, and any indexes on the table.
If you issue a DescribeTable request immediately after a CreateTable request, DynamoDB might return a ResourceNotFoundException. This is because DescribeTable uses an eventually consistent query, and the metadata for your table might not be available at that moment. Wait for a few seconds, and then try the DescribeTable request again.
" + "documentation":"
Returns information about the table, including the current status of the table, when it was created, the primary key schema, and any indexes on the table.
If you issue a DescribeTable request immediately after a CreateTable request, DynamoDB might return a ResourceNotFoundException. This is because DescribeTable uses an eventually consistent query, and the metadata for your table might not be available at that moment. Wait for a few seconds, and then try the DescribeTable request again.
", + "endpointdiscovery":{ + } }, "DescribeTimeToLive":{ "name":"DescribeTimeToLive", @@ -237,7 +275,9 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerError"} ], - "documentation":"
Gives a description of the Time to Live (TTL) status on the specified table.
" + "documentation":"
Gives a description of the Time to Live (TTL) status on the specified table.
", + "endpointdiscovery":{ + } }, "GetItem":{ "name":"GetItem", @@ -252,7 +292,9 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerError"} ], - "documentation":"
The GetItem operation returns a set of attributes for the item with the given primary key. If there is no matching item, GetItem does not return any data and there will be no Item element in the response.
GetItem provides an eventually consistent read by default. If your application requires a strongly consistent read, set ConsistentRead to true. Although a strongly consistent read might take more time than an eventually consistent read, it always returns the last updated value.
" + "documentation":"
The GetItem operation returns a set of attributes for the item with the given primary key. If there is no matching item, GetItem does not return any data and there will be no Item element in the response.
GetItem provides an eventually consistent read by default. If your application requires a strongly consistent read, set ConsistentRead to true. Although a strongly consistent read might take more time than an eventually consistent read, it always returns the last updated value.
", + "endpointdiscovery":{ + } }, "ListBackups":{ "name":"ListBackups", @@ -265,7 +307,9 @@ "errors":[ {"shape":"InternalServerError"} ], - "documentation":"
List backups associated with an AWS account. To list backups for a given table, specify TableName. ListBackups returns a paginated list of results with at most 1MB worth of items in a page. You can also specify a limit for the maximum number of entries to be returned in a page.
In the request, start time is inclusive but end time is exclusive. Note that these limits are for the time at which the original backup was requested.
You can call ListBackups a maximum of 5 times per second.
" + "documentation":"
List backups associated with an AWS account. To list backups for a given table, specify TableName. ListBackups returns a paginated list of results with at most 1MB worth of items in a page. You can also specify a limit for the maximum number of entries to be returned in a page.
In the request, start time is inclusive but end time is exclusive. Note that these limits are for the time at which the original backup was requested.
You can call ListBackups a maximum of 5 times per second.
", + "endpointdiscovery":{ + } }, "ListGlobalTables":{ "name":"ListGlobalTables", @@ -278,7 +322,9 @@ "errors":[ {"shape":"InternalServerError"} ], - "documentation":"
Lists all global tables that have a replica in the specified region.
" + "documentation":"
Lists all global tables that have a replica in the specified region.
", + "endpointdiscovery":{ + } }, "ListTables":{ "name":"ListTables", @@ -291,7 +337,9 @@ "errors":[ {"shape":"InternalServerError"} ], - "documentation":"
Returns an array of table names associated with the current account and endpoint. The output from ListTables is paginated, with each page returning a maximum of 100 table names.
" + "documentation":"
Returns an array of table names associated with the current account and endpoint. The output from ListTables is paginated, with each page returning a maximum of 100 table names.
", + "endpointdiscovery":{ + } }, "ListTagsOfResource":{ "name":"ListTagsOfResource", @@ -305,7 +353,9 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerError"} ], - "documentation":"
List all tags on an Amazon DynamoDB resource. You can call ListTagsOfResource up to 10 times per second, per account.
For an overview on tagging DynamoDB resources, see Tagging for DynamoDB in the Amazon DynamoDB Developer Guide.
" + "documentation":"
List all tags on an Amazon DynamoDB resource. You can call ListTagsOfResource up to 10 times per second, per account.
For an overview on tagging DynamoDB resources, see Tagging for DynamoDB in the Amazon DynamoDB Developer Guide.
", + "endpointdiscovery":{ + } }, "PutItem":{ "name":"PutItem", @@ -322,7 +372,9 @@ {"shape":"ItemCollectionSizeLimitExceededException"}, {"shape":"InternalServerError"} ], - "documentation":"
Creates a new item, or replaces an old item with a new item. If an item that has the same primary key as the new item already exists in the specified table, the new item completely replaces the existing item. You can perform a conditional put operation (add a new item if one with the specified primary key doesn't exist), or replace an existing item if it has certain attribute values. You can return the item's attribute values in the same operation, using the ReturnValues parameter.
This topic provides general information about the PutItem API.
For information on how to call the PutItem API using the AWS SDK in specific languages, see the following:
When you add an item, the primary key attribute(s) are the only required attributes. Attribute values cannot be null. String and Binary type attributes must have lengths greater than zero. Set type attributes cannot be empty. Requests with empty values will be rejected with a ValidationException exception.
To prevent a new item from replacing an existing item, use a conditional expression that contains the attribute_not_exists function with the name of the attribute being used as the partition key for the table. Since every record must contain that attribute, the attribute_not_exists function will only succeed if no matching item exists.
For more information about PutItem, see Working with Items in the Amazon DynamoDB Developer Guide.
" + "documentation":"
Creates a new item, or replaces an old item with a new item. If an item that has the same primary key as the new item already exists in the specified table, the new item completely replaces the existing item. You can perform a conditional put operation (add a new item if one with the specified primary key doesn't exist), or replace an existing item if it has certain attribute values. You can return the item's attribute values in the same operation, using the ReturnValues parameter.
This topic provides general information about the PutItem API.
For information on how to call the PutItem API using the AWS SDK in specific languages, see the following:
When you add an item, the primary key attribute(s) are the only required attributes. Attribute values cannot be null. String and Binary type attributes must have lengths greater than zero. Set type attributes cannot be empty. Requests with empty values will be rejected with a ValidationException exception.
To prevent a new item from replacing an existing item, use a conditional expression that contains the attribute_not_exists function with the name of the attribute being used as the partition key for the table. Since every record must contain that attribute, the attribute_not_exists function will only succeed if no matching item exists.
For more information about PutItem, see Working with Items in the Amazon DynamoDB Developer Guide.
", + "endpointdiscovery":{ + } }, "Query":{ "name":"Query", @@ -337,7 +389,9 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerError"} ], - "documentation":"
The Query operation finds items based on primary key values. You can query any table or secondary index that has a composite primary key (a partition key and a sort key).
Use the KeyConditionExpression parameter to provide a specific value for the partition key. The Query operation will return all of the items from the table or index with that partition key value. You can optionally narrow the scope of the Query operation by specifying a sort key value and a comparison operator in KeyConditionExpression. To further refine the Query results, you can optionally provide a FilterExpression. A FilterExpression determines which items within the results should be returned to you. All of the other results are discarded.
A Query operation always returns a result set. If no matching items are found, the result set will be empty. Queries that do not return results consume the minimum number of read capacity units for that type of read operation.
DynamoDB calculates the number of read capacity units consumed based on item size, not on the amount of data that is returned to an application. The number of capacity units consumed will be the same whether you request all of the attributes (the default behavior) or just some of them (using a projection expression). The number will also be the same whether or not you use a FilterExpression.
Query results are always sorted by the sort key value. If the data type of the sort key is Number, the results are returned in numeric order; otherwise, the results are returned in order of UTF-8 bytes. By default, the sort order is ascending. To reverse the order, set the ScanIndexForward parameter to false.
A single Query operation will read up to the maximum number of items set (if using the Limit parameter) or a maximum of 1 MB of data and then apply any filtering to the results using FilterExpression. If LastEvaluatedKey is present in the response, you will need to paginate the result set. For more information, see Paginating the Results in the Amazon DynamoDB Developer Guide.
FilterExpression is applied after a Query finishes, but before the results are returned. A FilterExpression cannot contain partition key or sort key attributes. You need to specify those attributes in the KeyConditionExpression.
A Query operation can return an empty result set and a LastEvaluatedKey if all the items read for the page of results are filtered out.
You can query a table, a local secondary index, or a global secondary index. For a query on a table or on a local secondary index, you can set the ConsistentRead parameter to true and obtain a strongly consistent result. Global secondary indexes support eventually consistent reads only, so do not specify ConsistentRead when querying a global secondary index.
" + "documentation":"
The Query operation finds items based on primary key values. You can query any table or secondary index that has a composite primary key (a partition key and a sort key).
Use the KeyConditionExpression parameter to provide a specific value for the partition key. The Query operation will return all of the items from the table or index with that partition key value. You can optionally narrow the scope of the Query operation by specifying a sort key value and a comparison operator in KeyConditionExpression. To further refine the Query results, you can optionally provide a FilterExpression. A FilterExpression determines which items within the results should be returned to you. All of the other results are discarded.
A Query operation always returns a result set. If no matching items are found, the result set will be empty. Queries that do not return results consume the minimum number of read capacity units for that type of read operation.
DynamoDB calculates the number of read capacity units consumed based on item size, not on the amount of data that is returned to an application. The number of capacity units consumed will be the same whether you request all of the attributes (the default behavior) or just some of them (using a projection expression). The number will also be the same whether or not you use a FilterExpression.
Query results are always sorted by the sort key value. If the data type of the sort key is Number, the results are returned in numeric order; otherwise, the results are returned in order of UTF-8 bytes. By default, the sort order is ascending. To reverse the order, set the ScanIndexForward parameter to false.
A single Query operation will read up to the maximum number of items set (if using the Limit parameter) or a maximum of 1 MB of data and then apply any filtering to the results using FilterExpression. If LastEvaluatedKey is present in the response, you will need to paginate the result set. For more information, see Paginating the Results in the Amazon DynamoDB Developer Guide.
FilterExpression is applied after a Query finishes, but before the results are returned. A FilterExpression cannot contain partition key or sort key attributes. You need to specify those attributes in the KeyConditionExpression.
A Query operation can return an empty result set and a LastEvaluatedKey if all the items read for the page of results are filtered out.
You can query a table, a local secondary index, or a global secondary index. For a query on a table or on a local secondary index, you can set the ConsistentRead parameter to true and obtain a strongly consistent result. Global secondary indexes support eventually consistent reads only, so do not specify ConsistentRead when querying a global secondary index.
", + "endpointdiscovery":{ + } }, "RestoreTableFromBackup":{ "name":"RestoreTableFromBackup", @@ -355,7 +409,9 @@ {"shape":"LimitExceededException"}, {"shape":"InternalServerError"} ], - "documentation":"
Creates a new table from an existing backup. Any number of users can execute up to 4 concurrent restores (any type of restore) in a given account.
You can call RestoreTableFromBackup at a maximum rate of 10 times per second.
You must manually set up the following on the restored table:
" + "documentation":"
Creates a new table from an existing backup. Any number of users can execute up to 4 concurrent restores (any type of restore) in a given account.
You can call RestoreTableFromBackup at a maximum rate of 10 times per second.
You must manually set up the following on the restored table:
", + "endpointdiscovery":{ + } }, "RestoreTableToPointInTime":{ "name":"RestoreTableToPointInTime", @@ -374,7 +430,9 @@ {"shape":"PointInTimeRecoveryUnavailableException"}, {"shape":"InternalServerError"} ], - "documentation":"
Restores the specified table to the specified point in time within EarliestRestorableDateTime and LatestRestorableDateTime. You can restore your table to any point in time during the last 35 days. Any number of users can execute up to 4 concurrent restores (any type of restore) in a given account.
When you restore using point in time recovery, DynamoDB restores your table data to the state based on the selected date and time (day:hour:minute:second) to a new table.
Along with data, the following are also included on the new restored table using point in time recovery:
You must manually set up the following on the restored table:
" + "documentation":"
Restores the specified table to the specified point in time within EarliestRestorableDateTime and LatestRestorableDateTime. You can restore your table to any point in time during the last 35 days. Any number of users can execute up to 4 concurrent restores (any type of restore) in a given account.
When you restore using point in time recovery, DynamoDB restores your table data to the state based on the selected date and time (day:hour:minute:second) to a new table.
Along with data, the following are also included on the new restored table using point in time recovery:
You must manually set up the following on the restored table:
", + "endpointdiscovery":{ + } }, "Scan":{ "name":"Scan", @@ -389,7 +447,9 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerError"} ], - "documentation":"
The Scan operation returns one or more items and item attributes by accessing every item in a table or a secondary index. To have DynamoDB return fewer items, you can provide a FilterExpression operation.
If the total number of scanned items exceeds the maximum data set size limit of 1 MB, the scan stops and results are returned to the user as a LastEvaluatedKey value to continue the scan in a subsequent operation. The results also include the number of items exceeding the limit. A scan can result in no table data meeting the filter criteria.
A single Scan operation will read up to the maximum number of items set (if using the Limit parameter) or a maximum of 1 MB of data and then apply any filtering to the results using FilterExpression. If LastEvaluatedKey is present in the response, you will need to paginate the result set. For more information, see Paginating the Results in the Amazon DynamoDB Developer Guide.
Scan operations proceed sequentially; however, for faster performance on a large table or secondary index, applications can request a parallel Scan operation by providing the Segment and TotalSegments parameters. For more information, see Parallel Scan in the Amazon DynamoDB Developer Guide.
Scan uses eventually consistent reads when accessing the data in a table; therefore, the result set might not include the changes to data in the table immediately before the operation began. If you need a consistent copy of the data, as of the time that the Scan begins, you can set the ConsistentRead parameter to true.
" + "documentation":"
The Scan operation returns one or more items and item attributes by accessing every item in a table or a secondary index. To have DynamoDB return fewer items, you can provide a FilterExpression operation.
If the total number of scanned items exceeds the maximum data set size limit of 1 MB, the scan stops and results are returned to the user as a LastEvaluatedKey value to continue the scan in a subsequent operation. The results also include the number of items exceeding the limit. A scan can result in no table data meeting the filter criteria.
A single Scan operation will read up to the maximum number of items set (if using the Limit parameter) or a maximum of 1 MB of data and then apply any filtering to the results using FilterExpression. If LastEvaluatedKey is present in the response, you will need to paginate the result set. For more information, see Paginating the Results in the Amazon DynamoDB Developer Guide.
Scan operations proceed sequentially; however, for faster performance on a large table or secondary index, applications can request a parallel Scan operation by providing the Segment and TotalSegments parameters. For more information, see Parallel Scan in the Amazon DynamoDB Developer Guide.
Scan uses eventually consistent reads when accessing the data in a table; therefore, the result set might not include the changes to data in the table immediately before the operation began. If you need a consistent copy of the data, as of the time that the Scan begins, you can set the ConsistentRead parameter to true.
", + "endpointdiscovery":{ + } }, "TagResource":{ "name":"TagResource", @@ -404,7 +464,9 @@ {"shape":"InternalServerError"}, {"shape":"ResourceInUseException"} ], - "documentation":"
Associate a set of tags with an Amazon DynamoDB resource. You can then activate these user-defined tags so that they appear on the Billing and Cost Management console for cost allocation tracking. You can call TagResource up to 5 times per second, per account.
For an overview on tagging DynamoDB resources, see Tagging for DynamoDB in the Amazon DynamoDB Developer Guide.
" + "documentation":"
Associate a set of tags with an Amazon DynamoDB resource. You can then activate these user-defined tags so that they appear on the Billing and Cost Management console for cost allocation tracking. You can call TagResource up to 5 times per second, per account.
For an overview on tagging DynamoDB resources, see Tagging for DynamoDB in the Amazon DynamoDB Developer Guide.
", + "endpointdiscovery":{ + } }, "UntagResource":{ "name":"UntagResource", @@ -419,7 +481,9 @@ {"shape":"InternalServerError"}, {"shape":"ResourceInUseException"} ], - "documentation":"

Removes the association of tags from an Amazon DynamoDB resource. You can call UntagResource up to 5 times per second, per account.

For an overview on tagging DynamoDB resources, see Tagging for DynamoDB in the Amazon DynamoDB Developer Guide.

" + "documentation":"

Removes the association of tags from an Amazon DynamoDB resource. You can call UntagResource up to 5 times per second, per account.

For an overview on tagging DynamoDB resources, see Tagging for DynamoDB in the Amazon DynamoDB Developer Guide.
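A minimal sketch of both tagging calls, assuming a hypothetical table ARN and tag key:

```python
import boto3

dynamodb = boto3.client("dynamodb")
table_arn = "arn:aws:dynamodb:us-east-1:123456789012:table/Music"  # hypothetical

# Attach a cost-allocation tag, then remove it again by key.
dynamodb.tag_resource(
    ResourceArn=table_arn,
    Tags=[{"Key": "CostCenter", "Value": "Analytics"}],
)
dynamodb.untag_resource(ResourceArn=table_arn, TagKeys=["CostCenter"])
```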

", + "endpointdiscovery":{ + } }, "UpdateContinuousBackups":{ "name":"UpdateContinuousBackups", @@ -434,7 +498,9 @@ {"shape":"ContinuousBackupsUnavailableException"}, {"shape":"InternalServerError"} ], - "documentation":"

UpdateContinuousBackups enables or disables point in time recovery for the specified table. A successful UpdateContinuousBackups call returns the current ContinuousBackupsDescription. Continuous backups are ENABLED on all tables at table creation. If point in time recovery is enabled, PointInTimeRecoveryStatus will be set to ENABLED.

Once continuous backups and point in time recovery are enabled, you can restore to any point in time within EarliestRestorableDateTime and LatestRestorableDateTime.

LatestRestorableDateTime is typically 5 minutes before the current time. You can restore your table to any point in time during the last 35 days.

" + "documentation":"

UpdateContinuousBackups enables or disables point in time recovery for the specified table. A successful UpdateContinuousBackups call returns the current ContinuousBackupsDescription. Continuous backups are ENABLED on all tables at table creation. If point in time recovery is enabled, PointInTimeRecoveryStatus will be set to ENABLED.

Once continuous backups and point in time recovery are enabled, you can restore to any point in time within EarliestRestorableDateTime and LatestRestorableDateTime.

LatestRestorableDateTime is typically 5 minutes before the current time. You can restore your table to any point in time during the last 35 days.
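As a sketch, the call below enables point in time recovery on a hypothetical table and reads the restore window back from the response:

```python
import boto3

dynamodb = boto3.client("dynamodb")

resp = dynamodb.update_continuous_backups(
    TableName="Music",  # hypothetical table
    PointInTimeRecoverySpecification={"PointInTimeRecoveryEnabled": True},
)
pitr = resp["ContinuousBackupsDescription"]["PointInTimeRecoveryDescription"]
print(pitr["PointInTimeRecoveryStatus"])
print(pitr.get("EarliestRestorableDateTime"), pitr.get("LatestRestorableDateTime"))
```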

", + "endpointdiscovery":{ + } }, "UpdateGlobalTable":{ "name":"UpdateGlobalTable", @@ -451,7 +517,9 @@ {"shape":"ReplicaNotFoundException"}, {"shape":"TableNotFoundException"} ], - "documentation":"

Adds or removes replicas in the specified global table. The global table must already exist to be able to use this operation. Any replica to be added must be empty, must have the same name as the global table, must have the same key schema, must have DynamoDB Streams enabled, and must have the same provisioned and maximum write capacity units.

Although you can use UpdateGlobalTable to add replicas and remove replicas in a single request, for simplicity we recommend that you issue separate requests for adding or removing replicas.

If global secondary indexes are specified, then the following conditions must also be met:

" + "documentation":"

Adds or removes replicas in the specified global table. The global table must already exist to be able to use this operation. Any replica to be added must be empty, must have the same name as the global table, must have the same key schema, must have DynamoDB Streams enabled, and must have the same provisioned and maximum write capacity units.

Although you can use UpdateGlobalTable to add replicas and remove replicas in a single request, for simplicity we recommend that you issue separate requests for adding or removing replicas.

If global secondary indexes are specified, then the following conditions must also be met:
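Following the recommendation above to issue separate requests for adding and removing replicas, a sketch with a hypothetical table name and regions:

```python
import boto3

dynamodb = boto3.client("dynamodb")

# Add a replica in one request ...
dynamodb.update_global_table(
    GlobalTableName="Music",
    ReplicaUpdates=[{"Create": {"RegionName": "us-west-2"}}],
)
# ... and remove another in a separate request.
dynamodb.update_global_table(
    GlobalTableName="Music",
    ReplicaUpdates=[{"Delete": {"RegionName": "eu-west-1"}}],
)
```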

", + "endpointdiscovery":{ + } }, "UpdateGlobalTableSettings":{ "name":"UpdateGlobalTableSettings", @@ -469,7 +537,9 @@ {"shape":"ResourceInUseException"}, {"shape":"InternalServerError"} ], - "documentation":"

Updates settings for a global table.

" + "documentation":"

Updates settings for a global table.

", + "endpointdiscovery":{ + } }, "UpdateItem":{ "name":"UpdateItem", @@ -486,7 +556,9 @@ {"shape":"ItemCollectionSizeLimitExceededException"}, {"shape":"InternalServerError"} ], - "documentation":"

Edits an existing item's attributes, or adds a new item to the table if it does not already exist. You can put, delete, or add attribute values. You can also perform a conditional update on an existing item (insert a new attribute name-value pair if it doesn't exist, or replace an existing name-value pair if it has certain expected attribute values).

You can also return the item's attribute values in the same UpdateItem operation using the ReturnValues parameter.

" + "documentation":"

Edits an existing item's attributes, or adds a new item to the table if it does not already exist. You can put, delete, or add attribute values. You can also perform a conditional update on an existing item (insert a new attribute name-value pair if it doesn't exist, or replace an existing name-value pair if it has certain expected attribute values).

You can also return the item's attribute values in the same UpdateItem operation using the ReturnValues parameter.
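A sketch of a conditional update that increments a counter and returns only the changed attribute; the table, key, and Plays attribute are hypothetical:

```python
import boto3

dynamodb = boto3.client("dynamodb")

resp = dynamodb.update_item(
    TableName="Music",
    Key={"Artist": {"S": "No One You Know"}, "SongTitle": {"S": "Call Me Today"}},
    UpdateExpression="SET Plays = Plays + :one",
    ConditionExpression="attribute_exists(Plays)",  # fail rather than create
    ExpressionAttributeValues={":one": {"N": "1"}},
    ReturnValues="UPDATED_NEW",
)
print(resp.get("Attributes"))
```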

", + "endpointdiscovery":{ + } }, "UpdateTable":{ "name":"UpdateTable", @@ -502,7 +574,9 @@ {"shape":"LimitExceededException"}, {"shape":"InternalServerError"} ], - "documentation":"

Modifies the provisioned throughput settings, global secondary indexes, or DynamoDB Streams settings for a given table.

You can only perform one of the following operations at once:

UpdateTable is an asynchronous operation; while it is executing, the table status changes from ACTIVE to UPDATING. While it is UPDATING, you cannot issue another UpdateTable request. When the table returns to the ACTIVE state, the UpdateTable operation is complete.

" + "documentation":"

Modifies the provisioned throughput settings, global secondary indexes, or DynamoDB Streams settings for a given table.

You can only perform one of the following operations at once:

UpdateTable is an asynchronous operation; while it is executing, the table status changes from ACTIVE to UPDATING. While it is UPDATING, you cannot issue another UpdateTable request. When the table returns to the ACTIVE state, the UpdateTable operation is complete.
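A sketch of the asynchronous behavior on a hypothetical table; it assumes boto3's table_exists waiter, which polls DescribeTable until TableStatus is ACTIVE again and so can double as a wait for the update to finish:

```python
import boto3

dynamodb = boto3.client("dynamodb")

# The call returns while the table is still UPDATING ...
dynamodb.update_table(
    TableName="Music",
    ProvisionedThroughput={"ReadCapacityUnits": 10, "WriteCapacityUnits": 5},
)
# ... so block until the status returns to ACTIVE.
dynamodb.get_waiter("table_exists").wait(TableName="Music")
```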

", + "endpointdiscovery":{ + } }, "UpdateTimeToLive":{ "name":"UpdateTimeToLive", @@ -518,7 +592,9 @@ {"shape":"LimitExceededException"}, {"shape":"InternalServerError"} ], - "documentation":"

The UpdateTimeToLive method will enable or disable TTL for the specified table. A successful UpdateTimeToLive call returns the current TimeToLiveSpecification; it may take up to one hour for the change to fully process. Any additional UpdateTimeToLive calls for the same table during this one-hour period result in a ValidationException.

TTL compares the current time in epoch time format to the time stored in the TTL attribute of an item. If the epoch time value stored in the attribute is less than the current time, the item is marked as expired and subsequently deleted.

The epoch time format is the number of seconds elapsed since 12:00:00 AM January 1st, 1970 UTC.

DynamoDB deletes expired items on a best-effort basis to ensure availability of throughput for other data operations.

DynamoDB typically deletes expired items within two days of expiration. The exact duration within which an item gets deleted after expiration is specific to the nature of the workload. Items that have expired and not been deleted will still show up in reads, queries, and scans.

As items are deleted, they are removed from any Local Secondary Index and Global Secondary Index immediately in the same eventually consistent way as a standard delete operation.

For more information, see Time To Live in the Amazon DynamoDB Developer Guide.

" + "documentation":"

The UpdateTimeToLive method will enable or disable TTL for the specified table. A successful UpdateTimeToLive call returns the current TimeToLiveSpecification; it may take up to one hour for the change to fully process. Any additional UpdateTimeToLive calls for the same table during this one-hour period result in a ValidationException.

TTL compares the current time in epoch time format to the time stored in the TTL attribute of an item. If the epoch time value stored in the attribute is less than the current time, the item is marked as expired and subsequently deleted.

The epoch time format is the number of seconds elapsed since 12:00:00 AM January 1st, 1970 UTC.

DynamoDB deletes expired items on a best-effort basis to ensure availability of throughput for other data operations.

DynamoDB typically deletes expired items within two days of expiration. The exact duration within which an item gets deleted after expiration is specific to the nature of the workload. Items that have expired and not been deleted will still show up in reads, queries, and scans.

As items are deleted, they are removed from any Local Secondary Index and Global Secondary Index immediately in the same eventually consistent way as a standard delete operation.

For more information, see Time To Live in the Amazon DynamoDB Developer Guide.
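A sketch tying the pieces together on a hypothetical table: enable TTL on an ExpiresAt attribute, then write an item carrying its expiry as epoch seconds:

```python
import time

import boto3

dynamodb = boto3.client("dynamodb")

dynamodb.update_time_to_live(
    TableName="Music",
    TimeToLiveSpecification={"Enabled": True, "AttributeName": "ExpiresAt"},
)

# Items opt in by storing their expiry as epoch seconds in that attribute.
expires_at = int(time.time()) + 7 * 24 * 3600  # roughly one week from now
dynamodb.put_item(
    TableName="Music",
    Item={
        "Artist": {"S": "No One You Know"},
        "SongTitle": {"S": "Call Me Today"},
        "ExpiresAt": {"N": str(expires_at)},
    },
)
```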

", + "endpointdiscovery":{ + } } }, "shapes":{ @@ -1484,6 +1560,18 @@ } } }, + "DescribeEndpointsRequest":{ + "type":"structure", + "members":{ + } + }, + "DescribeEndpointsResponse":{ + "type":"structure", + "required":["Endpoints"], + "members":{ + "Endpoints":{"shape":"Endpoints"} + } + }, "DescribeGlobalTableInput":{ "type":"structure", "required":["GlobalTableName"], @@ -1595,6 +1683,21 @@ } }, "Double":{"type":"double"}, + "Endpoint":{ + "type":"structure", + "required":[ + "Address", + "CachePeriodInMinutes" + ], + "members":{ + "Address":{"shape":"String"}, + "CachePeriodInMinutes":{"shape":"Long"} + } + }, + "Endpoints":{ + "type":"list", + "member":{"shape":"Endpoint"} + }, "ErrorMessage":{"type":"string"}, "ExpectedAttributeMap":{ "type":"map", diff --git a/botocore/data/elb/2012-06-01/service-2.json b/botocore/data/elb/2012-06-01/service-2.json index 5ff2b5ebe8..da1a0da77c 100644 --- a/botocore/data/elb/2012-06-01/service-2.json +++ b/botocore/data/elb/2012-06-01/service-2.json @@ -27,7 +27,7 @@ {"shape":"TooManyTagsException"}, {"shape":"DuplicateTagKeysException"} ], - "documentation":"

Adds the specified tags to the specified load balancer. Each load balancer can have a maximum of 10 tags.

Each tag consists of a key and an optional value. If a tag with the same key is already associated with the load balancer, AddTags updates its value.

For more information, see Tag Your Classic Load Balancer in the Classic Load Balancer Guide.

" + "documentation":"

Adds the specified tags to the specified load balancer. Each load balancer can have a maximum of 10 tags.

Each tag consists of a key and an optional value. If a tag with the same key is already associated with the load balancer, AddTags updates its value.

For more information, see Tag Your Classic Load Balancer in the Classic Load Balancers Guide.

" }, "ApplySecurityGroupsToLoadBalancer":{ "name":"ApplySecurityGroupsToLoadBalancer", @@ -45,7 +45,7 @@ {"shape":"InvalidConfigurationRequestException"}, {"shape":"InvalidSecurityGroupException"} ], - "documentation":"

Associates one or more security groups with your load balancer in a virtual private cloud (VPC). The specified security groups override the previously associated security groups.

For more information, see Security Groups for Load Balancers in a VPC in the Classic Load Balancer Guide.

" + "documentation":"

Associates one or more security groups with your load balancer in a virtual private cloud (VPC). The specified security groups override the previously associated security groups.

For more information, see Security Groups for Load Balancers in a VPC in the Classic Load Balancers Guide.

" }, "AttachLoadBalancerToSubnets":{ "name":"AttachLoadBalancerToSubnets", @@ -64,7 +64,7 @@ {"shape":"SubnetNotFoundException"}, {"shape":"InvalidSubnetException"} ], - "documentation":"

Adds one or more subnets to the set of configured subnets for the specified load balancer.

The load balancer evenly distributes requests across all registered subnets. For more information, see Add or Remove Subnets for Your Load Balancer in a VPC in the Classic Load Balancer Guide.

" + "documentation":"

Adds one or more subnets to the set of configured subnets for the specified load balancer.

The load balancer evenly distributes requests across all registered subnets. For more information, see Add or Remove Subnets for Your Load Balancer in a VPC in the Classic Load Balancers Guide.

" }, "ConfigureHealthCheck":{ "name":"ConfigureHealthCheck", @@ -80,7 +80,7 @@ "errors":[ {"shape":"AccessPointNotFoundException"} ], - "documentation":"

Specifies the health check settings to use when evaluating the health state of your EC2 instances.

For more information, see Configure Health Checks for Your Load Balancer in the Classic Load Balancer Guide.

" + "documentation":"

Specifies the health check settings to use when evaluating the health state of your EC2 instances.

For more information, see Configure Health Checks for Your Load Balancer in the Classic Load Balancers Guide.

" }, "CreateAppCookieStickinessPolicy":{ "name":"CreateAppCookieStickinessPolicy", @@ -99,7 +99,7 @@ {"shape":"TooManyPoliciesException"}, {"shape":"InvalidConfigurationRequestException"} ], - "documentation":"

Generates a stickiness policy with sticky session lifetimes that follow that of an application-generated cookie. This policy can be associated only with HTTP/HTTPS listeners.

This policy is similar to the policy created by CreateLBCookieStickinessPolicy, except that the lifetime of the special Elastic Load Balancing cookie, AWSELB, follows the lifetime of the application-generated cookie specified in the policy configuration. The load balancer only inserts a new stickiness cookie when the application response includes a new application cookie.

If the application cookie is explicitly removed or expires, the session stops being sticky until a new application cookie is issued.

For more information, see Application-Controlled Session Stickiness in the Classic Load Balancer Guide.

" + "documentation":"

Generates a stickiness policy with sticky session lifetimes that follow that of an application-generated cookie. This policy can be associated only with HTTP/HTTPS listeners.

This policy is similar to the policy created by CreateLBCookieStickinessPolicy, except that the lifetime of the special Elastic Load Balancing cookie, AWSELB, follows the lifetime of the application-generated cookie specified in the policy configuration. The load balancer only inserts a new stickiness cookie when the application response includes a new application cookie.

If the application cookie is explicitly removed or expires, the session stops being sticky until a new application cookie is issued.

For more information, see Application-Controlled Session Stickiness in the Classic Load Balancers Guide.

" }, "CreateLBCookieStickinessPolicy":{ "name":"CreateLBCookieStickinessPolicy", @@ -118,7 +118,7 @@ {"shape":"TooManyPoliciesException"}, {"shape":"InvalidConfigurationRequestException"} ], - "documentation":"

Generates a stickiness policy with sticky session lifetimes controlled by the lifetime of the browser (user-agent) or a specified expiration period. This policy can be associated only with HTTP/HTTPS listeners.

When a load balancer implements this policy, the load balancer uses a special cookie to track the instance for each request. When the load balancer receives a request, it first checks to see if this cookie is present in the request. If so, the load balancer sends the request to the application server specified in the cookie. If not, the load balancer sends the request to a server that is chosen based on the existing load-balancing algorithm.

A cookie is inserted into the response for binding subsequent requests from the same user to that server. The validity of the cookie is based on the cookie expiration time, which is specified in the policy configuration.

For more information, see Duration-Based Session Stickiness in the Classic Load Balancer Guide.

" + "documentation":"

Generates a stickiness policy with sticky session lifetimes controlled by the lifetime of the browser (user-agent) or a specified expiration period. This policy can be associated only with HTTP/HTTPS listeners.

When a load balancer implements this policy, the load balancer uses a special cookie to track the instance for each request. When the load balancer receives a request, it first checks to see if this cookie is present in the request. If so, the load balancer sends the request to the application server specified in the cookie. If not, the load balancer sends the request to a server that is chosen based on the existing load-balancing algorithm.

A cookie is inserted into the response for binding subsequent requests from the same user to that server. The validity of the cookie is based on the cookie expiration time, which is specified in the policy configuration.

For more information, see Duration-Based Session Stickiness in the Classic Load Balancers Guide.
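A sketch contrasting the two stickiness policies; the load balancer, policy, and cookie names are hypothetical, and a policy only takes effect once attached to an HTTP/HTTPS listener:

```python
import boto3

elb = boto3.client("elb")
LB = "my-classic-elb"  # hypothetical load balancer

# Stickiness that follows an application-generated cookie ...
elb.create_app_cookie_stickiness_policy(
    LoadBalancerName=LB,
    PolicyName="app-cookie-policy",
    CookieName="JSESSIONID",
)
# ... versus duration-based stickiness via the AWSELB cookie.
elb.create_lb_cookie_stickiness_policy(
    LoadBalancerName=LB,
    PolicyName="lb-cookie-policy",
    CookieExpirationPeriod=300,  # seconds; omit to track the browser session
)
# Attach one of the policies to the listener on port 80.
elb.set_load_balancer_policies_of_listener(
    LoadBalancerName=LB,
    LoadBalancerPort=80,
    PolicyNames=["app-cookie-policy"],
)
```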

" }, "CreateLoadBalancer":{ "name":"CreateLoadBalancer", @@ -145,7 +145,7 @@ {"shape":"UnsupportedProtocolException"}, {"shape":"OperationNotPermittedException"} ], - "documentation":"

Creates a Classic Load Balancer.

You can add listeners, security groups, subnets, and tags when you create your load balancer, or you can add them later using CreateLoadBalancerListeners, ApplySecurityGroupsToLoadBalancer, AttachLoadBalancerToSubnets, and AddTags.

To describe your current load balancers, see DescribeLoadBalancers. When you are finished with a load balancer, you can delete it using DeleteLoadBalancer.

You can create up to 20 load balancers per region per account. You can request an increase for the number of load balancers for your account. For more information, see Limits for Your Classic Load Balancer in the Classic Load Balancer Guide.

" + "documentation":"

Creates a Classic Load Balancer.

You can add listeners, security groups, subnets, and tags when you create your load balancer, or you can add them later using CreateLoadBalancerListeners, ApplySecurityGroupsToLoadBalancer, AttachLoadBalancerToSubnets, and AddTags.

To describe your current load balancers, see DescribeLoadBalancers. When you are finished with a load balancer, you can delete it using DeleteLoadBalancer.

You can create up to 20 load balancers per region per account. You can request an increase for the number of load balancers for your account. For more information, see Limits for Your Classic Load Balancer in the Classic Load Balancers Guide.
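A minimal sketch that creates an HTTP load balancer with a listener and a tag in one call; every name and ID below is hypothetical:

```python
import boto3

elb = boto3.client("elb")

resp = elb.create_load_balancer(
    LoadBalancerName="my-classic-elb",
    Listeners=[{
        "Protocol": "HTTP",
        "LoadBalancerPort": 80,
        "InstanceProtocol": "HTTP",
        "InstancePort": 80,
    }],
    Subnets=["subnet-0abc1234"],
    SecurityGroups=["sg-0abc1234"],
    Tags=[{"Key": "project", "Value": "demo"}],
)
print(resp["DNSName"])  # the only field CreateLoadBalancer returns
```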

" }, "CreateLoadBalancerListeners":{ "name":"CreateLoadBalancerListeners", @@ -165,7 +165,7 @@ {"shape":"InvalidConfigurationRequestException"}, {"shape":"UnsupportedProtocolException"} ], - "documentation":"

Creates one or more listeners for the specified load balancer. If a listener with the specified port does not already exist, it is created; otherwise, the properties of the new listener must match the properties of the existing listener.

For more information, see Listeners for Your Classic Load Balancer in the Classic Load Balancer Guide.

" + "documentation":"

Creates one or more listeners for the specified load balancer. If a listener with the specified port does not already exist, it is created; otherwise, the properties of the new listener must match the properties of the existing listener.

For more information, see Listeners for Your Classic Load Balancer in the Classic Load Balancers Guide.

" }, "CreateLoadBalancerPolicy":{ "name":"CreateLoadBalancerPolicy", @@ -248,7 +248,7 @@ {"shape":"AccessPointNotFoundException"}, {"shape":"InvalidEndPointException"} ], - "documentation":"

Deregisters the specified instances from the specified load balancer. After the instance is deregistered, it no longer receives traffic from the load balancer.

You can use DescribeLoadBalancers to verify that the instance is deregistered from the load balancer.

For more information, see Register or De-Register EC2 Instances in the Classic Load Balancer Guide.

" + "documentation":"

Deregisters the specified instances from the specified load balancer. After the instance is deregistered, it no longer receives traffic from the load balancer.

You can use DescribeLoadBalancers to verify that the instance is deregistered from the load balancer.

For more information, see Register or De-Register EC2 Instances in the Classic Load Balancers Guide.

" }, "DescribeAccountLimits":{ "name":"DescribeAccountLimits", @@ -261,7 +261,7 @@ "shape":"DescribeAccountLimitsOutput", "resultWrapper":"DescribeAccountLimitsResult" }, - "documentation":"

Describes the current Elastic Load Balancing resource limits for your AWS account.

For more information, see Limits for Your Classic Load Balancer in the Classic Load Balancer Guide.

" + "documentation":"

Describes the current Elastic Load Balancing resource limits for your AWS account.

For more information, see Limits for Your Classic Load Balancer in the Classic Load Balancers Guide.

" }, "DescribeInstanceHealth":{ "name":"DescribeInstanceHealth", @@ -395,7 +395,7 @@ {"shape":"AccessPointNotFoundException"}, {"shape":"InvalidConfigurationRequestException"} ], - "documentation":"

Removes the specified Availability Zones from the set of Availability Zones for the specified load balancer.

There must be at least one Availability Zone registered with a load balancer at all times. After an Availability Zone is removed, all instances registered with the load balancer that are in the removed Availability Zone go into the OutOfService state. Then, the load balancer attempts to equally balance the traffic among its remaining Availability Zones.

For more information, see Add or Remove Availability Zones in the Classic Load Balancer Guide.

" + "documentation":"

Removes the specified Availability Zones from the set of Availability Zones for the specified load balancer in EC2-Classic or a default VPC.

For load balancers in a non-default VPC, use DetachLoadBalancerFromSubnets.

There must be at least one Availability Zone registered with a load balancer at all times. After an Availability Zone is removed, all instances registered with the load balancer that are in the removed Availability Zone go into the OutOfService state. Then, the load balancer attempts to equally balance the traffic among its remaining Availability Zones.

For more information, see Add or Remove Availability Zones in the Classic Load Balancers Guide.

" }, "EnableAvailabilityZonesForLoadBalancer":{ "name":"EnableAvailabilityZonesForLoadBalancer", @@ -411,7 +411,7 @@ "errors":[ {"shape":"AccessPointNotFoundException"} ], - "documentation":"

Adds the specified Availability Zones to the set of Availability Zones for the specified load balancer.

The load balancer evenly distributes requests across all its registered Availability Zones that contain instances.

For more information, see Add or Remove Availability Zones in the Classic Load Balancer Guide.

" + "documentation":"

Adds the specified Availability Zones to the set of Availability Zones for the specified load balancer in EC2-Classic or a default VPC.

For load balancers in a non-default VPC, use AttachLoadBalancerToSubnets.

The load balancer evenly distributes requests across all its registered Availability Zones that contain instances. For more information, see Add or Remove Availability Zones in the Classic Load Balancers Guide.

" }, "ModifyLoadBalancerAttributes":{ "name":"ModifyLoadBalancerAttributes", @@ -429,7 +429,7 @@ {"shape":"LoadBalancerAttributeNotFoundException"}, {"shape":"InvalidConfigurationRequestException"} ], - "documentation":"

Modifies the attributes of the specified load balancer.

You can modify the load balancer attributes, such as AccessLogs, ConnectionDraining, and CrossZoneLoadBalancing, by either enabling or disabling them. Or, you can modify the load balancer attribute ConnectionSettings by specifying an idle connection timeout value for your load balancer.

For more information, see the following in the Classic Load Balancer Guide:

" + "documentation":"

Modifies the attributes of the specified load balancer.

You can modify the load balancer attributes, such as AccessLogs, ConnectionDraining, and CrossZoneLoadBalancing, by either enabling or disabling them. Or, you can modify the load balancer attribute ConnectionSettings by specifying an idle connection timeout value for your load balancer.

For more information, see the following in the Classic Load Balancers Guide:
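A sketch toggling the attributes named above on a hypothetical load balancer:

```python
import boto3

elb = boto3.client("elb")

elb.modify_load_balancer_attributes(
    LoadBalancerName="my-classic-elb",  # hypothetical
    LoadBalancerAttributes={
        "CrossZoneLoadBalancing": {"Enabled": True},
        "ConnectionDraining": {"Enabled": True, "Timeout": 300},
        "ConnectionSettings": {"IdleTimeout": 120},  # seconds; default is 60
    },
)
```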

" }, "RegisterInstancesWithLoadBalancer":{ "name":"RegisterInstancesWithLoadBalancer", @@ -446,7 +446,7 @@ {"shape":"AccessPointNotFoundException"}, {"shape":"InvalidEndPointException"} ], - "documentation":"

Adds the specified instances to the specified load balancer.

The instance must be a running instance in the same network as the load balancer (EC2-Classic or the same VPC). If you have EC2-Classic instances and a load balancer in a VPC with ClassicLink enabled, you can link the EC2-Classic instances to that VPC and then register the linked EC2-Classic instances with the load balancer in the VPC.

Note that RegisterInstancesWithLoadBalancer completes when the request has been registered. Instance registration takes a little time to complete. To check the state of the registered instances, use DescribeLoadBalancers or DescribeInstanceHealth.

After the instance is registered, it starts receiving traffic and requests from the load balancer. Any instance that is not in one of the Availability Zones registered for the load balancer is moved to the OutOfService state. If an Availability Zone is added to the load balancer later, any instances registered with the load balancer move to the InService state.

To deregister instances from a load balancer, use DeregisterInstancesFromLoadBalancer.

For more information, see Register or De-Register EC2 Instances in the Classic Load Balancer Guide.

" + "documentation":"

Adds the specified instances to the specified load balancer.

The instance must be a running instance in the same network as the load balancer (EC2-Classic or the same VPC). If you have EC2-Classic instances and a load balancer in a VPC with ClassicLink enabled, you can link the EC2-Classic instances to that VPC and then register the linked EC2-Classic instances with the load balancer in the VPC.

Note that RegisterInstancesWithLoadBalancer completes when the request has been registered. Instance registration takes a little time to complete. To check the state of the registered instances, use DescribeLoadBalancers or DescribeInstanceHealth.

After the instance is registered, it starts receiving traffic and requests from the load balancer. Any instance that is not in one of the Availability Zones registered for the load balancer is moved to the OutOfService state. If an Availability Zone is added to the load balancer later, any instances registered with the load balancer move to the InService state.

To deregister instances from a load balancer, use DeregisterInstancesFromLoadBalancer.

For more information, see Register or De-Register EC2 Instances in the Classic Load Balancers Guide.
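A sketch of the register/verify/deregister cycle on hypothetical names, using boto3's instance_in_service waiter instead of polling DescribeInstanceHealth by hand:

```python
import boto3

elb = boto3.client("elb")
instances = [{"InstanceId": "i-0abc1234"}]  # hypothetical instance

elb.register_instances_with_load_balancer(
    LoadBalancerName="my-classic-elb", Instances=instances,
)
# Registration completes asynchronously; this waiter polls
# DescribeInstanceHealth until the instance reports InService.
elb.get_waiter("instance_in_service").wait(
    LoadBalancerName="my-classic-elb", Instances=instances,
)
elb.deregister_instances_from_load_balancer(
    LoadBalancerName="my-classic-elb", Instances=instances,
)
```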

" }, "RemoveTags":{ "name":"RemoveTags", @@ -482,7 +482,7 @@ {"shape":"InvalidConfigurationRequestException"}, {"shape":"UnsupportedProtocolException"} ], - "documentation":"

Sets the certificate that terminates the specified listener's SSL connections. The specified certificate replaces any prior certificate that was used on the same load balancer and port.

For more information about updating your SSL certificate, see Replace the SSL Certificate for Your Load Balancer in the Classic Load Balancer Guide.

" + "documentation":"

Sets the certificate that terminates the specified listener's SSL connections. The specified certificate replaces any prior certificate that was used on the same load balancer and port.

For more information about updating your SSL certificate, see Replace the SSL Certificate for Your Load Balancer in the Classic Load Balancers Guide.

" }, "SetLoadBalancerPoliciesForBackendServer":{ "name":"SetLoadBalancerPoliciesForBackendServer", @@ -500,7 +500,7 @@ {"shape":"PolicyNotFoundException"}, {"shape":"InvalidConfigurationRequestException"} ], - "documentation":"

Replaces the set of policies associated with the specified port on which the EC2 instance is listening with a new set of policies. At this time, only the back-end server authentication policy type can be applied to the instance ports; this policy type is composed of multiple public key policies.

Each time you use SetLoadBalancerPoliciesForBackendServer to enable the policies, use the PolicyNames parameter to list the policies that you want to enable.

You can use DescribeLoadBalancers or DescribeLoadBalancerPolicies to verify that the policy is associated with the EC2 instance.

For more information about enabling back-end instance authentication, see Configure Back-end Instance Authentication in the Classic Load Balancer Guide. For more information about Proxy Protocol, see Configure Proxy Protocol Support in the Classic Load Balancer Guide.

" + "documentation":"

Replaces the set of policies associated with the specified port on which the EC2 instance is listening with a new set of policies. At this time, only the back-end server authentication policy type can be applied to the instance ports; this policy type is composed of multiple public key policies.

Each time you use SetLoadBalancerPoliciesForBackendServer to enable the policies, use the PolicyNames parameter to list the policies that you want to enable.

You can use DescribeLoadBalancers or DescribeLoadBalancerPolicies to verify that the policy is associated with the EC2 instance.

For more information about enabling back-end instance authentication, see Configure Back-end Instance Authentication in the Classic Load Balancers Guide. For more information about Proxy Protocol, see Configure Proxy Protocol Support in the Classic Load Balancers Guide.
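A sketch of the Proxy Protocol case mentioned above; the load balancer and policy names are hypothetical:

```python
import boto3

elb = boto3.client("elb")

# Create a Proxy Protocol policy, then set it as the complete policy
# list for the back-end port.
elb.create_load_balancer_policy(
    LoadBalancerName="my-classic-elb",
    PolicyName="proxy-protocol-policy",
    PolicyTypeName="ProxyProtocolPolicyType",
    PolicyAttributes=[{"AttributeName": "ProxyProtocol", "AttributeValue": "true"}],
)
elb.set_load_balancer_policies_for_backend_server(
    LoadBalancerName="my-classic-elb",
    InstancePort=80,
    PolicyNames=["proxy-protocol-policy"],  # replaces any existing set
)
```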

" }, "SetLoadBalancerPoliciesOfListener":{ "name":"SetLoadBalancerPoliciesOfListener", @@ -519,7 +519,7 @@ {"shape":"ListenerNotFoundException"}, {"shape":"InvalidConfigurationRequestException"} ], - "documentation":"

Replaces the current set of policies for the specified load balancer port with the specified set of policies.

To enable back-end server authentication, use SetLoadBalancerPoliciesForBackendServer.

For more information about setting policies, see Update the SSL Negotiation Configuration, Duration-Based Session Stickiness, and Application-Controlled Session Stickiness in the Classic Load Balancer Guide.

" + "documentation":"

Replaces the current set of policies for the specified load balancer port with the specified set of policies.

To enable back-end server authentication, use SetLoadBalancerPoliciesForBackendServer.

For more information about setting policies, see Update the SSL Negotiation Configuration, Duration-Based Session Stickiness, and Application-Controlled Session Stickiness in the Classic Load Balancers Guide.

" } }, "shapes":{ @@ -828,7 +828,7 @@ }, "Listeners":{ "shape":"Listeners", - "documentation":"

The listeners.

For more information, see Listeners for Your Classic Load Balancer in the Classic Load Balancer Guide.

" + "documentation":"

The listeners.

For more information, see Listeners for Your Classic Load Balancer in the Classic Load Balancers Guide.

" }, "AvailabilityZones":{ "shape":"AvailabilityZones", @@ -848,7 +848,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

A list of tags to assign to the load balancer.

For more information about tagging your load balancer, see Tag Your Classic Load Balancer in the Classic Load Balancer Guide.

" + "documentation":"

A list of tags to assign to the load balancer.

For more information about tagging your load balancer, see Tag Your Classic Load Balancer in the Classic Load Balancers Guide.

" } }, "documentation":"

Contains the parameters for CreateLoadBalancer.

" @@ -1061,6 +1061,7 @@ "type":"structure", "members":{ }, + "documentation":"

A request made by Elastic Load Balancing to another service exceeds the maximum request rate permitted for your account.

", "error":{ "code":"DependencyThrottle", "httpStatusCode":400, @@ -1526,7 +1527,7 @@ "members":{ "Name":{ "shape":"Name", - "documentation":"

The name of the limit. The possible values are:

" + "documentation":"

The name of the limit. The possible values are:

" }, "Max":{ "shape":"Max", @@ -1568,7 +1569,7 @@ "documentation":"

The Amazon Resource Name (ARN) of the server certificate.

" } }, - "documentation":"

Information about a listener.

For information about the protocols and the ports supported by Elastic Load Balancing, see Listeners for Your Classic Load Balancer in the Classic Load Balancer Guide.

" + "documentation":"

Information about a listener.

For information about the protocols and the ports supported by Elastic Load Balancing, see Listeners for Your Classic Load Balancer in the Classic Load Balancers Guide.

" }, "ListenerDescription":{ "type":"structure", @@ -1621,19 +1622,19 @@ "members":{ "CrossZoneLoadBalancing":{ "shape":"CrossZoneLoadBalancing", - "documentation":"

If enabled, the load balancer routes the request traffic evenly across all instances regardless of the Availability Zones.

For more information, see Configure Cross-Zone Load Balancing in the Classic Load Balancer Guide.

" + "documentation":"

If enabled, the load balancer routes the request traffic evenly across all instances regardless of the Availability Zones.

For more information, see Configure Cross-Zone Load Balancing in the Classic Load Balancers Guide.

" }, "AccessLog":{ "shape":"AccessLog", - "documentation":"

If enabled, the load balancer captures detailed information of all requests and delivers the information to the Amazon S3 bucket that you specify.

For more information, see Enable Access Logs in the Classic Load Balancer Guide.

" + "documentation":"

If enabled, the load balancer captures detailed information of all requests and delivers the information to the Amazon S3 bucket that you specify.

For more information, see Enable Access Logs in the Classic Load Balancers Guide.

" }, "ConnectionDraining":{ "shape":"ConnectionDraining", - "documentation":"

If enabled, the load balancer allows existing requests to complete before the load balancer shifts traffic away from a deregistered or unhealthy instance.

For more information, see Configure Connection Draining in the Classic Load Balancer Guide.

" + "documentation":"

If enabled, the load balancer allows existing requests to complete before the load balancer shifts traffic away from a deregistered or unhealthy instance.

For more information, see Configure Connection Draining in the Classic Load Balancers Guide.

" }, "ConnectionSettings":{ "shape":"ConnectionSettings", - "documentation":"

If enabled, the load balancer allows the connections to remain idle (no data is sent over the connection) for the specified duration.

By default, Elastic Load Balancing maintains a 60-second idle connection timeout for both front-end and back-end connections of your load balancer. For more information, see Configure Idle Connection Timeout in the Classic Load Balancer Guide.

" + "documentation":"

If enabled, the load balancer allows the connections to remain idle (no data is sent over the connection) for the specified duration.

By default, Elastic Load Balancing maintains a 60-second idle connection timeout for both front-end and back-end connections of your load balancer. For more information, see Configure Idle Connection Timeout in the Classic Load Balancers Guide.

" }, "AdditionalAttributes":{ "shape":"AdditionalAttributes", @@ -1655,7 +1656,7 @@ }, "CanonicalHostedZoneName":{ "shape":"DNSName", - "documentation":"

The DNS name of the load balancer.

For more information, see Configure a Custom Domain Name in the Classic Load Balancer Guide.

" + "documentation":"

The DNS name of the load balancer.

For more information, see Configure a Custom Domain Name in the Classic Load Balancers Guide.

" }, "CanonicalHostedZoneNameID":{ "shape":"DNSName", diff --git a/botocore/data/rds/2014-10-31/service-2.json b/botocore/data/rds/2014-10-31/service-2.json index 944c1723f3..1fbedc9106 100644 --- a/botocore/data/rds/2014-10-31/service-2.json +++ b/botocore/data/rds/2014-10-31/service-2.json @@ -147,7 +147,7 @@ {"shape":"SnapshotQuotaExceededFault"}, {"shape":"KMSKeyNotAccessibleFault"} ], - "documentation":"

Copies a snapshot of a DB cluster.

To copy a DB cluster snapshot from a shared manual DB cluster snapshot, SourceDBClusterSnapshotIdentifier must be the Amazon Resource Name (ARN) of the shared DB cluster snapshot.

You can copy an encrypted DB cluster snapshot from another AWS Region. In that case, the AWS Region where you call the CopyDBClusterSnapshot action is the destination AWS Region for the encrypted DB cluster snapshot to be copied to. To copy an encrypted DB cluster snapshot from another AWS Region, you must provide the following values:

To cancel the copy operation once it is in progress, delete the target DB cluster snapshot identified by TargetDBClusterSnapshotIdentifier while that DB cluster snapshot is in \"copying\" status.

For more information on copying encrypted DB cluster snapshots from one AWS Region to another, see Copying a Snapshot in the Amazon Aurora User Guide.

For more information on Amazon Aurora, see the Amazon Aurora User Guide.

" + "documentation":"

Copies a snapshot of a DB cluster.

To copy a DB cluster snapshot from a shared manual DB cluster snapshot, SourceDBClusterSnapshotIdentifier must be the Amazon Resource Name (ARN) of the shared DB cluster snapshot.

You can copy an encrypted DB cluster snapshot from another AWS Region. In that case, the AWS Region where you call the CopyDBClusterSnapshot action is the destination AWS Region for the encrypted DB cluster snapshot to be copied to. To copy an encrypted DB cluster snapshot from another AWS Region, you must provide the following values:

To cancel the copy operation once it is in progress, delete the target DB cluster snapshot identified by TargetDBClusterSnapshotIdentifier while that DB cluster snapshot is in \"copying\" status.

For more information on copying encrypted DB cluster snapshots from one AWS Region to another, see Copying a Snapshot in the Amazon Aurora User Guide.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.
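A hedged sketch of a cross-region copy of an encrypted snapshot; the ARN, identifiers, and key are hypothetical, and it assumes boto3's SourceRegion convenience parameter, which has the SDK generate the PreSignedUrl for you:

```python
import boto3

# The client's region is the destination for the copy.
rds = boto3.client("rds", region_name="us-east-1")

rds.copy_db_cluster_snapshot(
    # Shared or cross-region sources must be identified by ARN.
    SourceDBClusterSnapshotIdentifier=(
        "arn:aws:rds:us-west-2:123456789012:cluster-snapshot:source-snapshot"
    ),
    TargetDBClusterSnapshotIdentifier="copied-snapshot",
    KmsKeyId="alias/aws/rds",  # destination-region key for an encrypted copy
    SourceRegion="us-west-2",
)
```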

" }, "CopyDBParameterGroup":{ "name":"CopyDBParameterGroup", @@ -233,7 +233,7 @@ {"shape":"DBInstanceNotFoundFault"}, {"shape":"DBSubnetGroupDoesNotCoverEnoughAZs"} ], - "documentation":"

Creates a new Amazon Aurora DB cluster.

You can use the ReplicationSourceIdentifier parameter to create the DB cluster as a Read Replica of another DB cluster or Amazon RDS MySQL DB instance. For cross-region replication where the DB cluster identified by ReplicationSourceIdentifier is encrypted, you must also specify the PreSignedUrl parameter.

For more information on Amazon Aurora, see the Amazon Aurora User Guide.

" + "documentation":"

Creates a new Amazon Aurora DB cluster.

You can use the ReplicationSourceIdentifier parameter to create the DB cluster as a Read Replica of another DB cluster or Amazon RDS MySQL DB instance. For cross-region replication where the DB cluster identified by ReplicationSourceIdentifier is encrypted, you must also specify the PreSignedUrl parameter.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

" }, "CreateDBClusterParameterGroup":{ "name":"CreateDBClusterParameterGroup", @@ -250,7 +250,7 @@ {"shape":"DBParameterGroupQuotaExceededFault"}, {"shape":"DBParameterGroupAlreadyExistsFault"} ], - "documentation":"

Creates a new DB cluster parameter group.

Parameters in a DB cluster parameter group apply to all of the instances in a DB cluster.

A DB cluster parameter group is initially created with the default parameters for the database engine used by instances in the DB cluster. To provide custom values for any of the parameters, you must modify the group after creating it using ModifyDBClusterParameterGroup. Once you've created a DB cluster parameter group, you need to associate it with your DB cluster using ModifyDBCluster. When you associate a new DB cluster parameter group with a running DB cluster, you need to reboot the DB instances in the DB cluster without failover for the new DB cluster parameter group and associated settings to take effect.

After you create a DB cluster parameter group, you should wait at least 5 minutes before creating your first DB cluster that uses that DB cluster parameter group as the default parameter group. This allows Amazon RDS to fully complete the create action before the DB cluster parameter group is used as the default for a new DB cluster. This is especially important for parameters that are critical when creating the default database for a DB cluster, such as the character set for the default database defined by the character_set_database parameter. You can use the Parameter Groups option of the Amazon RDS console or the DescribeDBClusterParameters command to verify that your DB cluster parameter group has been created or modified.

For more information on Amazon Aurora, see the Amazon Aurora User Guide .

" + "documentation":"

Creates a new DB cluster parameter group.

Parameters in a DB cluster parameter group apply to all of the instances in a DB cluster.

A DB cluster parameter group is initially created with the default parameters for the database engine used by instances in the DB cluster. To provide custom values for any of the parameters, you must modify the group after creating it using ModifyDBClusterParameterGroup. Once you've created a DB cluster parameter group, you need to associate it with your DB cluster using ModifyDBCluster. When you associate a new DB cluster parameter group with a running DB cluster, you need to reboot the DB instances in the DB cluster without failover for the new DB cluster parameter group and associated settings to take effect.

After you create a DB cluster parameter group, you should wait at least 5 minutes before creating your first DB cluster that uses that DB cluster parameter group as the default parameter group. This allows Amazon RDS to fully complete the create action before the DB cluster parameter group is used as the default for a new DB cluster. This is especially important for parameters that are critical when creating the default database for a DB cluster, such as the character set for the default database defined by the character_set_database parameter. You can use the Parameter Groups option of the Amazon RDS console or the DescribeDBClusterParameters command to verify that your DB cluster parameter group has been created or modified.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

" }, "CreateDBClusterSnapshot":{ "name":"CreateDBClusterSnapshot", @@ -270,7 +270,7 @@ {"shape":"SnapshotQuotaExceededFault"}, {"shape":"InvalidDBClusterSnapshotStateFault"} ], - "documentation":"

Creates a snapshot of a DB cluster. For more information on Amazon Aurora, see the Amazon Aurora User Guide .

" + "documentation":"

Creates a snapshot of a DB cluster. For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

" }, "CreateDBInstance":{ "name":"CreateDBInstance", @@ -470,7 +470,7 @@ {"shape":"SnapshotQuotaExceededFault"}, {"shape":"InvalidDBClusterSnapshotStateFault"} ], - "documentation":"

The DeleteDBCluster action deletes a previously provisioned DB cluster. When you delete a DB cluster, all automated backups for that DB cluster are deleted and can't be recovered. Manual DB cluster snapshots of the specified DB cluster are not deleted.

For more information on Amazon Aurora, see the Amazon Aurora User Guide .

" + "documentation":"

The DeleteDBCluster action deletes a previously provisioned DB cluster. When you delete a DB cluster, all automated backups for that DB cluster are deleted and can't be recovered. Manual DB cluster snapshots of the specified DB cluster are not deleted.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

" }, "DeleteDBClusterParameterGroup":{ "name":"DeleteDBClusterParameterGroup", @@ -483,7 +483,7 @@ {"shape":"InvalidDBParameterGroupStateFault"}, {"shape":"DBParameterGroupNotFoundFault"} ], - "documentation":"

Deletes a specified DB cluster parameter group. The DB cluster parameter group to be deleted can't be associated with any DB clusters.

For more information on Amazon Aurora, see the Amazon Aurora User Guide .

" + "documentation":"

Deletes a specified DB cluster parameter group. The DB cluster parameter group to be deleted can't be associated with any DB clusters.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

" }, "DeleteDBClusterSnapshot":{ "name":"DeleteDBClusterSnapshot", @@ -500,7 +500,7 @@ {"shape":"InvalidDBClusterSnapshotStateFault"}, {"shape":"DBClusterSnapshotNotFoundFault"} ], - "documentation":"

Deletes a DB cluster snapshot. If the snapshot is being copied, the copy operation is terminated.

The DB cluster snapshot must be in the available state to be deleted.

For more information on Amazon Aurora, see the Amazon Aurora User Guide .

" + "documentation":"

Deletes a DB cluster snapshot. If the snapshot is being copied, the copy operation is terminated.

The DB cluster snapshot must be in the available state to be deleted.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

" }, "DeleteDBInstance":{ "name":"DeleteDBInstance", @@ -653,7 +653,7 @@ {"shape":"DBClusterNotFoundFault"}, {"shape":"DBClusterBacktrackNotFoundFault"} ], - "documentation":"

Returns information about backtracks for a DB cluster.

For more information on Amazon Aurora, see the Amazon Aurora User Guide .

" + "documentation":"

Returns information about backtracks for a DB cluster.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

" }, "DescribeDBClusterParameterGroups":{ "name":"DescribeDBClusterParameterGroups", @@ -669,7 +669,7 @@ "errors":[ {"shape":"DBParameterGroupNotFoundFault"} ], - "documentation":"

Returns a list of DBClusterParameterGroup descriptions. If a DBClusterParameterGroupName parameter is specified, the list will contain only the description of the specified DB cluster parameter group.

For more information on Amazon Aurora, see the Amazon Aurora User Guide .

" + "documentation":"

Returns a list of DBClusterParameterGroup descriptions. If a DBClusterParameterGroupName parameter is specified, the list will contain only the description of the specified DB cluster parameter group.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

" }, "DescribeDBClusterParameters":{ "name":"DescribeDBClusterParameters", @@ -685,7 +685,7 @@ "errors":[ {"shape":"DBParameterGroupNotFoundFault"} ], - "documentation":"

Returns the detailed parameter list for a particular DB cluster parameter group.

For more information on Amazon Aurora, see the Amazon Aurora User Guide .

" + "documentation":"

Returns the detailed parameter list for a particular DB cluster parameter group.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

" }, "DescribeDBClusterSnapshotAttributes":{ "name":"DescribeDBClusterSnapshotAttributes", @@ -717,7 +717,7 @@ "errors":[ {"shape":"DBClusterSnapshotNotFoundFault"} ], - "documentation":"

Returns information about DB cluster snapshots. This API action supports pagination.

For more information on Amazon Aurora, see the Amazon Aurora User Guide .

" + "documentation":"

Returns information about DB cluster snapshots. This API action supports pagination.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

" }, "DescribeDBClusters":{ "name":"DescribeDBClusters", @@ -733,7 +733,7 @@ "errors":[ {"shape":"DBClusterNotFoundFault"} ], - "documentation":"

Returns information about provisioned Aurora DB clusters. This API supports pagination.

For more information on Amazon Aurora, see the Amazon Aurora User Guide .

" + "documentation":"

Returns information about provisioned Aurora DB clusters. This API supports pagination.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

" }, "DescribeDBEngineVersions":{ "name":"DescribeDBEngineVersions", @@ -887,7 +887,7 @@ "shape":"DescribeEngineDefaultClusterParametersResult", "resultWrapper":"DescribeEngineDefaultClusterParametersResult" }, - "documentation":"

Returns the default engine and system parameter information for the cluster database engine.

For more information on Amazon Aurora, see the Amazon Aurora User Guide .

" + "documentation":"

Returns the default engine and system parameter information for the cluster database engine.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

" }, "DescribeEngineDefaultParameters":{ "name":"DescribeEngineDefaultParameters", @@ -1097,7 +1097,7 @@ {"shape":"InvalidDBClusterStateFault"}, {"shape":"InvalidDBInstanceStateFault"} ], - "documentation":"

Forces a failover for a DB cluster.

A failover for a DB cluster promotes one of the Aurora Replicas (read-only instances) in the DB cluster to be the primary instance (the cluster writer).

Amazon Aurora will automatically fail over to an Aurora Replica, if one exists, when the primary instance fails. You can force a failover when you want to simulate a failure of a primary instance for testing. Because each instance in a DB cluster has its own endpoint address, you will need to clean up and re-establish any existing connections that use those endpoint addresses when the failover is complete.

For more information on Amazon Aurora, see the Amazon Aurora User Guide .

" + "documentation":"

Forces a failover for a DB cluster.

A failover for a DB cluster promotes one of the Aurora Replicas (read-only instances) in the DB cluster to be the primary instance (the cluster writer).

Amazon Aurora will automatically fail over to an Aurora Replica, if one exists, when the primary instance fails. You can force a failover when you want to simulate a failure of a primary instance for testing. Because each instance in a DB cluster has its own endpoint address, you will need to clean up and re-establish any existing connections that use those endpoint addresses when the failover is complete.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

" }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -1159,7 +1159,7 @@ {"shape":"InvalidDBInstanceStateFault"}, {"shape":"DBClusterAlreadyExistsFault"} ], - "documentation":"

Modifies a setting for an Amazon Aurora DB cluster. You can change one or more database configuration parameters by specifying these parameters and the new values in the request. For more information on Amazon Aurora, see the Amazon Aurora User Guide .

" + "documentation":"

Modifies a setting for an Amazon Aurora DB cluster. You can change one or more database configuration parameters by specifying these parameters and the new values in the request. For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

" }, "ModifyDBClusterParameterGroup":{ "name":"ModifyDBClusterParameterGroup", @@ -1176,7 +1176,7 @@ {"shape":"DBParameterGroupNotFoundFault"}, {"shape":"InvalidDBParameterGroupStateFault"} ], - "documentation":"

Modifies the parameters of a DB cluster parameter group. To modify more than one parameter, submit a list of the following: ParameterName, ParameterValue, and ApplyMethod. A maximum of 20 parameters can be modified in a single request.

For more information on Amazon Aurora, see the Amazon Aurora User Guide .

Changes to dynamic parameters are applied immediately. Changes to static parameters require a reboot without failover to the DB cluster associated with the parameter group before the change can take effect.

After you create a DB cluster parameter group, you should wait at least 5 minutes before creating your first DB cluster that uses that DB cluster parameter group as the default parameter group. This allows Amazon RDS to fully complete the create action before the parameter group is used as the default for a new DB cluster. This is especially important for parameters that are critical when creating the default database for a DB cluster, such as the character set for the default database defined by the character_set_database parameter. You can use the Parameter Groups option of the Amazon RDS console or the DescribeDBClusterParameters command to verify that your DB cluster parameter group has been created or modified.

" + "documentation":"

Modifies the parameters of a DB cluster parameter group. To modify more than one parameter, submit a list of the following: ParameterName, ParameterValue, and ApplyMethod. A maximum of 20 parameters can be modified in a single request.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

Changes to dynamic parameters are applied immediately. Changes to static parameters require a reboot without failover to the DB cluster associated with the parameter group before the change can take effect.

After you create a DB cluster parameter group, you should wait at least 5 minutes before creating your first DB cluster that uses that DB cluster parameter group as the default parameter group. This allows Amazon RDS to fully complete the create action before the parameter group is used as the default for a new DB cluster. This is especially important for parameters that are critical when creating the default database for a DB cluster, such as the character set for the default database defined by the character_set_database parameter. You can use the Parameter Groups option of the Amazon RDS console or the DescribeDBClusterParameters command to verify that your DB cluster parameter group has been created or modified.
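A sketch mixing one dynamic and one static parameter change; the group and parameter names are illustrative and valid names depend on the engine:

```python
import boto3

rds = boto3.client("rds")

rds.modify_db_cluster_parameter_group(
    DBClusterParameterGroupName="my-aurora-cluster-params",  # hypothetical
    Parameters=[
        {   # dynamic parameter: applied immediately
            "ParameterName": "server_audit_logging",
            "ParameterValue": "1",
            "ApplyMethod": "immediate",
        },
        {   # static parameter: requires a reboot without failover
            "ParameterName": "character_set_database",
            "ParameterValue": "utf8mb4",
            "ApplyMethod": "pending-reboot",
        },
    ],
)
```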

" }, "ModifyDBClusterSnapshotAttribute":{ "name":"ModifyDBClusterSnapshotAttribute", @@ -1466,7 +1466,7 @@ {"shape":"InvalidDBParameterGroupStateFault"}, {"shape":"DBParameterGroupNotFoundFault"} ], - "documentation":"

Modifies the parameters of a DB cluster parameter group to the default value. To reset specific parameters, submit a list of the following: ParameterName and ApplyMethod. To reset the entire DB cluster parameter group, specify the DBClusterParameterGroupName and ResetAllParameters parameters.

When resetting the entire group, dynamic parameters are updated immediately and static parameters are set to pending-reboot to take effect on the next DB instance restart or RebootDBInstance request. You must call RebootDBInstance for every DB instance in your DB cluster that you want the updated static parameter to apply to.

For more information on Amazon Aurora, see the Amazon Aurora User Guide .

" + "documentation":"

Modifies the parameters of a DB cluster parameter group to the default value. To reset specific parameters, submit a list of the following: ParameterName and ApplyMethod. To reset the entire DB cluster parameter group, specify the DBClusterParameterGroupName and ResetAllParameters parameters.

When resetting the entire group, dynamic parameters are updated immediately and static parameters are set to pending-reboot to take effect on the next DB instance restart or RebootDBInstance request. You must call RebootDBInstance for every DB instance in your DB cluster that you want the updated static parameter to apply to.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.

" }, "ResetDBParameterGroup":{ "name":"ResetDBParameterGroup", @@ -1544,7 +1544,7 @@ {"shape":"KMSKeyNotAccessibleFault"}, {"shape":"DBClusterParameterGroupNotFoundFault"} ], - "documentation":"

Creates a new DB cluster from a DB snapshot or DB cluster snapshot.

If a DB snapshot is specified, the target DB cluster is created from the source DB snapshot with a default configuration and default security group.

If a DB cluster snapshot is specified, the target DB cluster is created from the source DB cluster restore point with the same configuration as the original source DB cluster, except that the new DB cluster is created with the default security group.

For more information on Amazon Aurora, see the Amazon Aurora User Guide.

" + "documentation":"

Creates a new DB cluster from a DB snapshot or DB cluster snapshot.

If a DB snapshot is specified, the target DB cluster is created from the source DB snapshot with a default configuration and default security group.

If a DB cluster snapshot is specified, the target DB cluster is created from the source DB cluster restore point with the same configuration as the original source DB cluster, except that the new DB cluster is created with the default security group.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.
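A minimal boto3 sketch, with hypothetical identifiers:

    import boto3

    rds = boto3.client('rds')

    # Create a new DB cluster from an existing DB cluster snapshot.
    # The new cluster gets a default security group.
    rds.restore_db_cluster_from_snapshot(
        DBClusterIdentifier='restored-cluster',       # hypothetical ids
        SnapshotIdentifier='my-cluster-snapshot',
        Engine='aurora',
    )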

" }, "RestoreDBClusterToPointInTime":{ "name":"RestoreDBClusterToPointInTime", @@ -1576,7 +1576,7 @@ {"shape":"StorageQuotaExceededFault"}, {"shape":"DBClusterParameterGroupNotFoundFault"} ], - "documentation":"

Restores a DB cluster to an arbitrary point in time. Users can restore to any point in time before LatestRestorableTime for up to BackupRetentionPeriod days. The target DB cluster is created from the source DB cluster with the same configuration as the original DB cluster, except that the new DB cluster is created with the default DB security group.

This action only restores the DB cluster, not the DB instances for that DB cluster. You must invoke the CreateDBInstance action to create DB instances for the restored DB cluster, specifying the identifier of the restored DB cluster in DBClusterIdentifier. You can create DB instances only after the RestoreDBClusterToPointInTime action has completed and the DB cluster is available.

For more information on Amazon Aurora, see the Amazon Aurora User Guide.

" + "documentation":"

Restores a DB cluster to an arbitrary point in time. Users can restore to any point in time before LatestRestorableTime for up to BackupRetentionPeriod days. The target DB cluster is created from the source DB cluster with the same configuration as the original DB cluster, except that the new DB cluster is created with the default DB security group.

This action only restores the DB cluster, not the DB instances for that DB cluster. You must invoke the CreateDBInstance action to create DB instances for the restored DB cluster, specifying the identifier of the restored DB cluster in DBClusterIdentifier. You can create DB instances only after the RestoreDBClusterToPointInTime action has completed and the DB cluster is available.

For more information on Amazon Aurora, see What Is Amazon Aurora? in the Amazon Aurora User Guide.
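A minimal boto3 sketch of the two-step restore (all identifiers are hypothetical, and the instance class is just one plausible Aurora choice):

    import datetime

    import boto3

    rds = boto3.client('rds')

    # Step 1: restore the cluster itself to a point in time before
    # LatestRestorableTime.
    rds.restore_db_cluster_to_point_in_time(
        DBClusterIdentifier='restored-cluster',       # hypothetical ids
        SourceDBClusterIdentifier='source-cluster',
        RestoreToTime=datetime.datetime(2018, 10, 1, 12, 0, 0),
    )

    # Step 2: once the restore has completed and the cluster is available,
    # create at least one DB instance in the restored cluster.
    rds.create_db_instance(
        DBInstanceIdentifier='restored-instance-1',
        DBClusterIdentifier='restored-cluster',
        DBInstanceClass='db.r4.large',
        Engine='aurora',
    )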

" }, "RestoreDBInstanceFromDBSnapshot":{ "name":"RestoreDBInstanceFromDBSnapshot", diff --git a/botocore/data/s3/2006-03-01/service-2.json b/botocore/data/s3/2006-03-01/service-2.json index 6ac6bf10da..6af8d80a32 100644 --- a/botocore/data/s3/2006-03-01/service-2.json +++ b/botocore/data/s3/2006-03-01/service-2.json @@ -3693,6 +3693,10 @@ "JSON":{ "shape":"JSONInput", "documentation":"

Specifies JSON as the object's input serialization format.

" + }, + "Parquet":{ + "shape":"ParquetInput", + "documentation":"

Specifies Parquet as the object's input serialization format.

" } }, "documentation":"

Describes the serialization format of the object.
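Since ParquetInput is an empty structure, selecting Parquet as the input format is just a matter of passing an empty mapping. A minimal boto3 S3 Select sketch, with hypothetical bucket and key:

    import boto3

    s3 = boto3.client('s3')

    # Query a Parquet object with S3 Select; {'Parquet': {}} maps to the
    # empty ParquetInput shape added in this change.
    resp = s3.select_object_content(
        Bucket='my-bucket',                 # hypothetical bucket/key
        Key='data.parquet',
        ExpressionType='SQL',
        Expression="SELECT * FROM S3Object s LIMIT 10",
        InputSerialization={'Parquet': {}},
        OutputSerialization={'JSON': {}},
    )

    # The response payload is an event stream; Records events carry the rows.
    for event in resp['Payload']:
        if 'Records' in event:
            print(event['Records']['Payload'].decode('utf-8'))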

" @@ -5002,6 +5006,11 @@ "type":"string", "enum":["Destination"] }, + "ParquetInput":{ + "type":"structure", + "members":{ + } + }, "Part":{ "type":"structure", "members":{ diff --git a/docs/source/conf.py b/docs/source/conf.py index a88d179d7a..8b004785cf 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -54,7 +54,7 @@ # The short X.Y version. version = '1.11' # The full version, including alpha/beta/rc tags. -release = '1.11.7' +release = '1.11.8' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages.