From 020182a8434fc9c370885696db30b481ee50bfc3 Mon Sep 17 00:00:00 2001 From: Jonathan Eskew Date: Tue, 14 Jul 2015 13:49:18 -0700 Subject: [PATCH] Update models for next release --- CHANGELOG.md | 15 + Makefile | 3 + features/smoke/devicefarm/devicefarm.feature | 12 + .../dynamodbstreams/dynamodbstreams.feature | 16 + src/DeviceFarm/DeviceFarmClient.php | 9 + .../Exception/DeviceFarmException.php | 9 + src/DynamoDbStreams/DynamoDbStreamsClient.php | 20 + .../Exception/DynamoDbStreamsException.php | 9 + src/data/devicefarm/2015-06-23/api-2.json | 1582 +++++++++++++++++ src/data/devicefarm/2015-06-23/docs-2.json | 878 +++++++++ .../devicefarm/2015-06-23/paginators-1.json | 64 + src/data/dynamodb/2012-08-10/api-2.json | 44 +- src/data/dynamodb/2012-08-10/docs-2.json | 108 +- .../dynamodb/2012-08-10/paginators-1.json | 1 + src/data/manifest.json | 14 + .../streams.dynamodb/2012-08-10/api-2.json | 436 +++++ .../streams.dynamodb/2012-08-10/docs-2.json | 353 ++++ tests/Integ/SmokeContext.php | 3 + 18 files changed, 3534 insertions(+), 42 deletions(-) create mode 100644 features/smoke/devicefarm/devicefarm.feature create mode 100644 features/smoke/dynamodbstreams/dynamodbstreams.feature create mode 100644 src/DeviceFarm/DeviceFarmClient.php create mode 100644 src/DeviceFarm/Exception/DeviceFarmException.php create mode 100644 src/DynamoDbStreams/DynamoDbStreamsClient.php create mode 100644 src/DynamoDbStreams/Exception/DynamoDbStreamsException.php create mode 100644 src/data/devicefarm/2015-06-23/api-2.json create mode 100644 src/data/devicefarm/2015-06-23/docs-2.json create mode 100644 src/data/devicefarm/2015-06-23/paginators-1.json create mode 100644 src/data/streams.dynamodb/2012-08-10/api-2.json create mode 100644 src/data/streams.dynamodb/2012-08-10/docs-2.json diff --git a/CHANGELOG.md b/CHANGELOG.md index 98f9a289e4..aa3b836cbe 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,20 @@ # CHANGELOG +## next release + +* `Aws\DeviceFarm` - Added support for AWS 
DeviceFarm, an app testing service + that enables you to test your Android and Fire OS apps on real, physical + phones and tablets that are hosted by AWS. +* `Aws\DynamoDb` - Added support for consistent scans and update streams. +* `Aws\DynamoDbStreams` - Added support for Amazon DynamoDB Streams, giving you + the ability to subscribe to the transactional log of all changes transpiring + in your DynamoDB table. +* `Aws\S3` - Fixed checksum encoding on multipart upload of non-seekable + streams. +* `Aws\S3\StreamWrapper` - Added guard on rename functionality to ensure wrapper + initialized. + + ## 3.1.0 - 2015-07-09 * `Aws\CodeCommit` - Added support for AWS CodeCommit, a secure, highly diff --git a/Makefile b/Makefile index 8d3c1bab32..d5a8d418ff 100644 --- a/Makefile +++ b/Makefile @@ -39,6 +39,9 @@ coverage-show: integ: vendor/bin/phpunit --debug --testsuite=integ $(TEST) +smoke: + vendor/bin/behat --format=progress + # Packages the phar and zip package: php build/packager.php $(SERVICE) diff --git a/features/smoke/devicefarm/devicefarm.feature b/features/smoke/devicefarm/devicefarm.feature new file mode 100644 index 0000000000..eb449e5c6a --- /dev/null +++ b/features/smoke/devicefarm/devicefarm.feature @@ -0,0 +1,12 @@ +# language: en +@devicefarm +Feature: AWS Device Farm + + Scenario: Making a request + When I call the "ListProjects" API + Then the value at "projects" should be a list + + Scenario: Error handling + When I attempt to call the "GetDevicePool" API with: + | arn | bogus-arn | + Then I expect the response error code to be "ValidationException" \ No newline at end of file diff --git a/features/smoke/dynamodbstreams/dynamodbstreams.feature b/features/smoke/dynamodbstreams/dynamodbstreams.feature new file mode 100644 index 0000000000..07e6e956d9 --- /dev/null +++ b/features/smoke/dynamodbstreams/dynamodbstreams.feature @@ -0,0 +1,16 @@ +# language: en +@dynamodbstreams +Feature: Amazon DynamoDB + + Scenario: Making a request + When I call the 
"ListStreams" API + Then the value at "Streams" should be a list + + Scenario: Handling errors + When I attempt to call the "DescribeStream" API with: + | StreamArn | fake-stream | + Then I expect the response error code to be "ValidationException" + And I expect the response error message to include: + """ + Invalid StreamArn + """ diff --git a/src/DeviceFarm/DeviceFarmClient.php b/src/DeviceFarm/DeviceFarmClient.php new file mode 100644 index 0000000000..cd3cadf4de --- /dev/null +++ b/src/DeviceFarm/DeviceFarmClient.php @@ -0,0 +1,9 @@ +Creates a device pool.

", + "CreateProject": "

Creates a new project.

", + "CreateUpload": "

Uploads an app or test scripts.

", + "GetDevice": "

Gets information about a unique device type.

", + "GetDevicePool": "

Gets information about a device pool.

", + "GetDevicePoolCompatibility": "

Gets information about compatibility with a device pool.

", + "GetJob": "

Gets information about a job.

", + "GetProject": "

Gets information about a project.

", + "GetRun": "

Gets information about a run.

", + "GetSuite": "

Gets information about a suite.

", + "GetTest": "

Gets information about a test.

", + "GetUpload": "

Gets information about an upload.

", + "ListArtifacts": "

Gets information about artifacts.

", + "ListDevicePools": "

Gets information about device pools.

", + "ListDevices": "

Gets information about unique device types.

", + "ListJobs": "

Gets information about jobs.

", + "ListProjects": "

Gets information about projects.

", + "ListRuns": "

Gets information about runs.

", + "ListSamples": "

Gets information about samples.

", + "ListSuites": "

Gets information about suites.

", + "ListTests": "

Gets information about tests.

", + "ListUniqueProblems": "

Gets information about unique problems.

", + "ListUploads": "

Gets information about uploads.

", + "ScheduleRun": "

Schedules a run.

" + }, + "service": "

AWS Device Farm is a service that enables mobile app developers to test Android and Fire OS apps on physical phones, tablets, and other devices in the cloud.

", + "shapes": { + "AmazonResourceName": { + "base": null, + "refs": { + "AmazonResourceNames$member": null, + "Artifact$arn": "

The artifact's ARN.

", + "CreateDevicePoolRequest$projectArn": "

The ARN of the project for the device pool.

", + "CreateUploadRequest$projectArn": "

The ARN of the project for the upload.

", + "Device$arn": "

The device's ARN.

", + "DevicePool$arn": "

The device pool's ARN.

", + "GetDevicePoolCompatibilityRequest$devicePoolArn": "

The device pool's ARN.

", + "GetDevicePoolCompatibilityRequest$appArn": "

The ARN of the app that is associated with the specified device pool.

", + "GetDevicePoolRequest$arn": "

The device pool's ARN.

", + "GetDeviceRequest$arn": "

The device type's ARN.

", + "GetJobRequest$arn": "

The job's ARN.

", + "GetProjectRequest$arn": "

The project's ARN.

", + "GetRunRequest$arn": "

The run's ARN.

", + "GetSuiteRequest$arn": "

The suite's ARN.

", + "GetTestRequest$arn": "

The test's ARN.

", + "GetUploadRequest$arn": "

The upload's ARN.

", + "Job$arn": "

The job's ARN.

", + "ListArtifactsRequest$arn": "

The artifacts' ARNs.

", + "ListDevicePoolsRequest$arn": "

The project ARN.

", + "ListDevicesRequest$arn": "

The device types' ARNs.

", + "ListJobsRequest$arn": "

The jobs' ARNs.

", + "ListProjectsRequest$arn": "

The projects' ARNs.

", + "ListRunsRequest$arn": "

The runs' ARNs.

", + "ListSamplesRequest$arn": "

The samples' ARNs.

", + "ListSuitesRequest$arn": "

The suites' ARNs.

", + "ListTestsRequest$arn": "

The tests' ARNs.

", + "ListUniqueProblemsRequest$arn": "

The unique problems' ARNs.

", + "ListUploadsRequest$arn": "

The uploads' ARNs.

", + "ProblemDetail$arn": "

The problem detail's ARN.

", + "Project$arn": "

The project's ARN.

", + "Run$arn": "

The run's ARN.

", + "Sample$arn": "

The sample's ARN.

", + "ScheduleRunConfiguration$extraDataPackageArn": "

The ARN of the extra data for the run. The extra data is a .zip file that AWS Device Farm will extract to external data.

", + "ScheduleRunConfiguration$networkProfileArn": "

Reserved for internal use.

", + "ScheduleRunRequest$projectArn": "

The ARN of the project for the run to be scheduled.

", + "ScheduleRunRequest$appArn": "

The ARN of the app to schedule a run.

", + "ScheduleRunRequest$devicePoolArn": "

The ARN of the device pool for the run to be scheduled.

", + "ScheduleRunTest$testPackageArn": "

The ARN of the uploaded test that will be run.

", + "Suite$arn": "

The suite's ARN.

", + "Test$arn": "

The test's ARN.

", + "Upload$arn": "

The upload's ARN.

" + } + }, + "AmazonResourceNames": { + "base": null, + "refs": { + "ScheduleRunConfiguration$auxiliaryApps": "

A list of auxiliary apps for the run.

" + } + }, + "ArgumentException": { + "base": "

An invalid argument was specified.

", + "refs": { + } + }, + "Artifact": { + "base": "

Represents the output of a test. Examples of artifacts include logs and screenshots.

", + "refs": { + "Artifacts$member": null + } + }, + "ArtifactCategory": { + "base": null, + "refs": { + "ListArtifactsRequest$type": "

The artifacts' type.

Allowed values include:

" + } + }, + "ArtifactType": { + "base": null, + "refs": { + "Artifact$type": "

The artifact's type.

Allowed values include the following:

" + } + }, + "Artifacts": { + "base": null, + "refs": { + "ListArtifactsResult$artifacts": "

Information about the artifacts.

" + } + }, + "Boolean": { + "base": null, + "refs": { + "DevicePoolCompatibilityResult$compatible": "

Whether the result was compatible with the device pool.

", + "Radios$wifi": "

True if Wi-Fi is enabled at the beginning of the test; otherwise, false.

", + "Radios$bluetooth": "

True if Bluetooth is enabled at the beginning of the test; otherwise, false.

", + "Radios$nfc": "

True if NFC is enabled at the beginning of the test; otherwise, false.

", + "Radios$gps": "

True if GPS is enabled at the beginning of the test; otherwise, false.

" + } + }, + "CPU": { + "base": "

Represents the amount of CPU that an app is using on a physical device.

Note that this does not represent system-wide CPU usage.

", + "refs": { + "Device$cpu": "

Information about the device's CPU.

" + } + }, + "ContentType": { + "base": null, + "refs": { + "CreateUploadRequest$contentType": "

The upload's content type (for example, \"application/octet-stream\").

", + "Upload$contentType": "

The upload's content type (for example, \"application/octet-stream\").

" + } + }, + "Counters": { + "base": "

Represents entity counters.

", + "refs": { + "Job$counters": "

The job's result counters.

", + "Run$counters": "

The run's result counters.

", + "Suite$counters": "

The suite's result counters.

", + "Test$counters": "

The test's result counters.

" + } + }, + "CreateDevicePoolRequest": { + "base": "

Represents a request to the create device pool operation.

", + "refs": { + } + }, + "CreateDevicePoolResult": { + "base": "

Represents the result of a create device pool request.

", + "refs": { + } + }, + "CreateProjectRequest": { + "base": "

Represents a request to the create project operation.

", + "refs": { + } + }, + "CreateProjectResult": { + "base": "

Represents the result of a create project request.

", + "refs": { + } + }, + "CreateUploadRequest": { + "base": "

Represents a request to the create upload operation.

", + "refs": { + } + }, + "CreateUploadResult": { + "base": "

Represents the result of a create upload request.

", + "refs": { + } + }, + "DateTime": { + "base": null, + "refs": { + "Job$created": "

When the job was created.

", + "Job$started": "

The job's start time.

", + "Job$stopped": "

The job's stop time.

", + "Project$created": "

When the project was created.

", + "Run$created": "

When the run was created.

", + "Run$started": "

The run's start time.

", + "Run$stopped": "

The run's stop time.

", + "Suite$created": "

When the suite was created.

", + "Suite$started": "

The suite's start time.

", + "Suite$stopped": "

The suite's stop time.

", + "Test$created": "

When the test was created.

", + "Test$started": "

The test's start time.

", + "Test$stopped": "

The test's stop time.

", + "Upload$created": "

When the upload was created.

" + } + }, + "Device": { + "base": "

Represents a device type that an app is tested against.

", + "refs": { + "DevicePoolCompatibilityResult$device": null, + "Devices$member": null, + "GetDeviceResult$device": null, + "Job$device": null, + "Problem$device": "

Information about the associated device.

" + } + }, + "DeviceAttribute": { + "base": null, + "refs": { + "IncompatibilityMessage$type": "

The type of incompatibility.

Allowed values include:

", + "Rule$attribute": "

The rule's attribute.

Allowed values include:

" + } + }, + "DeviceFormFactor": { + "base": null, + "refs": { + "Device$formFactor": "

The device's form factor.

Allowed values include:

" + } + }, + "DevicePlatform": { + "base": null, + "refs": { + "Device$platform": "

The device's platform.

Allowed values include:

", + "Run$platform": "

The run's platform.

Allowed values include:

" + } + }, + "DevicePool": { + "base": "

Represents a collection of device types.

", + "refs": { + "CreateDevicePoolResult$devicePool": "

The newly created device pool.

", + "DevicePools$member": null, + "GetDevicePoolResult$devicePool": null + } + }, + "DevicePoolCompatibilityResult": { + "base": "

Represents a device pool compatibility result.

", + "refs": { + "DevicePoolCompatibilityResults$member": null + } + }, + "DevicePoolCompatibilityResults": { + "base": null, + "refs": { + "GetDevicePoolCompatibilityResult$compatibleDevices": "

Information about compatible devices.

", + "GetDevicePoolCompatibilityResult$incompatibleDevices": "

Information about incompatible devices.

" + } + }, + "DevicePoolType": { + "base": null, + "refs": { + "DevicePool$type": "

The device pool's type.

Allowed values include:

", + "ListDevicePoolsRequest$type": "

The device pools' type.

Allowed values include:

" + } + }, + "DevicePools": { + "base": null, + "refs": { + "ListDevicePoolsResult$devicePools": "

Information about the device pools.

" + } + }, + "Devices": { + "base": null, + "refs": { + "ListDevicesResult$devices": "

Information about the devices.

" + } + }, + "Double": { + "base": null, + "refs": { + "CPU$clock": "

The clock speed of the device's CPU, expressed in hertz (Hz). For example, a 1.2 GHz CPU is expressed as 1200000000.

", + "Location$latitude": "

The latitude.

", + "Location$longitude": "

The longitude.

" + } + }, + "ExecutionResult": { + "base": null, + "refs": { + "Job$result": "

The job's result.

Allowed values include:

", + "Problem$result": "

The problem's result.

Allowed values include:

", + "Run$result": "

The run's result.

Allowed values include:

", + "Suite$result": "

The suite's result.

Allowed values include:

", + "Test$result": "

The test's result.

Allowed values include:

", + "UniqueProblemsByExecutionResultMap$key": null + } + }, + "ExecutionStatus": { + "base": null, + "refs": { + "Job$status": "

The job's status.

Allowed values include:

", + "Run$status": "

The run's status.

Allowed values include:

", + "Suite$status": "

The suite's status.

Allowed values include:

", + "Test$status": "

The test's status.

Allowed values include:

" + } + }, + "Filter": { + "base": null, + "refs": { + "ScheduleRunTest$filter": "

The test's filter.

" + } + }, + "GetDevicePoolCompatibilityRequest": { + "base": "

Represents a request to the get device pool compatibility operation.

", + "refs": { + } + }, + "GetDevicePoolCompatibilityResult": { + "base": "

Represents the result of a describe device pool compatibility request.

", + "refs": { + } + }, + "GetDevicePoolRequest": { + "base": "

Represents a request to the get device pool operation.

", + "refs": { + } + }, + "GetDevicePoolResult": { + "base": "

Represents the result of a get device pool request.

", + "refs": { + } + }, + "GetDeviceRequest": { + "base": "

Represents a request to the get device operation.

", + "refs": { + } + }, + "GetDeviceResult": { + "base": "

Represents the result of a get device request.

", + "refs": { + } + }, + "GetJobRequest": { + "base": "

Represents a request to the get job operation.

", + "refs": { + } + }, + "GetJobResult": { + "base": "

Represents the result of a get job request.

", + "refs": { + } + }, + "GetProjectRequest": { + "base": "

Represents a request to the get project operation.

", + "refs": { + } + }, + "GetProjectResult": { + "base": "

Represents the result of a get project request.

", + "refs": { + } + }, + "GetRunRequest": { + "base": "

Represents a request to the get run operation.

", + "refs": { + } + }, + "GetRunResult": { + "base": "

Represents the result of a get run request.

", + "refs": { + } + }, + "GetSuiteRequest": { + "base": "

Represents a request to the get suite operation.

", + "refs": { + } + }, + "GetSuiteResult": { + "base": "

Represents the result of a get suite request.

", + "refs": { + } + }, + "GetTestRequest": { + "base": "

Represents a request to the get test operation.

", + "refs": { + } + }, + "GetTestResult": { + "base": "

Represents the result of a get test request.

", + "refs": { + } + }, + "GetUploadRequest": { + "base": "

Represents a request to the get upload operation.

", + "refs": { + } + }, + "GetUploadResult": { + "base": "

Represents the result of a get upload request.

", + "refs": { + } + }, + "IdempotencyException": { + "base": "

An entity with the same name already exists.

", + "refs": { + } + }, + "IncompatibilityMessage": { + "base": "

Represents information about incompatibility.

", + "refs": { + "IncompatibilityMessages$member": null + } + }, + "IncompatibilityMessages": { + "base": null, + "refs": { + "DevicePoolCompatibilityResult$incompatibilityMessages": "

Information about the compatibility.

" + } + }, + "Integer": { + "base": null, + "refs": { + "Counters$total": "

The total number of entities.

", + "Counters$passed": "

The number of passed entities.

", + "Counters$failed": "

The number of failed entities.

", + "Counters$warned": "

The number of warned entities.

", + "Counters$errored": "

The number of errored entities.

", + "Counters$stopped": "

The number of stopped entities.

", + "Counters$skipped": "

The number of skipped entities.

", + "Resolution$width": "

The screen resolution's width, expressed in pixels.

", + "Resolution$height": "

The screen resolution's height, expressed in pixels.

", + "Run$totalJobs": "

The total number of jobs for the run.

", + "Run$completedJobs": "

The total number of completed jobs.

" + } + }, + "Job": { + "base": "

Represents a job.

", + "refs": { + "GetJobResult$job": null, + "Jobs$member": null + } + }, + "Jobs": { + "base": null, + "refs": { + "ListJobsResult$jobs": "

Information about the jobs.

" + } + }, + "LimitExceededException": { + "base": "

A limit was exceeded.

", + "refs": { + } + }, + "ListArtifactsRequest": { + "base": "

Represents a request to the list artifacts operation.

", + "refs": { + } + }, + "ListArtifactsResult": { + "base": "

Represents the result of a list artifacts operation.

", + "refs": { + } + }, + "ListDevicePoolsRequest": { + "base": "

Represents a request to the list device pools operation.

", + "refs": { + } + }, + "ListDevicePoolsResult": { + "base": "

Represents the result of a list device pools request.

", + "refs": { + } + }, + "ListDevicesRequest": { + "base": "

Represents a request to the list devices operation.

", + "refs": { + } + }, + "ListDevicesResult": { + "base": "

Represents the result of a list devices operation.

", + "refs": { + } + }, + "ListJobsRequest": { + "base": "

Represents a request to the list jobs operation.

", + "refs": { + } + }, + "ListJobsResult": { + "base": "

Represents the result of a list jobs request.

", + "refs": { + } + }, + "ListProjectsRequest": { + "base": "

Represents a request to the list projects operation.

", + "refs": { + } + }, + "ListProjectsResult": { + "base": "

Represents the result of a list projects request.

", + "refs": { + } + }, + "ListRunsRequest": { + "base": "

Represents a request to the list runs operation.

", + "refs": { + } + }, + "ListRunsResult": { + "base": "

Represents the result of a list runs request.

", + "refs": { + } + }, + "ListSamplesRequest": { + "base": "

Represents a request to the list samples operation.

", + "refs": { + } + }, + "ListSamplesResult": { + "base": "

Represents the result of a list samples request.

", + "refs": { + } + }, + "ListSuitesRequest": { + "base": "

Represents a request to the list suites operation.

", + "refs": { + } + }, + "ListSuitesResult": { + "base": "

Represents the result of a list suites request.

", + "refs": { + } + }, + "ListTestsRequest": { + "base": "

Represents a request to the list tests operation.

", + "refs": { + } + }, + "ListTestsResult": { + "base": "

Represents the result of a list tests request.

", + "refs": { + } + }, + "ListUniqueProblemsRequest": { + "base": "

Represents a request to the list unique problems operation.

", + "refs": { + } + }, + "ListUniqueProblemsResult": { + "base": "

Represents the result of a list unique problems request.

", + "refs": { + } + }, + "ListUploadsRequest": { + "base": "

Represents a request to the list uploads operation.

", + "refs": { + } + }, + "ListUploadsResult": { + "base": "

Represents the result of a list uploads request.

", + "refs": { + } + }, + "Location": { + "base": "

Represents a latitude and longitude pair, expressed in geographic coordinate system degrees (for example 47.6204, -122.3491).

Elevation is currently not supported.

", + "refs": { + "ScheduleRunConfiguration$location": "

Information about the location that is used for the run.

" + } + }, + "Long": { + "base": null, + "refs": { + "Device$heapSize": "

The device's heap size, expressed in bytes.

", + "Device$memory": "

The device's total memory size, expressed in bytes.

" + } + }, + "Message": { + "base": null, + "refs": { + "ArgumentException$message": "

Any additional information about the exception.

", + "CreateDevicePoolRequest$description": "

The device pool's description.

", + "DevicePool$description": "

The device pool's description.

", + "IdempotencyException$message": "

Any additional information about the exception.

", + "IncompatibilityMessage$message": "

A message about the incompatibility.

", + "Job$message": "

A message about the job's result.

", + "LimitExceededException$message": "

Any additional information about the exception.

", + "NotFoundException$message": "

Any additional information about the exception.

", + "Problem$message": "

A message about the problem's result.

", + "Run$message": "

A message about the run's result.

", + "ServiceAccountException$message": "

Any additional information about the exception.

", + "Suite$message": "

A message about the suite's result.

", + "Test$message": "

A message about the test's result.

", + "UniqueProblem$message": "

A message about the unique problems' result.

", + "Upload$message": "

A message about the upload's result.

" + } + }, + "Metadata": { + "base": null, + "refs": { + "Upload$metadata": "

The upload's metadata. This contains information that is parsed from the manifest and is displayed in the AWS Device Farm console after the associated app is uploaded.

" + } + }, + "Name": { + "base": null, + "refs": { + "Artifact$name": "

The artifact's name.

", + "CreateDevicePoolRequest$name": "

The device pool's name.

", + "CreateProjectRequest$name": "

The project's name.

", + "CreateUploadRequest$name": "

The upload's file name.

", + "Device$name": "

The device's display name.

", + "DevicePool$name": "

The device pool's name.

", + "Job$name": "

The job's name.

", + "ProblemDetail$name": "

The problem detail's name.

", + "Project$name": "

The project's name.

", + "Run$name": "

The run's name.

", + "ScheduleRunRequest$name": "

The name for the run to be scheduled.

", + "Suite$name": "

The suite's name.

", + "Test$name": "

The test's name.

", + "Upload$name": "

The upload's file name.

" + } + }, + "NotFoundException": { + "base": "

The specified entity was not found.

", + "refs": { + } + }, + "PaginationToken": { + "base": null, + "refs": { + "ListArtifactsRequest$nextToken": "

An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

", + "ListArtifactsResult$nextToken": "

If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

", + "ListDevicePoolsRequest$nextToken": "

An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

", + "ListDevicePoolsResult$nextToken": "

If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

", + "ListDevicesRequest$nextToken": "

An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

", + "ListDevicesResult$nextToken": "

If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

", + "ListJobsRequest$nextToken": "

An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

", + "ListJobsResult$nextToken": "

If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

", + "ListProjectsRequest$nextToken": "

An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

", + "ListProjectsResult$nextToken": "

If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

", + "ListRunsRequest$nextToken": "

An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

", + "ListRunsResult$nextToken": "

If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

", + "ListSamplesRequest$nextToken": "

An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

", + "ListSamplesResult$nextToken": "

If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

", + "ListSuitesRequest$nextToken": "

An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

", + "ListSuitesResult$nextToken": "

If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

", + "ListTestsRequest$nextToken": "

An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

", + "ListTestsResult$nextToken": "

If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

", + "ListUniqueProblemsRequest$nextToken": "

An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

", + "ListUniqueProblemsResult$nextToken": "

If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

", + "ListUploadsRequest$nextToken": "

An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

", + "ListUploadsResult$nextToken": "

If the number of items that are returned is significantly large, this is an identifier that is also returned, which can be used in a subsequent call to this operation to return the next set of items in the list.

" + } + }, + "Problem": { + "base": "

Represents a specific warning or failure.

", + "refs": { + "Problems$member": null + } + }, + "ProblemDetail": { + "base": "

Information about a problem detail.

", + "refs": { + "Problem$run": "

Information about the associated run.

", + "Problem$job": "

Information about the associated job.

", + "Problem$suite": "

Information about the associated suite.

", + "Problem$test": "

Information about the associated test.

" + } + }, + "Problems": { + "base": null, + "refs": { + "UniqueProblem$problems": "

Information about the problems.

" + } + }, + "Project": { + "base": "

Represents an operating-system neutral workspace for running and managing tests.

", + "refs": { + "CreateProjectResult$project": "

The newly created project.

", + "GetProjectResult$project": null, + "Projects$member": null + } + }, + "Projects": { + "base": null, + "refs": { + "ListProjectsResult$projects": "

Information about the projects.

" + } + }, + "Radios": { + "base": "

Represents the set of radios and their states on a device. Examples of radios include Wi-Fi, GPS, Bluetooth, and NFC.

", + "refs": { + "ScheduleRunConfiguration$radios": "

Information about the radio states for the run.

" + } + }, + "Resolution": { + "base": "

Represents the screen resolution of a device in height and width, expressed in pixels.

", + "refs": { + "Device$resolution": null + } + }, + "Rule": { + "base": "

Represents a condition for a device pool.

", + "refs": { + "Rules$member": null + } + }, + "RuleOperator": { + "base": null, + "refs": { + "Rule$operator": "

The rule's operator.

" + } + }, + "Rules": { + "base": null, + "refs": { + "CreateDevicePoolRequest$rules": "

The device pool's rules.

", + "DevicePool$rules": "

Information about the device pool's rules.

" + } + }, + "Run": { + "base": "

Represents an app on a set of devices with a specific test and configuration.

", + "refs": { + "GetRunResult$run": null, + "Runs$member": null, + "ScheduleRunResult$run": "

Information about the scheduled run.

" + } + }, + "Runs": { + "base": null, + "refs": { + "ListRunsResult$runs": "

Information about the runs.

" + } + }, + "Sample": { + "base": "

Represents a sample of performance data.

", + "refs": { + "Samples$member": null + } + }, + "SampleType": { + "base": null, + "refs": { + "Sample$type": "

The sample's type.

Must be one of the following values:

" + } + }, + "Samples": { + "base": null, + "refs": { + "ListSamplesResult$samples": "

Information about the samples.

" + } + }, + "ScheduleRunConfiguration": { + "base": "

Represents the settings for a run. Includes things like location, radio states, auxiliary apps, and network profiles.

", + "refs": { + "ScheduleRunRequest$configuration": "

Information about the settings for the run to be scheduled.

" + } + }, + "ScheduleRunRequest": { + "base": "

Represents a request to the schedule run operation.

", + "refs": { + } + }, + "ScheduleRunResult": { + "base": "

Represents the result of a schedule run request.

", + "refs": { + } + }, + "ScheduleRunTest": { + "base": "

Represents additional test settings.

", + "refs": { + "ScheduleRunRequest$test": "

Information about the test for the run to be scheduled.

" + } + }, + "ServiceAccountException": { + "base": "

There was a problem with the service account.

", + "refs": { + } + }, + "String": { + "base": null, + "refs": { + "Artifact$extension": "

The artifact's file extension.

", + "CPU$frequency": "

The CPU's frequency.

", + "CPU$architecture": "

The CPU's architecture, for example x86 or ARM.

", + "Device$manufacturer": "

The device's manufacturer name.

", + "Device$model": "

The device's model name.

", + "Device$os": "

The device's operating system type.

", + "Device$image": "

The device's image name.

", + "Device$carrier": "

The device's carrier.

", + "Device$radio": "

The device's radio.

", + "Rule$value": "

The rule's value.

", + "ScheduleRunConfiguration$locale": "

Information about the locale that is used for the run.

", + "TestParameters$key": null, + "TestParameters$value": null + } + }, + "Suite": { + "base": "

Represents a collection of one or more tests.

", + "refs": { + "GetSuiteResult$suite": null, + "Suites$member": null + } + }, + "Suites": { + "base": null, + "refs": { + "ListSuitesResult$suites": "

Information about the suites.

" + } + }, + "Test": { + "base": "

Represents a condition that is evaluated.

", + "refs": { + "GetTestResult$test": null, + "Tests$member": null + } + }, + "TestParameters": { + "base": null, + "refs": { + "ScheduleRunTest$parameters": "

The test's parameters, such as test framework parameters and fixture settings.

" + } + }, + "TestType": { + "base": null, + "refs": { + "GetDevicePoolCompatibilityRequest$testType": "

The test type for the specified device pool.

Allowed values include the following:

", + "Job$type": "

The job's type.

Allowed values include the following:

", + "Run$type": "

The run's type.

Must be one of the following values:

", + "ScheduleRunTest$type": "

The test's type.

Must be one of the following values:

", + "Suite$type": "

The suite's type.

Must be one of the following values:

", + "Test$type": "

The test's type.

Must be one of the following values:

" + } + }, + "Tests": { + "base": null, + "refs": { + "ListTestsResult$tests": "

Information about the tests.

" + } + }, + "URL": { + "base": null, + "refs": { + "Artifact$url": "

The pre-signed Amazon S3 URL that can be used with a corresponding GET request to download the artifact's file.

", + "Sample$url": "

The pre-signed Amazon S3 URL that can be used with a corresponding GET request to download the sample's file.

", + "Upload$url": "

The pre-signed Amazon S3 URL that was used to store a file through a corresponding PUT request.

" + } + }, + "UniqueProblem": { + "base": "

A collection of one or more problems, grouped by their result.

", + "refs": { + "UniqueProblems$member": null + } + }, + "UniqueProblems": { + "base": null, + "refs": { + "UniqueProblemsByExecutionResultMap$value": null + } + }, + "UniqueProblemsByExecutionResultMap": { + "base": null, + "refs": { + "ListUniqueProblemsResult$uniqueProblems": "

Information about the unique problems.

Allowed values include:

" + } + }, + "Upload": { + "base": "

An app or a set of one or more tests to upload or that have been uploaded.

", + "refs": { + "CreateUploadResult$upload": "

The newly created upload.

", + "GetUploadResult$upload": null, + "Uploads$member": null + } + }, + "UploadStatus": { + "base": null, + "refs": { + "Upload$status": "

The upload's status.

Must be one of the following values:

" + } + }, + "UploadType": { + "base": null, + "refs": { + "CreateUploadRequest$type": "

The upload's upload type.

Must be one of the following values:

", + "Upload$type": "

The upload's type.

Must be one of the following values:

" + } + }, + "Uploads": { + "base": null, + "refs": { + "ListUploadsResult$uploads": "

Information about the uploads.

" + } + } + } +} diff --git a/src/data/devicefarm/2015-06-23/paginators-1.json b/src/data/devicefarm/2015-06-23/paginators-1.json new file mode 100644 index 0000000000..dd41ad1ff1 --- /dev/null +++ b/src/data/devicefarm/2015-06-23/paginators-1.json @@ -0,0 +1,64 @@ +{ + "pagination": { + "ListArtifacts": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "artifacts" + }, + "ListDevicePools": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "devicePools" + }, + "ListDevices": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "devices" + }, + "ListDevices": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "devices" + }, + "ListJobs": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "jobs" + }, + "ListProjects": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "projects" + }, + "ListRuns": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "runs" + }, + "ListSamples": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "samples" + }, + "ListSuites": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "suites" + }, + "ListTests": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "tests" + }, + "ListUniqueProblems": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "uniqueProblems" + }, + "ListUploads": { + "input_token": "nextToken", + "output_token": "nextToken", + "result_key": "uploads" + } + } +} diff --git a/src/data/dynamodb/2012-08-10/api-2.json b/src/data/dynamodb/2012-08-10/api-2.json index b7a1fbcbb2..4de3546af1 100644 --- a/src/data/dynamodb/2012-08-10/api-2.json +++ b/src/data/dynamodb/2012-08-10/api-2.json @@ -569,7 +569,8 @@ "KeySchema":{"shape":"KeySchema"}, "LocalSecondaryIndexes":{"shape":"LocalSecondaryIndexList"}, 
"GlobalSecondaryIndexes":{"shape":"GlobalSecondaryIndexList"}, - "ProvisionedThroughput":{"shape":"ProvisionedThroughput"} + "ProvisionedThroughput":{"shape":"ProvisionedThroughput"}, + "StreamSpecification":{"shape":"StreamSpecification"} } }, "CreateTableOutput":{ @@ -726,7 +727,8 @@ "Backfilling":{"shape":"Backfilling"}, "ProvisionedThroughput":{"shape":"ProvisionedThroughputDescription"}, "IndexSizeBytes":{"shape":"Long"}, - "ItemCount":{"shape":"Long"} + "ItemCount":{"shape":"Long"}, + "IndexArn":{"shape":"String"} } }, "GlobalSecondaryIndexDescriptionList":{ @@ -917,7 +919,8 @@ "KeySchema":{"shape":"KeySchema"}, "Projection":{"shape":"Projection"}, "IndexSizeBytes":{"shape":"Long"}, - "ItemCount":{"shape":"Long"} + "ItemCount":{"shape":"Long"}, + "IndexArn":{"shape":"String"} } }, "LocalSecondaryIndexDescriptionList":{ @@ -1140,7 +1143,8 @@ "ProjectionExpression":{"shape":"ProjectionExpression"}, "FilterExpression":{"shape":"ConditionExpression"}, "ExpressionAttributeNames":{"shape":"ExpressionAttributeNameMap"}, - "ExpressionAttributeValues":{"shape":"ExpressionAttributeValueMap"} + "ExpressionAttributeValues":{"shape":"ExpressionAttributeValueMap"}, + "ConsistentRead":{"shape":"ConsistentRead"} } }, "ScanOutput":{ @@ -1177,6 +1181,29 @@ "COUNT" ] }, + "StreamArn":{ + "type":"string", + "min":37, + "max":1024 + }, + "StreamEnabled":{"type":"boolean"}, + "StreamSpecification":{ + "type":"structure", + "members":{ + "StreamEnabled":{"shape":"StreamEnabled"}, + "StreamViewType":{"shape":"StreamViewType"} + } + }, + "StreamViewType":{ + "type":"string", + "enum":[ + "NEW_IMAGE", + "OLD_IMAGE", + "NEW_AND_OLD_IMAGES", + "KEYS_ONLY" + ] + }, + "String":{"type":"string"}, "StringAttributeValue":{"type":"string"}, "StringSetAttributeValue":{ "type":"list", @@ -1193,8 +1220,12 @@ "ProvisionedThroughput":{"shape":"ProvisionedThroughputDescription"}, "TableSizeBytes":{"shape":"Long"}, "ItemCount":{"shape":"Long"}, + "TableArn":{"shape":"String"}, 
"LocalSecondaryIndexes":{"shape":"LocalSecondaryIndexDescriptionList"}, - "GlobalSecondaryIndexes":{"shape":"GlobalSecondaryIndexDescriptionList"} + "GlobalSecondaryIndexes":{"shape":"GlobalSecondaryIndexDescriptionList"}, + "StreamSpecification":{"shape":"StreamSpecification"}, + "LatestStreamLabel":{"shape":"String"}, + "LatestStreamArn":{"shape":"StreamArn"} } }, "TableName":{ @@ -1264,7 +1295,8 @@ "AttributeDefinitions":{"shape":"AttributeDefinitions"}, "TableName":{"shape":"TableName"}, "ProvisionedThroughput":{"shape":"ProvisionedThroughput"}, - "GlobalSecondaryIndexUpdates":{"shape":"GlobalSecondaryIndexUpdateList"} + "GlobalSecondaryIndexUpdates":{"shape":"GlobalSecondaryIndexUpdateList"}, + "StreamSpecification":{"shape":"StreamSpecification"} } }, "UpdateTableOutput":{ diff --git a/src/data/dynamodb/2012-08-10/docs-2.json b/src/data/dynamodb/2012-08-10/docs-2.json index 33d21cf683..400d816357 100644 --- a/src/data/dynamodb/2012-08-10/docs-2.json +++ b/src/data/dynamodb/2012-08-10/docs-2.json @@ -1,19 +1,19 @@ { "version": "2.0", "operations": { - "BatchGetItem": "

The BatchGetItem operation returns the attributes of one or more items from one or more tables. You identify requested items by primary key.

A single operation can retrieve up to 16 MB of data, which can contain as many as 100 items. BatchGetItem will return a partial result if the response size limit is exceeded, the table's provisioned throughput is exceeded, or an internal processing failure occurs. If a partial result is returned, the operation returns a value for UnprocessedKeys. You can use this value to retry the operation starting with the next item to get.

For example, if you ask to retrieve 100 items, but each individual item is 300 KB in size, the system returns 52 items (so as not to exceed the 16 MB limit). It also returns an appropriate UnprocessedKeys value so you can get the next page of results. If desired, your application can include its own logic to assemble the pages of results into one data set.

If none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchGetItem will return a ProvisionedThroughputExceededException. If at least one of the items is successfully processed, then BatchGetItem completes successfully, while returning the keys of the unread items in UnprocessedKeys.

If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately, the underlying read or write requests can still fail due to throttling on the individual tables. If you delay the batch operation using exponential backoff, the individual requests in the batch are much more likely to succeed.

For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide.

By default, BatchGetItem performs eventually consistent reads on every table in the request. If you want strongly consistent reads instead, you can set ConsistentRead to true for any or all tables.

In order to minimize response latency, BatchGetItem retrieves items in parallel.

When designing your application, keep in mind that DynamoDB does not return attributes in any particular order. To help parse the response by item, include the primary key values for the items in your request in the AttributesToGet parameter.

If a requested item does not exist, it is not returned in the result. Requests for nonexistent items consume the minimum read capacity units according to the type of read. For more information, see Capacity Units Calculations in the Amazon DynamoDB Developer Guide.

", - "BatchWriteItem": "

The BatchWriteItem operation puts or deletes multiple items in one or more tables. A single call to BatchWriteItem can write up to 16 MB of data, which can comprise as many as 25 put or delete requests. Individual items to be written can be as large as 400 KB.

BatchWriteItem cannot update items. To update items, use the UpdateItem API.

The individual PutItem and DeleteItem operations specified in BatchWriteItem are atomic; however BatchWriteItem as a whole is not. If any requested operations fail because the table's provisioned throughput is exceeded or an internal processing failure occurs, the failed operations are returned in the UnprocessedItems response parameter. You can investigate and optionally resend the requests. Typically, you would call BatchWriteItem in a loop. Each iteration would check for unprocessed items and submit a new BatchWriteItem request with those unprocessed items until all items have been processed.

Note that if none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchWriteItem will return a ProvisionedThroughputExceededException.

If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately, the underlying read or write requests can still fail due to throttling on the individual tables. If you delay the batch operation using exponential backoff, the individual requests in the batch are much more likely to succeed.

For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide.

With BatchWriteItem, you can efficiently write or delete large amounts of data, such as from Amazon Elastic MapReduce (EMR), or copy data from another database into DynamoDB. In order to improve performance with these large-scale operations, BatchWriteItem does not behave in the same way as individual PutItem and DeleteItem calls would. For example, you cannot specify conditions on individual put and delete requests, and BatchWriteItem does not return deleted items in the response.

If you use a programming language that supports concurrency, such as Java, you can use threads to write items in parallel. Your application must include the necessary logic to manage the threads. With languages that don't support threading, such as PHP, you must update or delete the specified items one at a time. In both situations, BatchWriteItem provides an alternative where the API performs the specified put and delete operations in parallel, giving you the power of the thread pool approach without having to introduce complexity into your application.

Parallel processing reduces latency, but each specified put and delete request consumes the same number of write capacity units whether it is processed in parallel or not. Delete operations on nonexistent items consume one write capacity unit.

If one or more of the following is true, DynamoDB rejects the entire batch write operation:

", + "BatchGetItem": "

The BatchGetItem operation returns the attributes of one or more items from one or more tables. You identify requested items by primary key.

A single operation can retrieve up to 16 MB of data, which can contain as many as 100 items. BatchGetItem will return a partial result if the response size limit is exceeded, the table's provisioned throughput is exceeded, or an internal processing failure occurs. If a partial result is returned, the operation returns a value for UnprocessedKeys. You can use this value to retry the operation starting with the next item to get.

If you request more than 100 items BatchGetItem will return a ValidationException with the message \"Too many items requested for the BatchGetItem call\".

For example, if you ask to retrieve 100 items, but each individual item is 300 KB in size, the system returns 52 items (so as not to exceed the 16 MB limit). It also returns an appropriate UnprocessedKeys value so you can get the next page of results. If desired, your application can include its own logic to assemble the pages of results into one data set.

If none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchGetItem will return a ProvisionedThroughputExceededException. If at least one of the items is successfully processed, then BatchGetItem completes successfully, while returning the keys of the unread items in UnprocessedKeys.

If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately, the underlying read or write requests can still fail due to throttling on the individual tables. If you delay the batch operation using exponential backoff, the individual requests in the batch are much more likely to succeed.

For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide.

By default, BatchGetItem performs eventually consistent reads on every table in the request. If you want strongly consistent reads instead, you can set ConsistentRead to true for any or all tables.

In order to minimize response latency, BatchGetItem retrieves items in parallel.

When designing your application, keep in mind that DynamoDB does not return attributes in any particular order. To help parse the response by item, include the primary key values for the items in your request in the AttributesToGet parameter.

If a requested item does not exist, it is not returned in the result. Requests for nonexistent items consume the minimum read capacity units according to the type of read. For more information, see Capacity Units Calculations in the Amazon DynamoDB Developer Guide.

", + "BatchWriteItem": "

The BatchWriteItem operation puts or deletes multiple items in one or more tables. A single call to BatchWriteItem can write up to 16 MB of data, which can comprise as many as 25 put or delete requests. Individual items to be written can be as large as 400 KB.

BatchWriteItem cannot update items. To update items, use the UpdateItem API.

The individual PutItem and DeleteItem operations specified in BatchWriteItem are atomic; however BatchWriteItem as a whole is not. If any requested operations fail because the table's provisioned throughput is exceeded or an internal processing failure occurs, the failed operations are returned in the UnprocessedItems response parameter. You can investigate and optionally resend the requests. Typically, you would call BatchWriteItem in a loop. Each iteration would check for unprocessed items and submit a new BatchWriteItem request with those unprocessed items until all items have been processed.

Note that if none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchWriteItem will return a ProvisionedThroughputExceededException.

If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately, the underlying read or write requests can still fail due to throttling on the individual tables. If you delay the batch operation using exponential backoff, the individual requests in the batch are much more likely to succeed.

For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide.

With BatchWriteItem, you can efficiently write or delete large amounts of data, such as from Amazon Elastic MapReduce (EMR), or copy data from another database into DynamoDB. In order to improve performance with these large-scale operations, BatchWriteItem does not behave in the same way as individual PutItem and DeleteItem calls would. For example, you cannot specify conditions on individual put and delete requests, and BatchWriteItem does not return deleted items in the response.

If you use a programming language that supports concurrency, you can use threads to write items in parallel. Your application must include the necessary logic to manage the threads. With languages that don't support threading, you must update or delete the specified items one at a time. In both situations, BatchWriteItem provides an alternative where the API performs the specified put and delete operations in parallel, giving you the power of the thread pool approach without having to introduce complexity into your application.

Parallel processing reduces latency, but each specified put and delete request consumes the same number of write capacity units whether it is processed in parallel or not. Delete operations on nonexistent items consume one write capacity unit.

If one or more of the following is true, DynamoDB rejects the entire batch write operation:

", "CreateTable": "

The CreateTable operation adds a new table to your account. In an AWS account, table names must be unique within each region. That is, you can have two tables with same name if you create the tables in different regions.

CreateTable is an asynchronous operation. Upon receiving a CreateTable request, DynamoDB immediately returns a response with a TableStatus of CREATING. After the table is created, DynamoDB sets the TableStatus to ACTIVE. You can perform read and write operations only on an ACTIVE table.

You can optionally define secondary indexes on the new table, as part of the CreateTable operation. If you want to create multiple tables with secondary indexes on them, you must create the tables sequentially. Only one table with secondary indexes can be in the CREATING state at any given time.

You can use the DescribeTable API to check the table status.

", "DeleteItem": "

Deletes a single item in a table by primary key. You can perform a conditional delete operation that deletes the item if it exists, or if it has an expected attribute value.

In addition to deleting an item, you can also return the item's attribute values in the same operation, using the ReturnValues parameter.

Unless you specify conditions, the DeleteItem is an idempotent operation; running it multiple times on the same item or attribute does not result in an error response.

Conditional deletes are useful for deleting items only if specific conditions are met. If those conditions are met, DynamoDB performs the delete. Otherwise, the item is not deleted.

", - "DeleteTable": "

The DeleteTable operation deletes a table and all of its items. After a DeleteTable request, the specified table is in the DELETING state until DynamoDB completes the deletion. If the table is in the ACTIVE state, you can delete it. If a table is in CREATING or UPDATING states, then DynamoDB returns a ResourceInUseException. If the specified table does not exist, DynamoDB returns a ResourceNotFoundException. If table is already in the DELETING state, no error is returned.

DynamoDB might continue to accept data read and write operations, such as GetItem and PutItem, on a table in the DELETING state until the table deletion is complete.

When you delete a table, any indexes on that table are also deleted.

Use the DescribeTable API to check the status of the table.

", + "DeleteTable": "

The DeleteTable operation deletes a table and all of its items. After a DeleteTable request, the specified table is in the DELETING state until DynamoDB completes the deletion. If the table is in the ACTIVE state, you can delete it. If a table is in CREATING or UPDATING states, then DynamoDB returns a ResourceInUseException. If the specified table does not exist, DynamoDB returns a ResourceNotFoundException. If the table is already in the DELETING state, no error is returned.

DynamoDB might continue to accept data read and write operations, such as GetItem and PutItem, on a table in the DELETING state until the table deletion is complete.

When you delete a table, any indexes on that table are also deleted.

If you have DynamoDB Streams enabled on the table, then the corresponding stream on that table goes into the DISABLED state, and the stream is automatically deleted after 24 hours.

Use the DescribeTable API to check the status of the table.

", "DescribeTable": "

Returns information about the table, including the current status of the table, when it was created, the primary key schema, and any indexes on the table.

If you issue a DescribeTable request immediately after a CreateTable request, DynamoDB might return a ResourceNotFoundException. This is because DescribeTable uses an eventually consistent query, and the metadata for your table might not be available at that moment. Wait for a few seconds, and then try the DescribeTable request again.

", "GetItem": "

The GetItem operation returns a set of attributes for the item with the given primary key. If there is no matching item, GetItem does not return any data.

GetItem provides an eventually consistent read by default. If your application requires a strongly consistent read, set ConsistentRead to true. Although a strongly consistent read might take more time than an eventually consistent read, it always returns the last updated value.

", "ListTables": "

Returns an array of table names associated with the current account and endpoint. The output from ListTables is paginated, with each page returning a maximum of 100 table names.

", "PutItem": "

Creates a new item, or replaces an old item with a new item. If an item that has the same primary key as the new item already exists in the specified table, the new item completely replaces the existing item. You can perform a conditional put operation (add a new item if one with the specified primary key doesn't exist), or replace an existing item if it has certain attribute values.

In addition to putting an item, you can also return the item's attribute values in the same operation, using the ReturnValues parameter.

When you add an item, the primary key attribute(s) are the only required attributes. Attribute values cannot be null. String and Binary type attributes must have lengths greater than zero. Set type attributes cannot be empty. Requests with empty values will be rejected with a ValidationException exception.

You can request that PutItem return either a copy of the original item (before the update) or a copy of the updated item (after the update). For more information, see the ReturnValues description below.

To prevent a new item from replacing an existing item, use a conditional put operation with ComparisonOperator set to NULL for the primary key attribute, or attributes.

For more information about using this API, see Working with Items in the Amazon DynamoDB Developer Guide.

", - "Query": "

A Query operation uses the primary key of a table or a secondary index to directly access items from that table or index.

Use the KeyConditionExpression parameter to provide a specific hash key value. The Query operation will return all of the items from the table or index with that hash key value. You can optionally narrow the scope of the Query by specifying a range key value and a comparison operator in the KeyConditionExpression. You can use the ScanIndexForward parameter to get results in forward or reverse order, by range key or by index key.

Queries that do not return results consume the minimum number of read capacity units for that type of read operation.

If the total number of items meeting the query criteria exceeds the result set size limit of 1 MB, the query stops and results are returned to the user with LastEvaluatedKey to continue the query in a subsequent operation. Unlike a Scan operation, a Query operation never returns both an empty result set and a LastEvaluatedKey. The LastEvaluatedKey is only provided if the results exceed 1 MB, or if you have used Limit.

You can query a table, a local secondary index, or a global secondary index. For a query on a table or on a local secondary index, you can set ConsistentRead to true and obtain a strongly consistent result. Global secondary indexes support eventually consistent reads only, so do not specify ConsistentRead when querying a global secondary index.

", - "Scan": "

The Scan operation returns one or more items and item attributes by accessing every item in a table or a secondary index. To have DynamoDB return fewer items, you can provide a ScanFilter operation.

If the total number of scanned items exceeds the maximum data set size limit of 1 MB, the scan stops and results are returned to the user as a LastEvaluatedKey value to continue the scan in a subsequent operation. The results also include the number of items exceeding the limit. A scan can result in no table data meeting the filter criteria.

The result set is eventually consistent.

By default, Scan operations proceed sequentially; however, for faster performance on a large table or secondary index, applications can request a parallel Scan operation by providing the Segment and TotalSegments parameters. For more information, see Parallel Scan in the Amazon DynamoDB Developer Guide.

", + "Query": "

A Query operation uses the primary key of a table or a secondary index to directly access items from that table or index.

Use the KeyConditionExpression parameter to provide a specific hash key value. The Query operation will return all of the items from the table or index with that hash key value. You can optionally narrow the scope of the Query operation by specifying a range key value and a comparison operator in KeyConditionExpression. You can use the ScanIndexForward parameter to get results in forward or reverse order, by range key or by index key.

Queries that do not return results consume the minimum number of read capacity units for that type of read operation.

If the total number of items meeting the query criteria exceeds the result set size limit of 1 MB, the query stops and results are returned to the user with the LastEvaluatedKey element to continue the query in a subsequent operation. Unlike a Scan operation, a Query operation never returns both an empty result set and a LastEvaluatedKey value. LastEvaluatedKey is only provided if the results exceed 1 MB, or if you have used the Limit parameter.

You can query a table, a local secondary index, or a global secondary index. For a query on a table or on a local secondary index, you can set the ConsistentRead parameter to true and obtain a strongly consistent result. Global secondary indexes support eventually consistent reads only, so do not specify ConsistentRead when querying a global secondary index.

", + "Scan": "

The Scan operation returns one or more items and item attributes by accessing every item in a table or a secondary index. To have DynamoDB return fewer items, you can provide a ScanFilter operation.

If the total number of scanned items exceeds the maximum data set size limit of 1 MB, the scan stops and results are returned to the user as a LastEvaluatedKey value to continue the scan in a subsequent operation. The results also include the number of items exceeding the limit. A scan can result in no table data meeting the filter criteria.

By default, Scan operations proceed sequentially; however, for faster performance on a large table or secondary index, applications can request a parallel Scan operation by providing the Segment and TotalSegments parameters. For more information, see Parallel Scan in the Amazon DynamoDB Developer Guide.

By default, Scan uses eventually consistent reads when accessing the data in the table or local secondary index. However, you can use strongly consistent reads instead by setting the ConsistentRead parameter to true.

", "UpdateItem": "

Edits an existing item's attributes, or adds a new item to the table if it does not already exist. You can put, delete, or add attribute values. You can also perform a conditional update on an existing item (insert a new attribute name-value pair if it doesn't exist, or replace an existing name-value pair if it has certain expected attribute values). If conditions are specified and the item does not exist, then the operation fails and a new item is not created.

You can also return the item's attribute values in the same UpdateItem operation using the ReturnValues parameter.

", - "UpdateTable": "

Updates the provisioned throughput for the given table, or manages the global secondary indexes on the table.

You can increase or decrease the table's provisioned throughput values within the maximums and minimums listed in the Limits section in the Amazon DynamoDB Developer Guide.

In addition, you can use UpdateTable to add, modify or delete global secondary indexes on the table. For more information, see Managing Global Secondary Indexes in the Amazon DynamoDB Developer Guide.

The table must be in the ACTIVE state for UpdateTable to succeed. UpdateTable is an asynchronous operation; while executing the operation, the table is in the UPDATING state. While the table is in the UPDATING state, the table still has the provisioned throughput from before the call. The table's new provisioned throughput settings go into effect when the table returns to the ACTIVE state; at that point, the UpdateTable operation is complete.

" + "UpdateTable": "

Modifies the provisioned throughput settings, global secondary indexes, or DynamoDB Streams settings for a given table.

You can only perform one of the following operations at once:

UpdateTable is an asynchronous operation; while it is executing, the table status changes from ACTIVE to UPDATING. While it is UPDATING, you cannot issue another UpdateTable request. When the table returns to the ACTIVE state, the UpdateTable operation is complete.

" }, "service": "Amazon DynamoDB

Overview

This is the Amazon DynamoDB API Reference. This guide provides descriptions and samples of the low-level DynamoDB API. For information about DynamoDB application development, see the Amazon DynamoDB Developer Guide.

Instead of making the requests to the low-level DynamoDB API directly from your application, we recommend that you use the AWS Software Development Kits (SDKs). The easy-to-use libraries in the AWS SDKs make it unnecessary to call the low-level DynamoDB API directly from your application. The libraries take care of request authentication, serialization, and connection management. For more information, see Using the AWS SDKs with DynamoDB in the Amazon DynamoDB Developer Guide.

If you decide to code against the low-level DynamoDB API directly, you will need to write the necessary code to authenticate your requests. For more information on signing your requests, see Using the DynamoDB API in the Amazon DynamoDB Developer Guide.

The following are short descriptions of each low-level API action, organized by function.

Managing Tables

For conceptual information about managing tables, see Working with Tables in the Amazon DynamoDB Developer Guide.

Reading Data

For conceptual information about reading data, see Working with Items and Query and Scan Operations in the Amazon DynamoDB Developer Guide.

Modifying Data

For conceptual information about modifying data, see Working with Items and Query and Scan Operations in the Amazon DynamoDB Developer Guide.

", "shapes": { @@ -125,7 +125,7 @@ "BatchGetRequestMap": { "base": null, "refs": { - "BatchGetItemInput$RequestItems": "

A map of one or more table names and, for each table, a map that describes one or more items to retrieve from that table. Each table name can be used only once per BatchGetItem request.

Each element in the map of items to retrieve consists of the following:

", + "BatchGetItemInput$RequestItems": "

A map of one or more table names and, for each table, a map that describes one or more items to retrieve from that table. Each table name can be used only once per BatchGetItem request.

Each element in the map of items to retrieve consists of the following:

", "BatchGetItemOutput$UnprocessedKeys": "

A map of tables and their respective keys that were not processed with the current response. The UnprocessedKeys value is in the same form as RequestItems, so the value can be provided directly to a subsequent BatchGetItem operation. For more information, see RequestItems in the Request Parameters section.

Each element consists of:

If there are no unprocessed keys remaining, the response contains an empty UnprocessedKeys map.

" } }, @@ -175,7 +175,7 @@ "base": null, "refs": { "ExpectedAttributeValue$Exists": "

Causes DynamoDB to evaluate the value before attempting a conditional operation:

The default setting for Exists is true. If you supply a Value all by itself, DynamoDB assumes the attribute exists: You don't have to set Exists to true, because it is implied.

DynamoDB returns a ValidationException if:

", - "QueryInput$ScanIndexForward": "

A value that specifies ascending (true) or descending (false) traversal of the index. DynamoDB returns results reflecting the requested order determined by the range key. If the data type is Number, the results are returned in numeric order. For type String, the results are returned in order of ASCII character code values. For type Binary, DynamoDB treats each byte of the binary data as unsigned when it compares binary values.

If ScanIndexForward is not specified, the results are returned in ascending order.

" + "QueryInput$ScanIndexForward": "

Specifies the order in which to return the query results - either ascending (true) or descending (false).

Items with the same hash key are stored in sorted order by range key. If the range key data type is Number, the results are stored in numeric order. For type String, the results are returned in order of ASCII character code values. For type Binary, DynamoDB treats each byte of the binary data as unsigned.

If ScanIndexForward is true, DynamoDB returns the results in order, by range key. This is the default behavior.

If ScanIndexForward is false, DynamoDB sorts the results in descending order by range key, and then returns the results to the client.

" } }, "Capacity": { @@ -202,11 +202,11 @@ "ConditionExpression": { "base": null, "refs": { - "DeleteItemInput$ConditionExpression": "

A condition that must be satisfied in order for a conditional DeleteItem to succeed.

An expression can contain any of the following:

For more information on condition expressions, see Specifying Conditions in the Amazon DynamoDB Developer Guide.

ConditionExpression replaces the legacy ConditionalOperator and Expected parameters.

", - "PutItemInput$ConditionExpression": "

A condition that must be satisfied in order for a conditional PutItem operation to succeed.

An expression can contain any of the following:

For more information on condition expressions, see Specifying Conditions in the Amazon DynamoDB Developer Guide.

ConditionExpression replaces the legacy ConditionalOperator and Expected parameters.

", + "DeleteItemInput$ConditionExpression": "

A condition that must be satisfied in order for a conditional DeleteItem to succeed.

An expression can contain any of the following:

For more information on condition expressions, see Specifying Conditions in the Amazon DynamoDB Developer Guide.

ConditionExpression replaces the legacy ConditionalOperator and Expected parameters.

", + "PutItemInput$ConditionExpression": "

A condition that must be satisfied in order for a conditional PutItem operation to succeed.

An expression can contain any of the following:

For more information on condition expressions, see Specifying Conditions in the Amazon DynamoDB Developer Guide.

ConditionExpression replaces the legacy ConditionalOperator and Expected parameters.

", "QueryInput$FilterExpression": "

A string that contains conditions that DynamoDB applies after the Query operation, but before the data is returned to you. Items that do not satisfy the FilterExpression criteria are not returned.

A FilterExpression is applied after the items have already been read; the process of filtering does not consume any additional read capacity units.

For more information, see Filter Expressions in the Amazon DynamoDB Developer Guide.

FilterExpression replaces the legacy QueryFilter and ConditionalOperator parameters.

", "ScanInput$FilterExpression": "

A string that contains conditions that DynamoDB applies after the Scan operation, but before the data is returned to you. Items that do not satisfy the FilterExpression criteria are not returned.

A FilterExpression is applied after the items have already been read; the process of filtering does not consume any additional read capacity units.

For more information, see Filter Expressions in the Amazon DynamoDB Developer Guide.

FilterExpression replaces the legacy ScanFilter and ConditionalOperator parameters.

", - "UpdateItemInput$ConditionExpression": "

A condition that must be satisfied in order for a conditional update to succeed.

An expression can contain any of the following:

For more information on condition expressions, see Specifying Conditions in the Amazon DynamoDB Developer Guide.

ConditionExpression replaces the legacy ConditionalOperator and Expected parameters.

" + "UpdateItemInput$ConditionExpression": "

A condition that must be satisfied in order for a conditional update to succeed.

An expression can contain any of the following:

For more information on condition expressions, see Specifying Conditions in the Amazon DynamoDB Developer Guide.

ConditionExpression replaces the legacy ConditionalOperator and Expected parameters.

" } }, "ConditionalCheckFailedException": { @@ -227,9 +227,10 @@ "ConsistentRead": { "base": null, "refs": { - "GetItemInput$ConsistentRead": "

A value that if set to true, then the operation uses strongly consistent reads; otherwise, eventually consistent reads are used.

", + "GetItemInput$ConsistentRead": "

Determines the read consistency model: If set to true, then the operation uses strongly consistent reads; otherwise, the operation uses eventually consistent reads.

", "KeysAndAttributes$ConsistentRead": "

The consistency of a read operation. If set to true, then a strongly consistent read is used; otherwise, an eventually consistent read is used.

", - "QueryInput$ConsistentRead": "

A value that if set to true, then the operation uses strongly consistent reads; otherwise, eventually consistent reads are used.

Strongly consistent reads are not supported on global secondary indexes. If you query a global secondary index with ConsistentRead set to true, you will receive an error message.

" + "QueryInput$ConsistentRead": "

Determines the read consistency model: If set to true, then the operation uses strongly consistent reads; otherwise, the operation uses eventually consistent reads.

Strongly consistent reads are not supported on global secondary indexes. If you query a global secondary index with ConsistentRead set to true, you will receive a ValidationException.

", + "ScanInput$ConsistentRead": "

A Boolean value that determines the read consistency model during the scan:

The default setting for ConsistentRead is false, meaning that eventually consistent reads will be used.

Strongly consistent reads are not supported on global secondary indexes. If you scan a global secondary index with ConsistentRead set to true, you will receive a ValidationException.

" } }, "ConsumedCapacity": { @@ -353,13 +354,13 @@ "ExpressionAttributeNameMap": { "base": null, "refs": { - "DeleteItemInput$ExpressionAttributeNames": "

One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames:

Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:

The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide). To work around this, you could specify the following for ExpressionAttributeNames:

You could then use this substitution in an expression, as in this example:

Tokens that begin with the : character are expression attribute values, which are placeholders for the actual value at runtime.

For more information on expression attribute names, see Using Placeholders for Attribute Names and Values in the Amazon DynamoDB Developer Guide.

", - "GetItemInput$ExpressionAttributeNames": "

One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames:

Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:

The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide). To work around this, you could specify the following for ExpressionAttributeNames:

You could then use this substitution in an expression, as in this example:

Tokens that begin with the : character are expression attribute values, which are placeholders for the actual value at runtime.

For more information on expression attribute names, see Using Placeholders for Attribute Names and Values in the Amazon DynamoDB Developer Guide.

", - "KeysAndAttributes$ExpressionAttributeNames": "

One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames:

Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:

The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide). To work around this, you could specify the following for ExpressionAttributeNames:

You could then use this substitution in an expression, as in this example:

Tokens that begin with the : character are expression attribute values, which are placeholders for the actual value at runtime.

For more information on expression attribute names, see Using Placeholders for Attribute Names and Values in the Amazon DynamoDB Developer Guide.

", - "PutItemInput$ExpressionAttributeNames": "

One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames:

Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:

The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide). To work around this, you could specify the following for ExpressionAttributeNames:

You could then use this substitution in an expression, as in this example:

Tokens that begin with the : character are expression attribute values, which are placeholders for the actual value at runtime.

For more information on expression attribute names, see Using Placeholders for Attribute Names and Values in the Amazon DynamoDB Developer Guide.

", - "QueryInput$ExpressionAttributeNames": "

One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames:

Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:

The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide). To work around this, you could specify the following for ExpressionAttributeNames:

You could then use this substitution in an expression, as in this example:

Tokens that begin with the : character are expression attribute values, which are placeholders for the actual value at runtime.

For more information on expression attribute names, see Using Placeholders for Attribute Names and Values in the Amazon DynamoDB Developer Guide.

", - "ScanInput$ExpressionAttributeNames": "

One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames:

Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:

The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide). To work around this, you could specify the following for ExpressionAttributeNames:

You could then use this substitution in an expression, as in this example:

Tokens that begin with the : character are expression attribute values, which are placeholders for the actual value at runtime.

For more information on expression attribute names, see Using Placeholders for Attribute Names and Values in the Amazon DynamoDB Developer Guide.

", - "UpdateItemInput$ExpressionAttributeNames": "

One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames:

Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:

The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide). To work around this, you could specify the following for ExpressionAttributeNames:

You could then use this substitution in an expression, as in this example:

Tokens that begin with the : character are expression attribute values, which are placeholders for the actual value at runtime.

For more information on expression attribute names, see Using Placeholders for Attribute Names and Values in the Amazon DynamoDB Developer Guide.

" + "DeleteItemInput$ExpressionAttributeNames": "

One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames:

Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:

The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide). To work around this, you could specify the following for ExpressionAttributeNames:

You could then use this substitution in an expression, as in this example:

Tokens that begin with the : character are expression attribute values, which are placeholders for the actual value at runtime.

For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide.

", + "GetItemInput$ExpressionAttributeNames": "

One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames:

Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:

The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide). To work around this, you could specify the following for ExpressionAttributeNames:

You could then use this substitution in an expression, as in this example:

Tokens that begin with the : character are expression attribute values, which are placeholders for the actual value at runtime.

For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide.

", + "KeysAndAttributes$ExpressionAttributeNames": "

One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames:

Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:

The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide). To work around this, you could specify the following for ExpressionAttributeNames:

You could then use this substitution in an expression, as in this example:

Tokens that begin with the : character are expression attribute values, which are placeholders for the actual value at runtime.

For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide.

", + "PutItemInput$ExpressionAttributeNames": "

One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames:

Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:

The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide). To work around this, you could specify the following for ExpressionAttributeNames:

You could then use this substitution in an expression, as in this example:

Tokens that begin with the : character are expression attribute values, which are placeholders for the actual value at runtime.

For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide.

", + "QueryInput$ExpressionAttributeNames": "

One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames:

Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:

The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide). To work around this, you could specify the following for ExpressionAttributeNames:

You could then use this substitution in an expression, as in this example:

Tokens that begin with the : character are expression attribute values, which are placeholders for the actual value at runtime.

For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide.

", + "ScanInput$ExpressionAttributeNames": "

One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames:

Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:

The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide). To work around this, you could specify the following for ExpressionAttributeNames:

You could then use this substitution in an expression, as in this example:

Tokens that begin with the : character are expression attribute values, which are placeholders for the actual value at runtime.

For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide.

", + "UpdateItemInput$ExpressionAttributeNames": "

One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames:

Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:

The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide). To work around this, you could specify the following for ExpressionAttributeNames:

You could then use this substitution in an expression, as in this example:

Tokens that begin with the : character are expression attribute values, which are placeholders for the actual value at runtime.

For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide.

" } }, "ExpressionAttributeNameVariable": { @@ -371,11 +372,11 @@ "ExpressionAttributeValueMap": { "base": null, "refs": { - "DeleteItemInput$ExpressionAttributeValues": "

One or more values that can be substituted in an expression.

Use the : (colon) character in an expression to dereference an attribute value. For example, suppose that you wanted to check whether the value of the ProductStatus attribute was one of the following:

Available | Backordered | Discontinued

You would first need to specify ExpressionAttributeValues as follows:

{ \":avail\":{\"S\":\"Available\"}, \":back\":{\"S\":\"Backordered\"}, \":disc\":{\"S\":\"Discontinued\"} }

You could then use these values in an expression, such as this:

ProductStatus IN (:avail, :back, :disc)

For more information on expression attribute values, see Using Placeholders for Attribute Names and Values in the Amazon DynamoDB Developer Guide.

", - "PutItemInput$ExpressionAttributeValues": "

One or more values that can be substituted in an expression.

Use the : (colon) character in an expression to dereference an attribute value. For example, suppose that you wanted to check whether the value of the ProductStatus attribute was one of the following:

Available | Backordered | Discontinued

You would first need to specify ExpressionAttributeValues as follows:

{ \":avail\":{\"S\":\"Available\"}, \":back\":{\"S\":\"Backordered\"}, \":disc\":{\"S\":\"Discontinued\"} }

You could then use these values in an expression, such as this:

ProductStatus IN (:avail, :back, :disc)

For more information on expression attribute values, see Using Placeholders for Attribute Names and Values in the Amazon DynamoDB Developer Guide.

", - "QueryInput$ExpressionAttributeValues": "

One or more values that can be substituted in an expression.

Use the : (colon) character in an expression to dereference an attribute value. For example, suppose that you wanted to check whether the value of the ProductStatus attribute was one of the following:

Available | Backordered | Discontinued

You would first need to specify ExpressionAttributeValues as follows:

{ \":avail\":{\"S\":\"Available\"}, \":back\":{\"S\":\"Backordered\"}, \":disc\":{\"S\":\"Discontinued\"} }

You could then use these values in an expression, such as this:

ProductStatus IN (:avail, :back, :disc)

For more information on expression attribute values, see Using Placeholders for Attribute Names and Values in the Amazon DynamoDB Developer Guide.

", - "ScanInput$ExpressionAttributeValues": "

One or more values that can be substituted in an expression.

Use the : (colon) character in an expression to dereference an attribute value. For example, suppose that you wanted to check whether the value of the ProductStatus attribute was one of the following:

Available | Backordered | Discontinued

You would first need to specify ExpressionAttributeValues as follows:

{ \":avail\":{\"S\":\"Available\"}, \":back\":{\"S\":\"Backordered\"}, \":disc\":{\"S\":\"Discontinued\"} }

You could then use these values in an expression, such as this:

ProductStatus IN (:avail, :back, :disc)

For more information on expression attribute values, see Using Placeholders for Attribute Names and Values in the Amazon DynamoDB Developer Guide.

", - "UpdateItemInput$ExpressionAttributeValues": "

One or more values that can be substituted in an expression.

Use the : (colon) character in an expression to dereference an attribute value. For example, suppose that you wanted to check whether the value of the ProductStatus attribute was one of the following:

Available | Backordered | Discontinued

You would first need to specify ExpressionAttributeValues as follows:

{ \":avail\":{\"S\":\"Available\"}, \":back\":{\"S\":\"Backordered\"}, \":disc\":{\"S\":\"Discontinued\"} }

You could then use these values in an expression, such as this:

ProductStatus IN (:avail, :back, :disc)

For more information on expression attribute values, see Using Placeholders for Attribute Names and Values in the Amazon DynamoDB Developer Guide.

" + "DeleteItemInput$ExpressionAttributeValues": "

One or more values that can be substituted in an expression.

Use the : (colon) character in an expression to dereference an attribute value. For example, suppose that you wanted to check whether the value of the ProductStatus attribute was one of the following:

Available | Backordered | Discontinued

You would first need to specify ExpressionAttributeValues as follows:

{ \":avail\":{\"S\":\"Available\"}, \":back\":{\"S\":\"Backordered\"}, \":disc\":{\"S\":\"Discontinued\"} }

You could then use these values in an expression, such as this:

ProductStatus IN (:avail, :back, :disc)

For more information on expression attribute values, see Specifying Conditions in the Amazon DynamoDB Developer Guide.

", + "PutItemInput$ExpressionAttributeValues": "

One or more values that can be substituted in an expression.

Use the : (colon) character in an expression to dereference an attribute value. For example, suppose that you wanted to check whether the value of the ProductStatus attribute was one of the following:

Available | Backordered | Discontinued

You would first need to specify ExpressionAttributeValues as follows:

{ \":avail\":{\"S\":\"Available\"}, \":back\":{\"S\":\"Backordered\"}, \":disc\":{\"S\":\"Discontinued\"} }

You could then use these values in an expression, such as this:

ProductStatus IN (:avail, :back, :disc)

For more information on expression attribute values, see Specifying Conditions in the Amazon DynamoDB Developer Guide.

", + "QueryInput$ExpressionAttributeValues": "

One or more values that can be substituted in an expression.

Use the : (colon) character in an expression to dereference an attribute value. For example, suppose that you wanted to check whether the value of the ProductStatus attribute was one of the following:

Available | Backordered | Discontinued

You would first need to specify ExpressionAttributeValues as follows:

{ \":avail\":{\"S\":\"Available\"}, \":back\":{\"S\":\"Backordered\"}, \":disc\":{\"S\":\"Discontinued\"} }

You could then use these values in an expression, such as this:

ProductStatus IN (:avail, :back, :disc)

For more information on expression attribute values, see Specifying Conditions in the Amazon DynamoDB Developer Guide.

", + "ScanInput$ExpressionAttributeValues": "

One or more values that can be substituted in an expression.

Use the : (colon) character in an expression to dereference an attribute value. For example, suppose that you wanted to check whether the value of the ProductStatus attribute was one of the following:

Available | Backordered | Discontinued

You would first need to specify ExpressionAttributeValues as follows:

{ \":avail\":{\"S\":\"Available\"}, \":back\":{\"S\":\"Backordered\"}, \":disc\":{\"S\":\"Discontinued\"} }

You could then use these values in an expression, such as this:

ProductStatus IN (:avail, :back, :disc)

For more information on expression attribute values, see Specifying Conditions in the Amazon DynamoDB Developer Guide.

", + "UpdateItemInput$ExpressionAttributeValues": "

One or more values that can be substituted in an expression.

Use the : (colon) character in an expression to dereference an attribute value. For example, suppose that you wanted to check whether the value of the ProductStatus attribute was one of the following:

Available | Backordered | Discontinued

You would first need to specify ExpressionAttributeValues as follows:

{ \":avail\":{\"S\":\"Available\"}, \":back\":{\"S\":\"Backordered\"}, \":disc\":{\"S\":\"Discontinued\"} }

You could then use these values in an expression, such as this:

ProductStatus IN (:avail, :back, :disc)

For more information on expression attribute values, see Specifying Conditions in the Amazon DynamoDB Developer Guide.

" } }, "ExpressionAttributeValueVariable": { @@ -387,7 +388,7 @@ "FilterConditionMap": { "base": null, "refs": { - "QueryInput$QueryFilter": "

This is a legacy parameter, for backward compatibility. New applications should use FilterExpression instead. Do not combine legacy parameters and expression parameters in a single API call; otherwise, DynamoDB will return a ValidationException exception.

A condition that evaluates the query results after the items are read and returns only the desired values.

This parameter does not support attributes of type List or Map.

A QueryFilter is applied after the items have already been read; the process of filtering does not consume any additional read capacity units.

If you provide more than one condition in the QueryFilter map, then by default all of the conditions must evaluate to true. In other words, the conditions are ANDed together. (You can use the ConditionalOperator parameter to OR the conditions instead. If you do this, then at least one of the conditions must evaluate to true, rather than all of them.)

Note that QueryFilter does not allow key attributes. You cannot define a filter condition on a hash key or range key.

Each QueryFilter element consists of an attribute name to compare, along with the following:

", + "QueryInput$QueryFilter": "

This is a legacy parameter, for backward compatibility. New applications should use FilterExpression instead. Do not combine legacy parameters and expression parameters in a single API call; otherwise, DynamoDB will return a ValidationException exception.

A condition that evaluates the query results after the items are read and returns only the desired values.

This parameter does not support attributes of type List or Map.

A QueryFilter is applied after the items have already been read; the process of filtering does not consume any additional read capacity units.

If you provide more than one condition in the QueryFilter map, then by default all of the conditions must evaluate to true. In other words, the conditions are ANDed together. (You can use the ConditionalOperator parameter to OR the conditions instead. If you do this, then at least one of the conditions must evaluate to true, rather than all of them.)

Note that QueryFilter does not allow key attributes. You cannot define a filter condition on a hash key or range key.

Each QueryFilter element consists of an attribute name to compare, along with the following:

", "ScanInput$ScanFilter": "

This is a legacy parameter, for backward compatibility. New applications should use FilterExpression instead. Do not combine legacy parameters and expression parameters in a single API call; otherwise, DynamoDB will return a ValidationException exception.

A condition that evaluates the scan results and returns only the desired values.

This parameter does not support attributes of type List or Map.

If you specify more than one condition in the ScanFilter map, then by default all of the conditions must evaluate to true. In other words, the conditions are ANDed together. (You can use the ConditionalOperator parameter to OR the conditions instead. If you do this, then at least one of the conditions must evaluate to true, rather than all of them.)

Each ScanFilter element consists of an attribute name to compare, along with the following:

" } }, @@ -434,7 +435,7 @@ "GlobalSecondaryIndexUpdateList": { "base": null, "refs": { - "UpdateTableInput$GlobalSecondaryIndexUpdates": "

An array of one or more global secondary indexes for the table. For each index in the array, you can request one action:

" + "UpdateTableInput$GlobalSecondaryIndexUpdates": "

An array of one or more global secondary indexes for the table. For each index in the array, you can request one action:

For more information, see Managing Global Secondary Indexes in the Amazon DynamoDB Developer Guide.

" } }, "IndexName": { @@ -547,7 +548,7 @@ "KeyExpression": { "base": null, "refs": { - "QueryInput$KeyConditionExpression": "

The condition that specifies the key value(s) for items to be retrieved by the Query action.

The condition must perform an equality test on a single hash key value. The condition can also test for one or more range key values. A Query can use KeyConditionExpression to retrieve a single item with a given hash and range key value, or several items that have the same hash key value but different range key values.

The hash key equality test is required, and must be specified in the following format:

hashAttributeName = :hashval

If you also want to provide a range key condition, it must be combined using AND with the hash key condition. Following is an example, using the = comparison operator for the range key:

hashAttributeName = :hashval AND rangeAttributeName = :rangeval

Valid comparisons for the range key condition are as follows:

Use the ExpressionAttributeValues parameter to replace tokens such as :hashval and :rangeval with actual values at runtime.

You can optionally use the ExpressionAttributeNames parameter to replace the names of the hash and range attributes with placeholder tokens. This might be necessary if an attribute name conflicts with a DynamoDB reserved word. For example, the following KeyConditionExpression causes an error because Size is a reserved word:

To work around this, define a placeholder (such as #myval) to represent the attribute name Size. KeyConditionExpression then is as follows:

For a list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide.

For more information on ExpressionAttributeNames and ExpressionAttributeValues, see Using Placeholders for Attribute Names and Values in the Amazon DynamoDB Developer Guide.

KeyConditionExpression replaces the legacy KeyConditions parameter.

" + "QueryInput$KeyConditionExpression": "

The condition that specifies the key value(s) for items to be retrieved by the Query action.

The condition must perform an equality test on a single hash key value. The condition can also perform one of several comparison tests on a single range key value. Query can use KeyConditionExpression to retrieve one item with a given hash and range key value, or several items that have the same hash key value but different range key values.

The hash key equality test is required, and must be specified in the following format:

hashAttributeName = :hashval

If you also want to provide a range key condition, it must be combined using AND with the hash key condition. Following is an example, using the = comparison operator for the range key:

hashAttributeName = :hashval AND rangeAttributeName = :rangeval

Valid comparisons for the range key condition are as follows:

Use the ExpressionAttributeValues parameter to replace tokens such as :hashval and :rangeval with actual values at runtime.

You can optionally use the ExpressionAttributeNames parameter to replace the names of the hash and range attributes with placeholder tokens. This option might be necessary if an attribute name conflicts with a DynamoDB reserved word. For example, the following KeyConditionExpression parameter causes an error because Size is a reserved word:

To work around this, define a placeholder (such as #S) to represent the attribute name Size. KeyConditionExpression then is as follows:

For a list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide.

For more information on ExpressionAttributeNames and ExpressionAttributeValues, see Using Placeholders for Attribute Names and Values in the Amazon DynamoDB Developer Guide.

KeyConditionExpression replaces the legacy KeyConditions parameter.

" } }, "KeyList": { @@ -752,7 +753,7 @@ } }, "ProvisionedThroughputExceededException": { - "base": "

The request rate is too high, or the request is too large, for the available throughput to accommodate. The AWS SDKs automatically retry requests that receive this exception; therefore, your request will eventually succeed, unless the request is too large or your retry queue is too large to finish. Reduce the frequency of requests by using the strategies listed in Error Retries and Exponential Backoff in the Amazon DynamoDB Developer Guide.

", + "base": "

Your request rate is too high. The AWS SDKs for DynamoDB automatically retry requests that receive this exception. Your request is eventually successful, unless your retry queue is too large to finish. Reduce the frequency of requests and use exponential backoff. For more information, go to Error Retries and Exponential Backoff in the Amazon DynamoDB Developer Guide.

", "refs": { } }, @@ -800,7 +801,7 @@ } }, "ReturnConsumedCapacity": { - "base": "

A value that if set to TOTAL, the response includes ConsumedCapacity data for tables and indexes. If set to INDEXES, the response includes ConsumedCapacity for indexes. If set to NONE (the default), ConsumedCapacity is not included in the response.

", + "base": "

Determines the level of detail about provisioned throughput consumption that is returned in the response:

", "refs": { "BatchGetItemInput$ReturnConsumedCapacity": null, "BatchWriteItemInput$ReturnConsumedCapacity": null, @@ -815,17 +816,17 @@ "ReturnItemCollectionMetrics": { "base": null, "refs": { - "BatchWriteItemInput$ReturnItemCollectionMetrics": "

A value that if set to SIZE, the response includes statistics about item collections, if any, that were modified during the operation are returned in the response. If set to NONE (the default), no statistics are returned.

", - "DeleteItemInput$ReturnItemCollectionMetrics": "

A value that if set to SIZE, the response includes statistics about item collections, if any, that were modified during the operation are returned in the response. If set to NONE (the default), no statistics are returned.

", - "PutItemInput$ReturnItemCollectionMetrics": "

A value that if set to SIZE, the response includes statistics about item collections, if any, that were modified during the operation are returned in the response. If set to NONE (the default), no statistics are returned.

", - "UpdateItemInput$ReturnItemCollectionMetrics": "

A value that if set to SIZE, the response includes statistics about item collections, if any, that were modified during the operation are returned in the response. If set to NONE (the default), no statistics are returned.

" + "BatchWriteItemInput$ReturnItemCollectionMetrics": "

Determines whether item collection metrics are returned. If set to SIZE, the response includes statistics about item collections, if any, that were modified during the operation are returned in the response. If set to NONE (the default), no statistics are returned.

", + "DeleteItemInput$ReturnItemCollectionMetrics": "

Determines whether item collection metrics are returned. If set to SIZE, the response includes statistics about item collections, if any, that were modified during the operation are returned in the response. If set to NONE (the default), no statistics are returned.

", + "PutItemInput$ReturnItemCollectionMetrics": "

Determines whether item collection metrics are returned. If set to SIZE, the response includes statistics about item collections, if any, that were modified during the operation are returned in the response. If set to NONE (the default), no statistics are returned.

", + "UpdateItemInput$ReturnItemCollectionMetrics": "

Determines whether item collection metrics are returned. If set to SIZE, the response includes statistics about item collections, if any, that were modified during the operation are returned in the response. If set to NONE (the default), no statistics are returned.

" } }, "ReturnValue": { "base": null, "refs": { "DeleteItemInput$ReturnValues": "

Use ReturnValues if you want to get the item attributes as they appeared before they were deleted. For DeleteItem, the valid values are:

", - "PutItemInput$ReturnValues": "

Use ReturnValues if you want to get the item attributes as they appeared before they were updated with the PutItem request. For PutItem, the valid values are:

", + "PutItemInput$ReturnValues": "

Use ReturnValues if you want to get the item attributes as they appeared before they were updated with the PutItem request. For PutItem, the valid values are:

Other \"Valid Values\" are not relevant to PutItem.

", "UpdateItemInput$ReturnValues": "

Use ReturnValues if you want to get the item attributes as they appeared either before or after they were updated. For UpdateItem, the valid values are:

" } }, @@ -871,6 +872,41 @@ "ScanInput$Select": "

The attributes to be returned in the result. You can retrieve all item attributes, specific item attributes, or the count of matching items.

If neither Select nor AttributesToGet are specified, DynamoDB defaults to ALL_ATTRIBUTES. You cannot use both AttributesToGet and Select together in a single request, unless the value for Select is SPECIFIC_ATTRIBUTES. (This usage is equivalent to specifying AttributesToGet without any value for Select.)

" } }, + "StreamArn": { + "base": null, + "refs": { + "TableDescription$LatestStreamArn": "

The Amazon Resource Name (ARN) that uniquely identifies the latest stream for this table.

" + } + }, + "StreamEnabled": { + "base": null, + "refs": { + "StreamSpecification$StreamEnabled": "

Indicates whether DynamoDB Streams is enabled (true) or disabled (false) on the table.

" + } + }, + "StreamSpecification": { + "base": "

Represents the DynamoDB Streams configuration for a table in DynamoDB.

", + "refs": { + "CreateTableInput$StreamSpecification": "

The settings for DynamoDB Streams on the table. These settings consist of:

", + "TableDescription$StreamSpecification": "

The current DynamoDB Streams configuration for the table.

", + "UpdateTableInput$StreamSpecification": "

Represents the DynamoDB Streams configuration for the table.

You will receive a ResourceInUseException if you attempt to enable a stream on a table that already has a stream, or if you attempt to disable a stream on a table which does not have a stream.

" + } + }, + "StreamViewType": { + "base": null, + "refs": { + "StreamSpecification$StreamViewType": "

The DynamoDB Streams settings for the table. These settings consist of:

" + } + }, + "String": { + "base": null, + "refs": { + "GlobalSecondaryIndexDescription$IndexArn": "

The Amazon Resource Name (ARN) that uniquely identifies the index.

", + "LocalSecondaryIndexDescription$IndexArn": "

The Amazon Resource Name (ARN) that uniquely identifies the index.

", + "TableDescription$TableArn": "

The Amazon Resource Name (ARN) that uniquely identifies the table.

", + "TableDescription$LatestStreamLabel": "

A timestamp, in ISO 8601 format, for this stream.

Note that LatestStreamLabel is not a unique identifier for the stream, because it is possible that a stream from another table might have the same timestamp. However, the combination of the following three elements is guaranteed to be unique:

" + } + }, "StringAttributeValue": { "base": null, "refs": { @@ -932,7 +968,7 @@ "UpdateExpression": { "base": null, "refs": { - "UpdateItemInput$UpdateExpression": "

An expression that defines one or more attributes to be updated, the action to be performed on them, and new value(s) for them.

The following action values are available for UpdateExpression.

You can have many actions in a single expression, such as the following: SET a=:value1, b=:value2 DELETE :value3, :value4, :value5

For more information on update expressions, see Modifying Items and Attributes in the Amazon DynamoDB Developer Guide.

UpdateExpression replaces the legacy AttributeUpdates parameter.

" + "UpdateItemInput$UpdateExpression": "

An expression that defines one or more attributes to be updated, the action to be performed on them, and new value(s) for them.

The following action values are available for UpdateExpression.

You can have many actions in a single expression, such as the following: SET a=:value1, b=:value2 DELETE :value3, :value4, :value5

For more information on update expressions, see Modifying Items and Attributes in the Amazon DynamoDB Developer Guide.

UpdateExpression replaces the legacy AttributeUpdates parameter.

" } }, "UpdateGlobalSecondaryIndexAction": { diff --git a/src/data/dynamodb/2012-08-10/paginators-1.json b/src/data/dynamodb/2012-08-10/paginators-1.json index 5cb87c9ba0..d4075e1207 100644 --- a/src/data/dynamodb/2012-08-10/paginators-1.json +++ b/src/data/dynamodb/2012-08-10/paginators-1.json @@ -19,6 +19,7 @@ "Scan": { "input_token": "ExclusiveStartKey", "output_token": "LastEvaluatedKey", + "limit_key": "Limit", "result_key": "Items" } } diff --git a/src/data/manifest.json b/src/data/manifest.json index 18a18cd932..b4b138d201 100644 --- a/src/data/manifest.json +++ b/src/data/manifest.json @@ -98,6 +98,13 @@ "2012-10-29": "2012-10-29" } }, + "devicefarm": { + "namespace": "DeviceFarm", + "versions": { + "latest": "2015-06-23", + "2015-06-23": "2015-06-23" + } + }, "directconnect": { "namespace": "DirectConnect", "versions": { @@ -308,6 +315,13 @@ "2013-06-30": "2013-06-30" } }, + "streams.dynamodb": { + "namespace": "DynamoDbStreams", + "versions": { + "latest": "2012-08-10", + "2012-08-10": "2012-08-10" + } + }, "sts": { "namespace": "Sts", "versions": { diff --git a/src/data/streams.dynamodb/2012-08-10/api-2.json b/src/data/streams.dynamodb/2012-08-10/api-2.json new file mode 100644 index 0000000000..fd8d299afe --- /dev/null +++ b/src/data/streams.dynamodb/2012-08-10/api-2.json @@ -0,0 +1,436 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2012-08-10", + "endpointPrefix":"streams.dynamodb", + "jsonVersion":"1.0", + "serviceFullName":"Amazon DynamoDB Streams", + "signatureVersion":"v4", + "signingName":"dynamodb", + "targetPrefix":"DynamoDBStreams_20120810", + "protocol":"json" + }, + "operations":{ + "DescribeStream":{ + "name":"DescribeStream", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeStreamInput"}, + "output":{"shape":"DescribeStreamOutput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"InternalServerError", + "exception":true, + "fault":true + } + ] + }, + 
"GetRecords":{ + "name":"GetRecords", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetRecordsInput"}, + "output":{"shape":"GetRecordsOutput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"LimitExceededException", + "exception":true + }, + { + "shape":"InternalServerError", + "exception":true, + "fault":true + }, + { + "shape":"ExpiredIteratorException", + "exception":true + }, + { + "shape":"TrimmedDataAccessException", + "exception":true + } + ] + }, + "GetShardIterator":{ + "name":"GetShardIterator", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetShardIteratorInput"}, + "output":{"shape":"GetShardIteratorOutput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"InternalServerError", + "exception":true, + "fault":true + }, + { + "shape":"TrimmedDataAccessException", + "exception":true + } + ] + }, + "ListStreams":{ + "name":"ListStreams", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListStreamsInput"}, + "output":{"shape":"ListStreamsOutput"}, + "errors":[ + { + "shape":"ResourceNotFoundException", + "exception":true + }, + { + "shape":"InternalServerError", + "exception":true, + "fault":true + } + ] + } + }, + "shapes":{ + "AttributeMap":{ + "type":"map", + "key":{"shape":"AttributeName"}, + "value":{"shape":"AttributeValue"} + }, + "AttributeName":{ + "type":"string", + "max":65535 + }, + "AttributeValue":{ + "type":"structure", + "members":{ + "S":{"shape":"StringAttributeValue"}, + "N":{"shape":"NumberAttributeValue"}, + "B":{"shape":"BinaryAttributeValue"}, + "SS":{"shape":"StringSetAttributeValue"}, + "NS":{"shape":"NumberSetAttributeValue"}, + "BS":{"shape":"BinarySetAttributeValue"}, + "M":{"shape":"MapAttributeValue"}, + "L":{"shape":"ListAttributeValue"}, + "NULL":{"shape":"NullAttributeValue"}, + "BOOL":{"shape":"BooleanAttributeValue"} + } + }, + 
"BinaryAttributeValue":{"type":"blob"}, + "BinarySetAttributeValue":{ + "type":"list", + "member":{"shape":"BinaryAttributeValue"} + }, + "BooleanAttributeValue":{"type":"boolean"}, + "Date":{"type":"timestamp"}, + "DescribeStreamInput":{ + "type":"structure", + "required":["StreamArn"], + "members":{ + "StreamArn":{"shape":"StreamArn"}, + "Limit":{"shape":"PositiveIntegerObject"}, + "ExclusiveStartShardId":{"shape":"ShardId"} + } + }, + "DescribeStreamOutput":{ + "type":"structure", + "members":{ + "StreamDescription":{"shape":"StreamDescription"} + } + }, + "ErrorMessage":{"type":"string"}, + "ExpiredIteratorException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "GetRecordsInput":{ + "type":"structure", + "required":["ShardIterator"], + "members":{ + "ShardIterator":{"shape":"ShardIterator"}, + "Limit":{"shape":"PositiveIntegerObject"} + } + }, + "GetRecordsOutput":{ + "type":"structure", + "members":{ + "Records":{"shape":"RecordList"}, + "NextShardIterator":{"shape":"ShardIterator"} + } + }, + "GetShardIteratorInput":{ + "type":"structure", + "required":[ + "StreamArn", + "ShardId", + "ShardIteratorType" + ], + "members":{ + "StreamArn":{"shape":"StreamArn"}, + "ShardId":{"shape":"ShardId"}, + "ShardIteratorType":{"shape":"ShardIteratorType"}, + "SequenceNumber":{"shape":"SequenceNumber"} + } + }, + "GetShardIteratorOutput":{ + "type":"structure", + "members":{ + "ShardIterator":{"shape":"ShardIterator"} + } + }, + "InternalServerError":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true, + "fault":true + }, + "KeySchema":{ + "type":"list", + "member":{"shape":"KeySchemaElement"}, + "min":1, + "max":2 + }, + "KeySchemaAttributeName":{ + "type":"string", + "min":1, + "max":255 + }, + "KeySchemaElement":{ + "type":"structure", + "required":[ + "AttributeName", + "KeyType" + ], + "members":{ + "AttributeName":{"shape":"KeySchemaAttributeName"}, + 
"KeyType":{"shape":"KeyType"} + } + }, + "KeyType":{ + "type":"string", + "enum":[ + "HASH", + "RANGE" + ] + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "ListAttributeValue":{ + "type":"list", + "member":{"shape":"AttributeValue"} + }, + "ListStreamsInput":{ + "type":"structure", + "members":{ + "TableName":{"shape":"TableName"}, + "Limit":{"shape":"PositiveIntegerObject"}, + "ExclusiveStartStreamArn":{"shape":"StreamArn"} + } + }, + "ListStreamsOutput":{ + "type":"structure", + "members":{ + "Streams":{"shape":"StreamList"}, + "LastEvaluatedStreamArn":{"shape":"StreamArn"} + } + }, + "MapAttributeValue":{ + "type":"map", + "key":{"shape":"AttributeName"}, + "value":{"shape":"AttributeValue"} + }, + "NullAttributeValue":{"type":"boolean"}, + "NumberAttributeValue":{"type":"string"}, + "NumberSetAttributeValue":{ + "type":"list", + "member":{"shape":"NumberAttributeValue"} + }, + "OperationType":{ + "type":"string", + "enum":[ + "INSERT", + "MODIFY", + "REMOVE" + ] + }, + "PositiveIntegerObject":{ + "type":"integer", + "min":1 + }, + "PositiveLongObject":{ + "type":"long", + "min":1 + }, + "Record":{ + "type":"structure", + "members":{ + "eventID":{"shape":"String"}, + "eventName":{"shape":"OperationType"}, + "eventVersion":{"shape":"String"}, + "eventSource":{"shape":"String"}, + "awsRegion":{"shape":"String"}, + "dynamodb":{"shape":"StreamRecord"} + } + }, + "RecordList":{ + "type":"list", + "member":{"shape":"Record"} + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "SequenceNumber":{ + "type":"string", + "min":21, + "max":40 + }, + "SequenceNumberRange":{ + "type":"structure", + "members":{ + "StartingSequenceNumber":{"shape":"SequenceNumber"}, + "EndingSequenceNumber":{"shape":"SequenceNumber"} + } + }, + "Shard":{ + "type":"structure", + "members":{ + 
"ShardId":{"shape":"ShardId"}, + "SequenceNumberRange":{"shape":"SequenceNumberRange"}, + "ParentShardId":{"shape":"ShardId"} + } + }, + "ShardDescriptionList":{ + "type":"list", + "member":{"shape":"Shard"} + }, + "ShardId":{ + "type":"string", + "min":28, + "max":65 + }, + "ShardIterator":{ + "type":"string", + "min":1, + "max":2048 + }, + "ShardIteratorType":{ + "type":"string", + "enum":[ + "TRIM_HORIZON", + "LATEST", + "AT_SEQUENCE_NUMBER", + "AFTER_SEQUENCE_NUMBER" + ] + }, + "Stream":{ + "type":"structure", + "members":{ + "StreamArn":{"shape":"StreamArn"}, + "TableName":{"shape":"TableName"}, + "StreamLabel":{"shape":"String"} + } + }, + "StreamArn":{ + "type":"string", + "min":37, + "max":1024 + }, + "StreamDescription":{ + "type":"structure", + "members":{ + "StreamArn":{"shape":"StreamArn"}, + "StreamLabel":{"shape":"String"}, + "StreamStatus":{"shape":"StreamStatus"}, + "StreamViewType":{"shape":"StreamViewType"}, + "CreationRequestDateTime":{"shape":"Date"}, + "TableName":{"shape":"TableName"}, + "KeySchema":{"shape":"KeySchema"}, + "Shards":{"shape":"ShardDescriptionList"}, + "LastEvaluatedShardId":{"shape":"ShardId"} + } + }, + "StreamList":{ + "type":"list", + "member":{"shape":"Stream"} + }, + "StreamRecord":{ + "type":"structure", + "members":{ + "Keys":{"shape":"AttributeMap"}, + "NewImage":{"shape":"AttributeMap"}, + "OldImage":{"shape":"AttributeMap"}, + "SequenceNumber":{"shape":"SequenceNumber"}, + "SizeBytes":{"shape":"PositiveLongObject"}, + "StreamViewType":{"shape":"StreamViewType"} + } + }, + "StreamStatus":{ + "type":"string", + "enum":[ + "ENABLING", + "ENABLED", + "DISABLING", + "DISABLED" + ] + }, + "StreamViewType":{ + "type":"string", + "enum":[ + "NEW_IMAGE", + "OLD_IMAGE", + "NEW_AND_OLD_IMAGES", + "KEYS_ONLY" + ] + }, + "String":{"type":"string"}, + "StringAttributeValue":{"type":"string"}, + "StringSetAttributeValue":{ + "type":"list", + "member":{"shape":"StringAttributeValue"} + }, + "TableName":{ + "type":"string", + 
"min":3, + "max":255, + "pattern":"[a-zA-Z0-9_.-]+" + }, + "TrimmedDataAccessException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "exception":true + } + } +} diff --git a/src/data/streams.dynamodb/2012-08-10/docs-2.json b/src/data/streams.dynamodb/2012-08-10/docs-2.json new file mode 100644 index 0000000000..4868781693 --- /dev/null +++ b/src/data/streams.dynamodb/2012-08-10/docs-2.json @@ -0,0 +1,353 @@ +{ + "version": "2.0", + "operations": { + "DescribeStream": "

Returns information about a stream, including the current status of the stream, its Amazon Resource Name (ARN), the composition of its shards, and its corresponding DynamoDB table.

You can call DescribeStream at a maximum rate of 10 times per second.

Each shard in the stream has a SequenceNumberRange associated with it. If the SequenceNumberRange has a StartingSequenceNumber but no EndingSequenceNumber, then the shard is still open (able to receive more stream records). If both StartingSequenceNumber and EndingSequenceNumber are present, then that shard is closed and can no longer receive more data.

", + "GetRecords": "

Retrieves the stream records from a given shard.

Specify a shard iterator using the ShardIterator parameter. The shard iterator specifies the position in the shard from which you want to start reading stream records sequentially. If there are no stream records available in the portion of the shard that the iterator points to, GetRecords returns an empty list. Note that it might take multiple calls to get to a portion of the shard that contains stream records.

GetRecords can retrieve a maximum of 1 MB of data or 2000 stream records, whichever comes first.

", + "GetShardIterator": "

Returns a shard iterator. A shard iterator provides information about how to retrieve the stream records from within a shard. Use the shard iterator in a subsequent GetRecords request to read the stream records from the shard.

A shard iterator expires 15 minutes after it is returned to the requester.

", + "ListStreams": "

Returns an array of stream ARNs associated with the current account and endpoint. If the TableName parameter is present, then ListStreams will return only the stream ARNs for that table.

You can call ListStreams at a maximum rate of 5 times per second.

" + }, + "service": "Amazon DynamoDB Streams

This is the Amazon DynamoDB Streams API Reference. This guide describes the low-level API actions for accessing streams and processing stream records. For information about application development with DynamoDB Streams, see the Amazon DynamoDB Developer Guide.

Note that this document is intended for use with the following DynamoDB documentation:

The following are short descriptions of each low-level DynamoDB Streams API action, organized by function.

", + "shapes": { + "AttributeMap": { + "base": null, + "refs": { + "StreamRecord$Keys": "

The primary key attribute(s) for the DynamoDB item that was modified.

", + "StreamRecord$NewImage": "

The item in the DynamoDB table as it appeared after it was modified.

", + "StreamRecord$OldImage": "

The item in the DynamoDB table as it appeared before it was modified.

" + } + }, + "AttributeName": { + "base": null, + "refs": { + "AttributeMap$key": null, + "MapAttributeValue$key": null + } + }, + "AttributeValue": { + "base": "

Represents the data for an attribute. You can set one, and only one, of the elements.

Each attribute in an item is a name-value pair. An attribute can be single-valued or a multi-valued set. For example, a book item can have title and authors attributes. Each book has one title but can have many authors. The multi-valued attribute is a set; duplicate values are not allowed.

", + "refs": { + "AttributeMap$value": null, + "ListAttributeValue$member": null, + "MapAttributeValue$value": null + } + }, + "BinaryAttributeValue": { + "base": null, + "refs": { + "AttributeValue$B": "

A Binary data type.

", + "BinarySetAttributeValue$member": null + } + }, + "BinarySetAttributeValue": { + "base": null, + "refs": { + "AttributeValue$BS": "

A Binary Set data type.

" + } + }, + "BooleanAttributeValue": { + "base": null, + "refs": { + "AttributeValue$BOOL": "

A Boolean data type.

" + } + }, + "Date": { + "base": null, + "refs": { + "StreamDescription$CreationRequestDateTime": "

The date and time when the request to create this stream was issued.

" + } + }, + "DescribeStreamInput": { + "base": "

Represents the input of a DescribeStream operation.

", + "refs": { + } + }, + "DescribeStreamOutput": { + "base": "

Represents the output of a DescribeStream operation.

", + "refs": { + } + }, + "ErrorMessage": { + "base": null, + "refs": { + "ExpiredIteratorException$message": "

The provided iterator exceeds the maximum age allowed.

", + "InternalServerError$message": "

The server encountered an internal error trying to fulfill the request.

", + "LimitExceededException$message": "

Too many operations for a given subscriber.

", + "ResourceNotFoundException$message": "

The resource which is being requested does not exist.

", + "TrimmedDataAccessException$message": "

The data you are trying to access has been trimmed.

" + } + }, + "ExpiredIteratorException": { + "base": "

The shard iterator has expired and can no longer be used to retrieve stream records. A shard iterator expires 15 minutes after it is retrieved using the GetShardIterator action.

", + "refs": { + } + }, + "GetRecordsInput": { + "base": "

Represents the input of a GetRecords operation.

", + "refs": { + } + }, + "GetRecordsOutput": { + "base": "

Represents the output of a GetRecords operation.

", + "refs": { + } + }, + "GetShardIteratorInput": { + "base": "

Represents the input of a GetShardIterator operation.

", + "refs": { + } + }, + "GetShardIteratorOutput": { + "base": "

Represents the output of a GetShardIterator operation.

", + "refs": { + } + }, + "InternalServerError": { + "base": "

An error occurred on the server side.

", + "refs": { + } + }, + "KeySchema": { + "base": null, + "refs": { + "StreamDescription$KeySchema": "

The key attribute(s) of the stream's DynamoDB table.

" + } + }, + "KeySchemaAttributeName": { + "base": null, + "refs": { + "KeySchemaElement$AttributeName": "

The name of a key attribute.

" + } + }, + "KeySchemaElement": { + "base": "

Represents a single element of a key schema. A key schema specifies the attributes that make up the primary key of a table, or the key attributes of an index.

A KeySchemaElement represents exactly one attribute of the primary key. For example, a hash type primary key would be represented by one KeySchemaElement. A hash-and-range type primary key would require one KeySchemaElement for the hash attribute, and another KeySchemaElement for the range attribute.

", + "refs": { + "KeySchema$member": null + } + }, + "KeyType": { + "base": null, + "refs": { + "KeySchemaElement$KeyType": "

The attribute data, consisting of the data type and the attribute value itself.

" + } + }, + "LimitExceededException": { + "base": "

Your request rate is too high. The AWS SDKs for DynamoDB automatically retry requests that receive this exception. Your request is eventually successful, unless your retry queue is too large to finish. Reduce the frequency of requests and use exponential backoff. For more information, go to Error Retries and Exponential Backoff in the Amazon DynamoDB Developer Guide.

", + "refs": { + } + }, + "ListAttributeValue": { + "base": null, + "refs": { + "AttributeValue$L": "

A List data type.

" + } + }, + "ListStreamsInput": { + "base": "

Represents the input of a ListStreams operation.

", + "refs": { + } + }, + "ListStreamsOutput": { + "base": "

Represents the output of a ListStreams operation.

", + "refs": { + } + }, + "MapAttributeValue": { + "base": null, + "refs": { + "AttributeValue$M": "

A Map data type.

" + } + }, + "NullAttributeValue": { + "base": null, + "refs": { + "AttributeValue$NULL": "

A Null data type.

" + } + }, + "NumberAttributeValue": { + "base": null, + "refs": { + "AttributeValue$N": "

A Number data type.

", + "NumberSetAttributeValue$member": null + } + }, + "NumberSetAttributeValue": { + "base": null, + "refs": { + "AttributeValue$NS": "

A Number Set data type.

" + } + }, + "OperationType": { + "base": null, + "refs": { + "Record$eventName": "

The type of data modification that was performed on the DynamoDB table:

" + } + }, + "PositiveIntegerObject": { + "base": null, + "refs": { + "DescribeStreamInput$Limit": "

The maximum number of shard objects to return. The upper limit is 100.

", + "GetRecordsInput$Limit": "

The maximum number of records to return from the shard. The upper limit is 1000.

", + "ListStreamsInput$Limit": "

The maximum number of streams to return. The upper limit is 100.

" + } + }, + "PositiveLongObject": { + "base": null, + "refs": { + "StreamRecord$SizeBytes": "

The size of the stream record, in bytes.

" + } + }, + "Record": { + "base": "

A description of a unique event within a stream.

", + "refs": { + "RecordList$member": null + } + }, + "RecordList": { + "base": null, + "refs": { + "GetRecordsOutput$Records": "

The stream records from the shard, which were retrieved using the shard iterator.

" + } + }, + "ResourceNotFoundException": { + "base": "

The operation tried to access a nonexistent stream.

", + "refs": { + } + }, + "SequenceNumber": { + "base": null, + "refs": { + "GetShardIteratorInput$SequenceNumber": "

The sequence number of a stream record in the shard from which to start reading.

", + "SequenceNumberRange$StartingSequenceNumber": "

The first sequence number.

", + "SequenceNumberRange$EndingSequenceNumber": "

The last sequence number.

", + "StreamRecord$SequenceNumber": "

The sequence number of the stream record.

" + } + }, + "SequenceNumberRange": { + "base": "

The beginning and ending sequence numbers for the stream records contained within a shard.

", + "refs": { + "Shard$SequenceNumberRange": "

The range of possible sequence numbers for the shard.

" + } + }, + "Shard": { + "base": "

A uniquely identified group of stream records within a stream.

", + "refs": { + "ShardDescriptionList$member": null + } + }, + "ShardDescriptionList": { + "base": null, + "refs": { + "StreamDescription$Shards": "

The shards that comprise the stream.

" + } + }, + "ShardId": { + "base": null, + "refs": { + "DescribeStreamInput$ExclusiveStartShardId": "

The shard ID of the first item that this operation will evaluate. Use the value that was returned for LastEvaluatedShardId in the previous operation.

", + "GetShardIteratorInput$ShardId": "

The identifier of the shard. The iterator will be returned for this shard ID.

", + "Shard$ShardId": "

The system-generated identifier for this shard.

", + "Shard$ParentShardId": "

The shard ID of the current shard's parent.

", + "StreamDescription$LastEvaluatedShardId": "

The shard ID of the item where the operation stopped, inclusive of the previous result set. Use this value to start a new operation, excluding this value in the new request.

If LastEvaluatedShardId is empty, then the \"last page\" of results has been processed and there is currently no more data to be retrieved.

If LastEvaluatedShardId is not empty, it does not necessarily mean that there is more data in the result set. The only way to know when you have reached the end of the result set is when LastEvaluatedShardId is empty.

" + } + }, + "ShardIterator": { + "base": null, + "refs": { + "GetRecordsInput$ShardIterator": "

A shard iterator that was retrieved from a previous GetShardIterator operation. This iterator can be used to access the stream records in this shard.

", + "GetRecordsOutput$NextShardIterator": "

The next position in the shard from which to start sequentially reading stream records. If set to null, the shard has been closed and the requested iterator will not return any more data.

", + "GetShardIteratorOutput$ShardIterator": "

The position in the shard from which to start reading stream records sequentially. A shard iterator specifies this position using the sequence number of a stream record in a shard.

" + } + }, + "ShardIteratorType": { + "base": null, + "refs": { + "GetShardIteratorInput$ShardIteratorType": "

Determines how the shard iterator is used to start reading stream records from the shard:

" + } + }, + "Stream": { + "base": "

Represents all of the data describing a particular stream.

", + "refs": { + "StreamList$member": null + } + }, + "StreamArn": { + "base": null, + "refs": { + "DescribeStreamInput$StreamArn": "

The Amazon Resource Name (ARN) for the stream.

", + "GetShardIteratorInput$StreamArn": "

The Amazon Resource Name (ARN) for the stream.

", + "ListStreamsInput$ExclusiveStartStreamArn": "

The ARN (Amazon Resource Name) of the first item that this operation will evaluate. Use the value that was returned for LastEvaluatedStreamArn in the previous operation.

", + "ListStreamsOutput$LastEvaluatedStreamArn": "

The stream ARN of the item where the operation stopped, inclusive of the previous result set. Use this value to start a new operation, excluding this value in the new request.

If LastEvaluatedStreamArn is empty, then the \"last page\" of results has been processed and there is no more data to be retrieved.

If LastEvaluatedStreamArn is not empty, it does not necessarily mean that there is more data in the result set. The only way to know when you have reached the end of the result set is when LastEvaluatedStreamArn is empty.

", + "Stream$StreamArn": "

The Amazon Resource Name (ARN) for the stream.

", + "StreamDescription$StreamArn": "

The Amazon Resource Name (ARN) for the stream.

" + } + }, + "StreamDescription": { + "base": "

Represents all of the data describing a particular stream.

", + "refs": { + "DescribeStreamOutput$StreamDescription": "

A complete description of the stream, including its creation date and time, the DynamoDB table associated with the stream, the shard IDs within the stream, and the beginning and ending sequence numbers of stream records within the shards.

" + } + }, + "StreamList": { + "base": null, + "refs": { + "ListStreamsOutput$Streams": "

A list of stream descriptors associated with the current account and endpoint.

" + } + }, + "StreamRecord": { + "base": "

A description of a single data modification that was performed on an item in a DynamoDB table.

", + "refs": { + "Record$dynamodb": "

The main body of the stream record, containing all of the DynamoDB-specific fields.

" + } + }, + "StreamStatus": { + "base": null, + "refs": { + "StreamDescription$StreamStatus": "

Indicates the current status of the stream:

" + } + }, + "StreamViewType": { + "base": null, + "refs": { + "StreamDescription$StreamViewType": "

Indicates the format of the records within this stream:

", + "StreamRecord$StreamViewType": "

The type of data from the modified DynamoDB item that was captured in this stream record:

" + } + }, + "String": { + "base": null, + "refs": { + "Record$eventID": "

A globally unique identifier for the event that was recorded in this stream record.

", + "Record$eventVersion": "

The version number of the stream record format. Currently, this is 1.0.

", + "Record$eventSource": "

The AWS service from which the stream record originated. For DynamoDB Streams, this is aws:dynamodb.

", + "Record$awsRegion": "

The region in which the GetRecords request was received.

", + "Stream$StreamLabel": "

A timestamp, in ISO 8601 format, for this stream.

Note that LatestStreamLabel is not a unique identifier for the stream, because it is possible that a stream from another table might have the same timestamp. However, the combination of the following three elements is guaranteed to be unique:

", + "StreamDescription$StreamLabel": "

A timestamp, in ISO 8601 format, for this stream.

Note that LatestStreamLabel is not a unique identifier for the stream, because it is possible that a stream from another table might have the same timestamp. However, the combination of the following three elements is guaranteed to be unique:

" + } + }, + "StringAttributeValue": { + "base": null, + "refs": { + "AttributeValue$S": "

A String data type.

", + "StringSetAttributeValue$member": null + } + }, + "StringSetAttributeValue": { + "base": null, + "refs": { + "AttributeValue$SS": "

A String Set data type.

" + } + }, + "TableName": { + "base": null, + "refs": { + "ListStreamsInput$TableName": "

If this parameter is provided, then only the streams associated with this table name are returned.

", + "Stream$TableName": "

The DynamoDB table with which the stream is associated.

", + "StreamDescription$TableName": "

The DynamoDB table with which the stream is associated.

" + } + }, + "TrimmedDataAccessException": { + "base": "

The operation attempted to read past the oldest stream record in a shard.

In DynamoDB Streams, there is a 24 hour limit on data retention. Stream records whose age exceeds this limit are subject to removal (trimming) from the stream. You might receive a TrimmedDataAccessException if:

", + "refs": { + } + } + } +} diff --git a/tests/Integ/SmokeContext.php b/tests/Integ/SmokeContext.php index f58e76297d..63151335a3 100644 --- a/tests/Integ/SmokeContext.php +++ b/tests/Integ/SmokeContext.php @@ -26,6 +26,9 @@ class SmokeContext extends PHPUnit_Framework_Assert implements 'ElasticFileSystem' => [ 'region' => 'us-west-2', ], + 'DeviceFarm' => [ + 'region' => 'us-west-2', + ], ]; /**