Skip to content

HTTPS clone URL

Subversion checkout URL

You can clone with HTTPS or Subversion.

Download ZIP
Browse files

Merge branch 'release-2.20.0'

  • Loading branch information...
commit 29a7fc6a6ce0851b9b85aed34a6c0087bd967482 2 parents 2fcb853 + d18eac8
Daniel G. Taylor danielgtaylor authored
Showing with 2,313 additions and 175 deletions.
  1. +4 −2 README.rst
  2. +4 −3 bin/sdbadmin
  3. +45 −1 boto/__init__.py
  4. +9 −1 boto/connection.py
  5. +66 −0 boto/directconnect/__init__.py
  6. +28 −0 boto/directconnect/exceptions.py
  7. +633 −0 boto/directconnect/layer1.py
  8. +6 −0 boto/dynamodb2/items.py
  9. +87 −91 boto/dynamodb2/layer1.py
  10. +5 −1 boto/dynamodb2/table.py
  11. +1 −1  boto/ec2/autoscale/__init__.py
  12. +2 −2 boto/ec2/autoscale/group.py
  13. +0 −7 boto/ec2/cloudwatch/metric.py
  14. +19 −1 boto/ec2/connection.py
  15. +2 −2 boto/ec2/elb/__init__.py
  16. +5 −0 boto/ec2/image.py
  17. +8 −5 boto/elasticache/layer1.py
  18. +45 −0 boto/kinesis/__init__.py
  19. +51 −0 boto/kinesis/exceptions.py
  20. +707 −0 boto/kinesis/layer1.py
  21. +1 −1  boto/rds/__init__.py
  22. +35 −4 boto/rds/dbinstance.py
  23. +32 −2 boto/rds/dbsnapshot.py
  24. +97 −37 boto/utils.py
  25. +1 −0  docs/source/index.rst
  26. +31 −0 docs/source/releasenotes/v2.20.0.rst
  27. +2 −1  setup.py
  28. 0  tests/integration/directconnect/__init__.py
  29. +40 −0 tests/integration/directconnect/test_directconnect.py
  30. 0  tests/integration/kinesis/__init__.py
  31. +84 −0 tests/integration/kinesis/test_kinesis.py
  32. 0  tests/unit/directconnect/__init__.py
  33. +58 −0 tests/unit/directconnect/test_layer1.py
  34. +10 −1 tests/unit/dynamodb2/test_table.py
  35. +23 −0 tests/unit/ec2/autoscale/test_group.py
  36. 0  tests/unit/elasticache/__init__.py
  37. +20 −0 tests/unit/elasticache/test_api_interface.py
  38. +41 −5 tests/unit/rds/test_connection.py
  39. +15 −0 tests/unit/rds/test_snapshot.py
  40. +32 −0 tests/unit/test_connection.py
  41. +64 −7 tests/unit/utils/test_utils.py
6 README.rst
View
@@ -1,9 +1,9 @@
####
boto
####
-boto 2.19.0
+boto 2.20.0
-Released: 27-November-2013
+Released: 12-December-2013
.. image:: https://travis-ci.org/boto/boto.png?branch=develop
:target: https://travis-ci.org/boto/boto
@@ -23,6 +23,7 @@ At the moment, boto supports:
* Amazon Elastic Compute Cloud (EC2)
* Amazon Elastic Map Reduce (EMR)
* AutoScaling
+ * Amazon Kinesis
* Content Delivery
@@ -66,6 +67,7 @@ At the moment, boto supports:
* Amazon Route53
* Amazon Virtual Private Cloud (VPC)
* Elastic Load Balancing (ELB)
+ * AWS Direct Connect
* Payments and Billing
7 bin/sdbadmin
View
@@ -51,7 +51,7 @@ def confirm(message="Are you sure?"):
return choice and len(choice) > 0 and choice[0].lower() == "y"
-def dump_db(domain, file_name, use_json=False):
+def dump_db(domain, file_name, use_json=False, sort_attributes=False):
"""
Dump SDB domain to file
"""
@@ -59,7 +59,7 @@ def dump_db(domain, file_name, use_json=False):
if use_json:
for item in domain:
data = {"name": item.name, "attributes": item}
- print >> f, json.dumps(data)
+ print >> f, json.dumps(data, sort_keys=sort_attributes)
else:
doc = domain.to_xml(f)
@@ -113,6 +113,7 @@ if __name__ == "__main__":
parser.add_option("-a", "--all-domains", help="Operate on all domains", action="store_true", default=False, dest="all_domains")
if json:
parser.add_option("-j", "--use-json", help="Load/Store as JSON instead of XML", action="store_true", default=False, dest="json")
+ parser.add_option("-s", "--sort-attributes", help="Sort the element attributes", action="store_true", default=False, dest="sort_attributes")
parser.add_option("-d", "--domain", help="Do functions on domain (may be more than one)", action="append", dest="domains")
parser.add_option("-f", "--file", help="Input/Output file we're operating on", dest="file_name")
parser.add_option("-r", "--region", help="Region (e.g. us-east-1[default] or eu-west-1)", default="us-east-1", dest="region_name")
@@ -170,7 +171,7 @@ if __name__ == "__main__":
file_name = options.file_name
else:
file_name = "%s.db" % domain.name
- dump_db(domain, file_name, options.json)
+ dump_db(domain, file_name, options.json, options.sort_attributes)
if options.load:
for domain in domains:
46 boto/__init__.py
View
@@ -36,7 +36,7 @@
import urlparse
from boto.exception import InvalidUriError
-__version__ = '2.19.0'
+__version__ = '2.20.0'
Version = __version__ # for backward compatibility
UserAgent = 'Boto/%s Python/%s %s/%s' % (
@@ -744,6 +744,50 @@ def connect_cloudtrail(aws_access_key_id=None,
)
+def connect_directconnect(aws_access_key_id=None,
+ aws_secret_access_key=None,
+ **kwargs):
+ """
+ Connect to AWS DirectConnect
+
+ :type aws_access_key_id: string
+ :param aws_access_key_id: Your AWS Access Key ID
+
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Your AWS Secret Access Key
+
+ :rtype: :class:`boto.directconnect.layer1.DirectConnectConnection`
+ :return: A connection to the AWS DirectConnect service
+ """
+ from boto.directconnect.layer1 import DirectConnectConnection
+ return DirectConnectConnection(
+ aws_access_key_id=aws_access_key_id,
+ aws_secret_access_key=aws_secret_access_key,
+ **kwargs
+ )
+
+def connect_kinesis(aws_access_key_id=None,
+ aws_secret_access_key=None,
+ **kwargs):
+ """
+ Connect to Amazon Kinesis
+
+ :type aws_access_key_id: string
+ :param aws_access_key_id: Your AWS Access Key ID
+
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Your AWS Secret Access Key
+
+ :rtype: :class:`boto.kinesis.layer1.KinesisConnection`
+ :return: A connection to the Amazon Kinesis service
+ """
+ from boto.kinesis.layer1 import KinesisConnection
+ return KinesisConnection(
+ aws_access_key_id=aws_access_key_id,
+ aws_secret_access_key=aws_secret_access_key,
+ **kwargs
+ )
+
def storage_uri(uri_str, default_scheme='file', debug=0, validate=True,
bucket_storage_uri_class=BucketStorageUri,
suppress_consec_slashes=True, is_latest=False):
10 boto/connection.py
View
@@ -840,6 +840,13 @@ def get_proxy_auth_header(self):
auth = base64.encodestring(self.proxy_user + ':' + self.proxy_pass)
return {'Proxy-Authorization': 'Basic %s' % auth}
+ def set_host_header(self, request):
+ try:
+ request.headers['Host'] = \
+ self._auth_handler.host_header(self.host, request)
+ except AttributeError:
+ request.headers['Host'] = self.host.split(':', 1)[0]
+
def _mexe(self, request, sender=None, override_num_retries=None,
retry_handler=None):
"""
@@ -880,7 +887,8 @@ def _mexe(self, request, sender=None, override_num_retries=None,
# the port info. All others should be now be up to date and
# not include the port.
if 's3' not in self._required_auth_capability():
- request.headers['Host'] = self.host.split(':', 1)[0]
+ self.set_host_header(request)
+
if callable(sender):
response = sender(connection, request.method, request.path,
request.body, request.headers)
66 boto/directconnect/__init__.py
View
@@ -0,0 +1,66 @@
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from boto.regioninfo import RegionInfo
+
+
+def regions():
+ """
+ Get all available regions for the AWS DirectConnect service.
+
+ :rtype: list
+ :return: A list of :class:`boto.regioninfo.RegionInfo`
+ """
+ from boto.directconnect.layer1 import DirectConnectConnection
+
+ return [RegionInfo(name='us-east-1',
+ endpoint='directconnect.us-east-1.amazonaws.com',
+ connection_cls=DirectConnectConnection),
+ RegionInfo(name='us-west-1',
+ endpoint='directconnect.us-west-1.amazonaws.com',
+ connection_cls=DirectConnectConnection),
+ RegionInfo(name='us-west-2',
+ endpoint='directconnect.us-west-2.amazonaws.com',
+ connection_cls=DirectConnectConnection),
+ RegionInfo(name='eu-west-1',
+ endpoint='directconnect.eu-west-1.amazonaws.com',
+ connection_cls=DirectConnectConnection),
+ RegionInfo(name='ap-southeast-1',
+ endpoint='directconnect.ap-southeast-1.amazonaws.com',
+ connection_cls=DirectConnectConnection),
+ RegionInfo(name='ap-southeast-2',
+ endpoint='directconnect.ap-southeast-2.amazonaws.com',
+ connection_cls=DirectConnectConnection),
+ RegionInfo(name='ap-northeast-1',
+ endpoint='directconnect.ap-northeast-1.amazonaws.com',
+ connection_cls=DirectConnectConnection),
+ RegionInfo(name='sa-east-1',
+ endpoint='directconnect.sa-east-1.amazonaws.com',
+ connection_cls=DirectConnectConnection),
+ ]
+
+
+def connect_to_region(region_name, **kw_params):
+ for region in regions():
+ if region.name == region_name:
+ return region.connect(**kw_params)
+ return None
28 boto/directconnect/exceptions.py
View
@@ -0,0 +1,28 @@
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+class DirectConnectClientException(Exception):
+ pass
+
+
+class DirectConnectServerException(Exception):
+ pass
633 boto/directconnect/layer1.py
View
@@ -0,0 +1,633 @@
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
+import boto
+from boto.connection import AWSQueryConnection
+from boto.regioninfo import RegionInfo
+from boto.exception import JSONResponseError
+from boto.directconnect import exceptions
+
+
+class DirectConnectConnection(AWSQueryConnection):
+ """
+ AWS Direct Connect makes it easy to establish a dedicated network
+ connection from your premises to Amazon Web Services (AWS). Using
+ AWS Direct Connect, you can establish private connectivity between
+ AWS and your data center, office, or colocation environment, which
+ in many cases can reduce your network costs, increase bandwidth
+ throughput, and provide a more consistent network experience than
+ Internet-based connections.
+
+ The AWS Direct Connect API Reference provides descriptions,
+ syntax, and usage examples for each of the actions and data types
+ for AWS Direct Connect. Use the following links to get started
+ using the AWS Direct Connect API Reference :
+
+
+ + `Actions`_: An alphabetical list of all AWS Direct Connect
+ actions.
+ + `Data Types`_: An alphabetical list of all AWS Direct Connect
+ data types.
+ + `Common Query Parameters`_: Parameters that all Query actions
+ can use.
+ + `Common Errors`_: Client and server errors that all actions can
+ return.
+ """
+ APIVersion = "2012-10-25"
+ DefaultRegionName = "us-east-1"
+ DefaultRegionEndpoint = "directconnect.us-east-1.amazonaws.com"
+ ServiceName = "DirectConnect"
+ TargetPrefix = "OvertureService"
+ ResponseError = JSONResponseError
+
+ _faults = {
+ "DirectConnectClientException": exceptions.DirectConnectClientException,
+ "DirectConnectServerException": exceptions.DirectConnectServerException,
+ }
+
+
+ def __init__(self, **kwargs):
+ region = kwargs.pop('region', None)
+ if not region:
+ region = RegionInfo(self, self.DefaultRegionName,
+ self.DefaultRegionEndpoint)
+
+ if 'host' not in kwargs:
+ kwargs['host'] = region.endpoint
+
+ AWSQueryConnection.__init__(self, **kwargs)
+ self.region = region
+
+ def _required_auth_capability(self):
+ return ['hmac-v4']
+
+ def allocate_connection_on_interconnect(self, bandwidth, connection_name,
+ owner_account, interconnect_id,
+ vlan):
+ """
+ Creates a hosted connection on an interconnect.
+
+ Allocates a VLAN number and a specified amount of bandwidth
+ for use by a hosted connection on the given interconnect.
+
+ :type bandwidth: string
+ :param bandwidth: Bandwidth of the connection.
+ Example: " 500Mbps "
+
+ Default: None
+
+ :type connection_name: string
+ :param connection_name: Name of the provisioned connection.
+ Example: " 500M Connection to AWS "
+
+ Default: None
+
+ :type owner_account: string
+ :param owner_account: Numeric account Id of the customer for whom the
+ connection will be provisioned.
+ Example: 123443215678
+
+ Default: None
+
+ :type interconnect_id: string
+ :param interconnect_id: ID of the interconnect on which the connection
+ will be provisioned.
+ Example: dxcon-456abc78
+
+ Default: None
+
+ :type vlan: integer
+ :param vlan: The dedicated VLAN provisioned to the connection.
+ Example: 101
+
+ Default: None
+
+ """
+ params = {
+ 'bandwidth': bandwidth,
+ 'connectionName': connection_name,
+ 'ownerAccount': owner_account,
+ 'interconnectId': interconnect_id,
+ 'vlan': vlan,
+ }
+ return self.make_request(action='AllocateConnectionOnInterconnect',
+ body=json.dumps(params))
+
+ def allocate_private_virtual_interface(self, connection_id,
+ owner_account,
+ new_private_virtual_interface_allocation):
+ """
+ Provisions a private virtual interface to be owned by a
+ different customer.
+
+ The owner of a connection calls this function to provision a
+ private virtual interface which will be owned by another AWS
+ customer.
+
+ Virtual interfaces created using this function must be
+ confirmed by the virtual interface owner by calling
+ ConfirmPrivateVirtualInterface. Until this step has been
+ completed, the virtual interface will be in 'Confirming'
+ state, and will not be available for handling traffic.
+
+ :type connection_id: string
+ :param connection_id: The connection ID on which the private virtual
+ interface is provisioned.
+ Default: None
+
+ :type owner_account: string
+ :param owner_account: The AWS account that will own the new private
+ virtual interface.
+ Default: None
+
+ :type new_private_virtual_interface_allocation: dict
+ :param new_private_virtual_interface_allocation: Detailed information
+ for the private virtual interface to be provisioned.
+ Default: None
+
+ """
+ params = {
+ 'connectionId': connection_id,
+ 'ownerAccount': owner_account,
+ 'newPrivateVirtualInterfaceAllocation': new_private_virtual_interface_allocation,
+ }
+ return self.make_request(action='AllocatePrivateVirtualInterface',
+ body=json.dumps(params))
+
+ def allocate_public_virtual_interface(self, connection_id, owner_account,
+ new_public_virtual_interface_allocation):
+ """
+ Provisions a public virtual interface to be owned by a
+ different customer.
+
+ The owner of a connection calls this function to provision a
+ public virtual interface which will be owned by another AWS
+ customer.
+
+ Virtual interfaces created using this function must be
+ confirmed by the virtual interface owner by calling
+ ConfirmPublicVirtualInterface. Until this step has been
+ completed, the virtual interface will be in 'Confirming'
+ state, and will not be available for handling traffic.
+
+ :type connection_id: string
+ :param connection_id: The connection ID on which the public virtual
+ interface is provisioned.
+ Default: None
+
+ :type owner_account: string
+ :param owner_account: The AWS account that will own the new public
+ virtual interface.
+ Default: None
+
+ :type new_public_virtual_interface_allocation: dict
+ :param new_public_virtual_interface_allocation: Detailed information
+ for the public virtual interface to be provisioned.
+ Default: None
+
+ """
+ params = {
+ 'connectionId': connection_id,
+ 'ownerAccount': owner_account,
+ 'newPublicVirtualInterfaceAllocation': new_public_virtual_interface_allocation,
+ }
+ return self.make_request(action='AllocatePublicVirtualInterface',
+ body=json.dumps(params))
+
+ def confirm_connection(self, connection_id):
+ """
+ Confirm the creation of a hosted connection on an
+ interconnect.
+
+ Upon creation, the hosted connection is initially in the
+ 'Ordering' state, and will remain in this state until the
+ owner calls ConfirmConnection to confirm creation of the
+ hosted connection.
+
+ :type connection_id: string
+ :param connection_id: ID of the connection.
+ Example: dxcon-fg5678gh
+
+ Default: None
+
+ """
+ params = {'connectionId': connection_id, }
+ return self.make_request(action='ConfirmConnection',
+ body=json.dumps(params))
+
+ def confirm_private_virtual_interface(self, virtual_interface_id,
+ virtual_gateway_id):
+ """
+ Accept ownership of a private virtual interface created by
+ another customer.
+
+ After the virtual interface owner calls this function, the
+ virtual interface will be created and attached to the given
+ virtual private gateway, and will be available for handling
+ traffic.
+
+ :type virtual_interface_id: string
+ :param virtual_interface_id: ID of the virtual interface.
+ Example: dxvif-123dfg56
+
+ Default: None
+
+ :type virtual_gateway_id: string
+ :param virtual_gateway_id: ID of the virtual private gateway that will
+ be attached to the virtual interface.
+ A virtual private gateway can be managed via the Amazon Virtual Private
+ Cloud (VPC) console or the `EC2 CreateVpnGateway`_ action.
+
+ Default: None
+
+ """
+ params = {
+ 'virtualInterfaceId': virtual_interface_id,
+ 'virtualGatewayId': virtual_gateway_id,
+ }
+ return self.make_request(action='ConfirmPrivateVirtualInterface',
+ body=json.dumps(params))
+
+ def confirm_public_virtual_interface(self, virtual_interface_id):
+ """
+ Accept ownership of a public virtual interface created by
+ another customer.
+
+ After the virtual interface owner calls this function, the
+ specified virtual interface will be created and made available
+ for handling traffic.
+
+ :type virtual_interface_id: string
+ :param virtual_interface_id: ID of the virtual interface.
+ Example: dxvif-123dfg56
+
+ Default: None
+
+ """
+ params = {'virtualInterfaceId': virtual_interface_id, }
+ return self.make_request(action='ConfirmPublicVirtualInterface',
+ body=json.dumps(params))
+
+ def create_connection(self, location, bandwidth, connection_name):
+ """
+ Creates a new connection between the customer network and a
+ specific AWS Direct Connect location.
+
+ A connection links your internal network to an AWS Direct
+ Connect location over a standard 1 gigabit or 10 gigabit
+ Ethernet fiber-optic cable. One end of the cable is connected
+ to your router, the other to an AWS Direct Connect router. An
+ AWS Direct Connect location provides access to Amazon Web
+ Services in the region it is associated with. You can
+ establish connections with AWS Direct Connect locations in
+ multiple regions, but a connection in one region does not
+ provide connectivity to other regions.
+
+ :type location: string
+ :param location: Where the connection is located.
+ Example: EqSV5
+
+ Default: None
+
+ :type bandwidth: string
+ :param bandwidth: Bandwidth of the connection.
+ Example: 1Gbps
+
+ Default: None
+
+ :type connection_name: string
+ :param connection_name: The name of the connection.
+ Example: " My Connection to AWS "
+
+ Default: None
+
+ """
+ params = {
+ 'location': location,
+ 'bandwidth': bandwidth,
+ 'connectionName': connection_name,
+ }
+ return self.make_request(action='CreateConnection',
+ body=json.dumps(params))
+
+ def create_interconnect(self, interconnect_name, bandwidth, location):
+ """
+ Creates a new interconnect between a AWS Direct Connect
+ partner's network and a specific AWS Direct Connect location.
+
+ An interconnect is a connection which is capable of hosting
+ other connections. The AWS Direct Connect partner can use an
+ interconnect to provide sub-1Gbps AWS Direct Connect service
+ to tier 2 customers who do not have their own connections.
+ Like a standard connection, an interconnect links the AWS
+ Direct Connect partner's network to an AWS Direct Connect
+ location over a standard 1 Gbps or 10 Gbps Ethernet fiber-
+ optic cable. One end is connected to the partner's router, the
+ other to an AWS Direct Connect router.
+
+ For each end customer, the AWS Direct Connect partner
+ provisions a connection on their interconnect by calling
+ AllocateConnectionOnInterconnect. The end customer can then
+ connect to AWS resources by creating a virtual interface on
+ their connection, using the VLAN assigned to them by the AWS
+ Direct Connect partner.
+
+ :type interconnect_name: string
+ :param interconnect_name: The name of the interconnect.
+ Example: " 1G Interconnect to AWS "
+
+ Default: None
+
+ :type bandwidth: string
+ :param bandwidth: The port bandwidth
+ Example: 1Gbps
+
+ Default: None
+
+ Available values: 1Gbps,10Gbps
+
+ :type location: string
+ :param location: Where the interconnect is located
+ Example: EqSV5
+
+ Default: None
+
+ """
+ params = {
+ 'interconnectName': interconnect_name,
+ 'bandwidth': bandwidth,
+ 'location': location,
+ }
+ return self.make_request(action='CreateInterconnect',
+ body=json.dumps(params))
+
+ def create_private_virtual_interface(self, connection_id,
+ new_private_virtual_interface):
+ """
+ Creates a new private virtual interface. A virtual interface
+ is the VLAN that transports AWS Direct Connect traffic. A
+ private virtual interface supports sending traffic to a single
+ virtual private cloud (VPC).
+
+ :type connection_id: string
+ :param connection_id: ID of the connection.
+ Example: dxcon-fg5678gh
+
+ Default: None
+
+ :type new_private_virtual_interface: dict
+ :param new_private_virtual_interface: Detailed information for the
+ private virtual interface to be created.
+ Default: None
+
+ """
+ params = {
+ 'connectionId': connection_id,
+ 'newPrivateVirtualInterface': new_private_virtual_interface,
+ }
+ return self.make_request(action='CreatePrivateVirtualInterface',
+ body=json.dumps(params))
+
+ def create_public_virtual_interface(self, connection_id,
+ new_public_virtual_interface):
+ """
+ Creates a new public virtual interface. A virtual interface is
+ the VLAN that transports AWS Direct Connect traffic. A public
+ virtual interface supports sending traffic to public services
+ of AWS such as Amazon Simple Storage Service (Amazon S3).
+
+ :type connection_id: string
+ :param connection_id: ID of the connection.
+ Example: dxcon-fg5678gh
+
+ Default: None
+
+ :type new_public_virtual_interface: dict
+ :param new_public_virtual_interface: Detailed information for the
+ public virtual interface to be created.
+ Default: None
+
+ """
+ params = {
+ 'connectionId': connection_id,
+ 'newPublicVirtualInterface': new_public_virtual_interface,
+ }
+ return self.make_request(action='CreatePublicVirtualInterface',
+ body=json.dumps(params))
+
+ def delete_connection(self, connection_id):
+ """
+ Deletes the connection.
+
+ Deleting a connection only stops the AWS Direct Connect port
+ hour and data transfer charges. You need to cancel separately
+ with the providers any services or charges for cross-connects
+ or network circuits that connect you to the AWS Direct Connect
+ location.
+
+ :type connection_id: string
+ :param connection_id: ID of the connection.
+ Example: dxcon-fg5678gh
+
+ Default: None
+
+ """
+ params = {'connectionId': connection_id, }
+ return self.make_request(action='DeleteConnection',
+ body=json.dumps(params))
+
+ def delete_interconnect(self, interconnect_id):
+ """
+ Deletes the specified interconnect.
+
+ :type interconnect_id: string
+ :param interconnect_id: The ID of the interconnect.
+ Example: dxcon-abc123
+
+ """
+ params = {'interconnectId': interconnect_id, }
+ return self.make_request(action='DeleteInterconnect',
+ body=json.dumps(params))
+
+ def delete_virtual_interface(self, virtual_interface_id):
+ """
+ Deletes a virtual interface.
+
+ :type virtual_interface_id: string
+ :param virtual_interface_id: ID of the virtual interface.
+ Example: dxvif-123dfg56
+
+ Default: None
+
+ """
+ params = {'virtualInterfaceId': virtual_interface_id, }
+ return self.make_request(action='DeleteVirtualInterface',
+ body=json.dumps(params))
+
+ def describe_connections(self, connection_id=None):
+ """
+ Displays all connections in this region.
+
+ If a connection ID is provided, the call returns only that
+ particular connection.
+
+ :type connection_id: string
+ :param connection_id: ID of the connection.
+ Example: dxcon-fg5678gh
+
+ Default: None
+
+ """
+ params = {}
+ if connection_id is not None:
+ params['connectionId'] = connection_id
+ return self.make_request(action='DescribeConnections',
+ body=json.dumps(params))
+
+ def describe_connections_on_interconnect(self, interconnect_id):
+ """
+ Return a list of connections that have been provisioned on the
+ given interconnect.
+
+ :type interconnect_id: string
+ :param interconnect_id: ID of the interconnect on which a list of
+ connection is provisioned.
+ Example: dxcon-abc123
+
+ Default: None
+
+ """
+ params = {'interconnectId': interconnect_id, }
+ return self.make_request(action='DescribeConnectionsOnInterconnect',
+ body=json.dumps(params))
+
+ def describe_interconnects(self, interconnect_id=None):
+ """
+ Returns a list of interconnects owned by the AWS account.
+
+ If an interconnect ID is provided, it will only return this
+ particular interconnect.
+
+ :type interconnect_id: string
+ :param interconnect_id: The ID of the interconnect.
+ Example: dxcon-abc123
+
+ """
+ params = {}
+ if interconnect_id is not None:
+ params['interconnectId'] = interconnect_id
+ return self.make_request(action='DescribeInterconnects',
+ body=json.dumps(params))
+
+ def describe_locations(self):
+ """
+ Returns the list of AWS Direct Connect locations in the
+ current AWS region. These are the locations that may be
+ selected when calling CreateConnection or CreateInterconnect.
+ """
+ params = {}
+ return self.make_request(action='DescribeLocations',
+ body=json.dumps(params))
+
+ def describe_virtual_gateways(self):
+ """
+ Returns a list of virtual private gateways owned by the AWS
+ account.
+
+ You can create one or more AWS Direct Connect private virtual
+ interfaces linking to a virtual private gateway. A virtual
+ private gateway can be managed via Amazon Virtual Private
+ Cloud (VPC) console or the `EC2 CreateVpnGateway`_ action.
+ """
+ params = {}
+ return self.make_request(action='DescribeVirtualGateways',
+ body=json.dumps(params))
+
+ def describe_virtual_interfaces(self, connection_id=None,
+ virtual_interface_id=None):
+ """
+ Displays all virtual interfaces for an AWS account. Virtual
+ interfaces deleted fewer than 15 minutes before
+ DescribeVirtualInterfaces is called are also returned. If a
+ connection ID is included then only virtual interfaces
+ associated with this connection will be returned. If a virtual
+ interface ID is included then only a single virtual interface
+ will be returned.
+
+ A virtual interface (VLAN) transmits the traffic between the
+ AWS Direct Connect location and the customer.
+
+ If a connection ID is provided, only virtual interfaces
+ provisioned on the specified connection will be returned. If a
+ virtual interface ID is provided, only this particular virtual
+ interface will be returned.
+
+ :type connection_id: string
+ :param connection_id: ID of the connection.
+ Example: dxcon-fg5678gh
+
+ Default: None
+
+ :type virtual_interface_id: string
+ :param virtual_interface_id: ID of the virtual interface.
+ Example: dxvif-123dfg56
+
+ Default: None
+
+ """
+ params = {}
+ if connection_id is not None:
+ params['connectionId'] = connection_id
+ if virtual_interface_id is not None:
+ params['virtualInterfaceId'] = virtual_interface_id
+ return self.make_request(action='DescribeVirtualInterfaces',
+ body=json.dumps(params))
+
+ def make_request(self, action, body):
+ headers = {
+ 'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
+ 'Host': self.region.endpoint,
+ 'Content-Type': 'application/x-amz-json-1.1',
+ 'Content-Length': str(len(body)),
+ }
+ http_request = self.build_base_http_request(
+ method='POST', path='/', auth_path='/', params={},
+ headers=headers, data=body)
+ response = self._mexe(http_request, sender=None,
+ override_num_retries=10)
+ response_body = response.read()
+ boto.log.debug(response_body)
+ if response.status == 200:
+ if response_body:
+ return json.loads(response_body)
+ else:
+ json_body = json.loads(response_body)
+ fault_name = json_body.get('__type', None)
+ exception_class = self._faults.get(fault_name, self.ResponseError)
+ raise exception_class(response.status, response.reason,
+ body=json_body)
+
6 boto/dynamodb2/items.py
View
@@ -19,6 +19,9 @@ class Item(object):
This object presents a dictionary-like interface for accessing/storing
data. It also tries to intelligently track how data has changed throughout
the life of the instance, to be as efficient as possible about updates.
+
+ Empty items, or items that have no data, are considered falsey.
+
"""
def __init__(self, table, data=None, loaded=False):
"""
@@ -105,6 +108,9 @@ def __iter__(self):
def __contains__(self, key):
return key in self._data
+ def __nonzero__(self):
+ return bool(self._data)
+
def _determine_alterations(self):
"""
Checks the ``-orig_data`` against the ``_data`` to determine what
178 boto/dynamodb2/layer1.py
View
@@ -35,10 +35,9 @@
class DynamoDBConnection(AWSQueryConnection):
"""
- Amazon DynamoDB is a fast, highly scalable, highly available,
- cost-effective non-relational database service. Amazon DynamoDB
- removes traditional scalability limitations on data storage while
- maintaining low latency and predictable performance.
+ Amazon DynamoDB **Overview**
+ This is the Amazon DynamoDB API Reference. This guide provides
+ descriptions and samples of the Amazon DynamoDB API.
"""
APIVersion = "2012-08-10"
DefaultRegionName = "us-east-1"
@@ -130,7 +129,7 @@ def batch_get_item(self, request_items, return_consumed_capacity=None):
result. Requests for nonexistent items consume the minimum
read capacity units according to the type of read. For more
information, see `Capacity Units Calculations`_ in the Amazon
- DynamoDB Developer Guide .
+ DynamoDB Developer Guide.
:type request_items: map
:param request_items:
@@ -150,7 +149,9 @@ def batch_get_item(self, request_items, return_consumed_capacity=None):
`False` (the default), an eventually consistent read is used.
:type return_consumed_capacity: string
- :param return_consumed_capacity:
+ :param return_consumed_capacity: If set to `TOTAL`, ConsumedCapacity is
+ included in the response; if set to `NONE` (the default),
+ ConsumedCapacity is not included.
"""
params = {'RequestItems': request_items, }
@@ -256,7 +257,9 @@ def batch_write_item(self, request_items, return_consumed_capacity=None,
match those of the schema in the table's attribute definition.
:type return_consumed_capacity: string
- :param return_consumed_capacity:
+ :param return_consumed_capacity: If set to `TOTAL`, ConsumedCapacity is
+ included in the response; if set to `NONE` (the default),
+ ConsumedCapacity is not included.
:type return_item_collection_metrics: string
:param return_item_collection_metrics: If set to `SIZE`, statistics
@@ -274,7 +277,8 @@ def batch_write_item(self, request_items, return_consumed_capacity=None,
body=json.dumps(params))
def create_table(self, attribute_definitions, table_name, key_schema,
- provisioned_throughput, local_secondary_indexes=None):
+ provisioned_throughput, local_secondary_indexes=None,
+ global_secondary_indexes=None):
"""
The CreateTable operation adds a new table to your account. In
an AWS account, table names must be unique within each region.
@@ -306,7 +310,7 @@ def create_table(self, attribute_definitions, table_name, key_schema,
:param key_schema: Specifies the attributes that make up the primary
key for the table. The attributes in KeySchema must also be defined
in the AttributeDefinitions array. For more information, see `Data
- Model`_ in the Amazon DynamoDB Developer Guide .
+ Model`_ in the Amazon DynamoDB Developer Guide.
Each KeySchemaElement in the array is composed of:
@@ -323,7 +327,7 @@ def create_table(self, attribute_definitions, table_name, key_schema,
KeyType of `RANGE`.
For more information, see `Specifying the Primary Key`_ in the Amazon
- DynamoDB Developer Guide .
+ DynamoDB Developer Guide.
:type local_secondary_indexes: list
:param local_secondary_indexes:
@@ -360,8 +364,15 @@ def create_table(self, attribute_definitions, table_name, key_schema,
attribute into two different indexes, this counts as two distinct
attributes when determining the total.
:type global_secondary_indexes: list
:param global_secondary_indexes: One or more global secondary indexes
to be created on the table.
+
:type provisioned_throughput: dict
- :param provisioned_throughput:
+ :param provisioned_throughput: The provisioned throughput settings for
+ the specified table. The settings can be modified using the
+ UpdateTable operation.
+ For current minimum and maximum provisioned throughput values, see
+ `Limits`_ in the Amazon DynamoDB Developer Guide.
"""
params = {
@@ -372,6 +383,8 @@ def create_table(self, attribute_definitions, table_name, key_schema,
}
if local_secondary_indexes is not None:
params['LocalSecondaryIndexes'] = local_secondary_indexes
+ if global_secondary_indexes is not None:
+ params['GlobalSecondaryIndexes'] = global_secondary_indexes
return self.make_request(action='CreateTable',
body=json.dumps(params))
@@ -459,7 +472,9 @@ def delete_item(self, table_name, key, expected=None, return_values=None,
+ `ALL_OLD` - The content of the old item is returned.
:type return_consumed_capacity: string
- :param return_consumed_capacity:
+ :param return_consumed_capacity: If set to `TOTAL`, ConsumedCapacity is
+ included in the response; if set to `NONE` (the default),
+ ConsumedCapacity is not included.
:type return_item_collection_metrics: string
:param return_item_collection_metrics: If set to `SIZE`, statistics
@@ -496,15 +511,6 @@ def delete_table(self, table_name):
operations, such as GetItem and PutItem , on a table in the
`DELETING` state until the table deletion is complete.
- Tables are unique among those associated with the AWS Account
- issuing the request, and the AWS region that receives the
- request (such as dynamodb.us-east-1.amazonaws.com). Each
- Amazon DynamoDB endpoint is entirely independent. For example,
- if you have two tables called "MyTable," one in dynamodb.us-
- east-1.amazonaws.com and one in dynamodb.us-
- west-1.amazonaws.com, they are completely independent and do
- not share any data; deleting one does not delete the other.
-
When you delete a table, any local secondary indexes on that
table are also deleted.
@@ -564,7 +570,9 @@ def get_item(self, table_name, key, attributes_to_get=None,
are used.
:type return_consumed_capacity: string
- :param return_consumed_capacity:
+ :param return_consumed_capacity: If set to `TOTAL`, ConsumedCapacity is
+ included in the response; if set to `NONE` (the default),
+ ConsumedCapacity is not included.
"""
params = {'TableName': table_name, 'Key': key, }
@@ -582,14 +590,6 @@ def list_tables(self, exclusive_start_table_name=None, limit=None):
Returns an array of all the tables associated with the current
account and endpoint.
- Each Amazon DynamoDB endpoint is entirely independent. For
- example, if you have two tables called "MyTable," one in
- dynamodb.us-east-1.amazonaws.com and one in dynamodb.us-
- west-1.amazonaws.com , they are completely independent and do
- not share any data. The ListTables operation returns all of
- the table names associated with the account making the
- request, for the endpoint that receives the request.
-
:type exclusive_start_table_name: string
:param exclusive_start_table_name: The name of the table that starts
the list. If you already ran a ListTables operation and received a
@@ -639,7 +639,7 @@ def put_item(self, table_name, item, expected=None, return_values=None,
primary key attribute, or attributes.
For more information about using this API, see `Working with
- Items`_ in the Amazon DynamoDB Developer Guide .
+ Items`_ in the Amazon DynamoDB Developer Guide.
:type table_name: string
:param table_name: The name of the table to contain the item.
@@ -653,7 +653,7 @@ def put_item(self, table_name, item, expected=None, return_values=None,
the table's attribute definition.
For more information about primary keys, see `Primary Key`_ in the
- Amazon DynamoDB Developer Guide .
+ Amazon DynamoDB Developer Guide.
Each element in the Item map is an AttributeValue object.
@@ -714,7 +714,9 @@ def put_item(self, table_name, item, expected=None, return_values=None,
the content of the old item is returned.
:type return_consumed_capacity: string
- :param return_consumed_capacity:
+ :param return_consumed_capacity: If set to `TOTAL`, ConsumedCapacity is
+ included in the response; if set to `NONE` (the default),
+ ConsumedCapacity is not included.
:type return_item_collection_metrics: string
:param return_item_collection_metrics: If set to `SIZE`, statistics
@@ -834,7 +836,7 @@ def query(self, table_name, index_name=None, select=None,
limit, it stops the operation and returns the matching values up to
the limit, and a LastEvaluatedKey to apply in a subsequent
operation to continue the operation. For more information see
- `Query and Scan`_ in the Amazon DynamoDB Developer Guide .
+ `Query and Scan`_ in the Amazon DynamoDB Developer Guide.
:type consistent_read: boolean
:param consistent_read: If set to `True`, then the operation uses
@@ -846,7 +848,7 @@ def query(self, table_name, index_name=None, select=None,
The selection criteria for the query.
For a query on a table, you can only have conditions on the table
- primary key attributes. you must specify the hash key attribute
+ primary key attributes. You must specify the hash key attribute
name and value as an `EQ` condition. You can optionally specify a
second condition, referring to the range key attribute.
@@ -878,7 +880,7 @@ def query(self, table_name, index_name=None, select=None,
example, equals, greater than, less than, etc. Valid comparison
operators for Query: `EQ | LE | LT | GE | GT | BEGINS_WITH |
BETWEEN` For information on specifying data types in JSON, see
- `JSON Data Format`_ in the Amazon DynamoDB Developer Guide . The
+ `JSON Data Format`_ in the Amazon DynamoDB Developer Guide. The
following are descriptions of each comparison operator.
+ `EQ` : Equal. AttributeValueList can contain only one AttributeValue
@@ -938,18 +940,16 @@ def query(self, table_name, index_name=None, select=None,
ascending order.
:type exclusive_start_key: map
- :param exclusive_start_key: The primary key of the item from which to
- continue an earlier operation. An earlier operation might provide
- this value as the LastEvaluatedKey if that operation was
- interrupted before completion; either because of the result set
- size or because of the setting for Limit . The LastEvaluatedKey can
- be passed back in a new request to continue the operation from that
- point.
+ :param exclusive_start_key: The primary key of the first item that this
+ operation will evaluate. Use the value that was returned for
+ LastEvaluatedKey in the previous operation.
The data type for ExclusiveStartKey must be String, Number or Binary.
No set data types are allowed.
:type return_consumed_capacity: string
- :param return_consumed_capacity:
+ :param return_consumed_capacity: If set to `TOTAL`, ConsumedCapacity is
+ included in the response; if set to `NONE` (the default),
+ ConsumedCapacity is not included.
"""
params = {'TableName': table_name, }
@@ -994,10 +994,10 @@ def scan(self, table_name, attributes_to_get=None, limit=None,
The result set is eventually consistent.
By default, Scan operations proceed sequentially; however, for
- faster performance on large tables, applications can perform a
+ faster performance on large tables, applications can request a
parallel Scan by specifying the Segment and TotalSegments
parameters. For more information, see `Parallel Scan`_ in the
- Amazon DynamoDB Developer Guide .
+ Amazon DynamoDB Developer Guide.
:type table_name: string
:param table_name: The name of the table containing the requested
@@ -1020,7 +1020,7 @@ def scan(self, table_name, attributes_to_get=None, limit=None,
limit, it stops the operation and returns the matching values up to
the limit, and a LastEvaluatedKey to apply in a subsequent
operation to continue the operation. For more information see
- `Query and Scan`_ in the Amazon DynamoDB Developer Guide .
+ `Query and Scan`_ in the Amazon DynamoDB Developer Guide.
:type select: string
:param select: The attributes to be returned in the result. You can
@@ -1084,7 +1084,7 @@ def scan(self, table_name, attributes_to_get=None, limit=None,
operators for Scan: `EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL
| CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN` For
information on specifying data types in JSON, see `JSON Data
- Format`_ in the Amazon DynamoDB Developer Guide . The following are
+ Format`_ in the Amazon DynamoDB Developer Guide. The following are
descriptions of each comparison operator.
+ `EQ` : Equal. AttributeValueList can contain only one AttributeValue
@@ -1164,44 +1164,27 @@ def scan(self, table_name, attributes_to_get=None, limit=None,
"2", "1"]}`
:type exclusive_start_key: map
- :param exclusive_start_key: The primary key of the item from which to
- continue an earlier operation. An earlier operation might provide
- this value as the LastEvaluatedKey if that operation was
- interrupted before completion; either because of the result set
- size or because of the setting for Limit . The LastEvaluatedKey can
- be passed back in a new request to continue the operation from that
- point.
+ :param exclusive_start_key: The primary key of the first item that this
+ operation will evaluate. Use the value that was returned for
+ LastEvaluatedKey in the previous operation.
The data type for ExclusiveStartKey must be String, Number or Binary.
No set data types are allowed.
- If you are performing a parallel scan, the value of ExclusiveStartKey
- must fall into the key space of the Segment being scanned. For
- example, suppose that there are two application threads scanning a
- table using the following Scan parameters
-
-
- + Thread 0: Segment =0; TotalSegments =2
- + Thread 1: Segment =1; TotalSegments =2
-
-
- Now suppose that the Scan request for Thread 0 completed and returned a
- LastEvaluatedKey of "X". Because "X" is part of Segment 0's key
- space, it cannot be used anywhere else in the table. If Thread 1
- were to issue another Scan request with an ExclusiveStartKey of
- "X", Amazon DynamoDB would throw an InputValidationError because
- hash key "X" cannot be in Segment 1.
+ In a parallel scan, a Scan request that includes ExclusiveStartKey must
+ specify the same segment whose previous Scan returned the
+ corresponding value of LastEvaluatedKey.
:type return_consumed_capacity: string
- :param return_consumed_capacity:
+ :param return_consumed_capacity: If set to `TOTAL`, ConsumedCapacity is
+ included in the response; if set to `NONE` (the default),
+ ConsumedCapacity is not included.
:type total_segments: integer
- :param total_segments: For parallel Scan requests, TotalSegments
- represents the total number of segments for a table that is being
- scanned. Segments are a way to logically divide a table into
- equally sized portions, for the duration of the Scan request. The
- value of TotalSegments corresponds to the number of application
- "workers" (such as threads or processes) that will perform the
- parallel Scan . For example, if you want to scan a table using four
+ :param total_segments: For a parallel Scan request, TotalSegments
+ represents the total number of segments into which the Scan
+ operation will be divided. The value of TotalSegments corresponds
+ to the number of application workers that will perform the parallel
+ scan. For example, if you want to scan a table using four
application threads, you would specify a TotalSegments value of 4.
The value for TotalSegments must be greater than or equal to 1, and
less than or equal to 4096. If you specify a TotalSegments value of
@@ -1210,15 +1193,17 @@ def scan(self, table_name, attributes_to_get=None, limit=None,
If you specify TotalSegments, you must also specify Segment.
:type segment: integer
- :param segment: For parallel Scan requests, Segment identifies an
- individual segment to be scanned by an application "worker" (such
- as a thread or a process). Each worker issues a Scan request with a
- distinct value for the segment it will scan.
+ :param segment: For a parallel Scan request, Segment identifies an
+ individual segment to be scanned by an application worker.
Segment IDs are zero-based, so the first segment is always 0. For
example, if you want to scan a table using four application
threads, the first thread would specify a Segment value of 0, the
second thread would specify 1, and so on.
+ The value of LastEvaluatedKey returned from a parallel Scan request
+ must be used as ExclusiveStartKey with the same Segment ID in a
+ subsequent Scan operation.
+
The value for Segment must be greater than or equal to 0, and less than
the value provided for TotalSegments.
@@ -1411,7 +1396,9 @@ def update_item(self, table_name, key, attribute_updates=None,
returned.
:type return_consumed_capacity: string
- :param return_consumed_capacity:
+ :param return_consumed_capacity: If set to `TOTAL`, ConsumedCapacity is
+ included in the response; if set to `NONE` (the default),
+ ConsumedCapacity is not included.
:type return_item_collection_metrics: string
:param return_item_collection_metrics: If set to `SIZE`, statistics
@@ -1434,7 +1421,8 @@ def update_item(self, table_name, key, attribute_updates=None,
return self.make_request(action='UpdateItem',
body=json.dumps(params))
- def update_table(self, table_name, provisioned_throughput):
+ def update_table(self, table_name, provisioned_throughput=None,
+ global_secondary_index_updates=None):
"""
Updates the provisioned throughput for the given table.
Setting the throughput for a table helps you manage
@@ -1443,7 +1431,7 @@ def update_table(self, table_name, provisioned_throughput):
The provisioned throughput values can be upgraded or
downgraded based on the maximums and minimums listed in the
- `Limits`_ section in the Amazon DynamoDB Developer Guide .
+ `Limits`_ section in the Amazon DynamoDB Developer Guide.
The table must be in the `ACTIVE` state for this operation to
succeed. UpdateTable is an asynchronous operation; while
@@ -1462,13 +1450,21 @@ def update_table(self, table_name, provisioned_throughput):
:param table_name: The name of the table to be updated.
:type provisioned_throughput: dict
- :param provisioned_throughput:
+ :param provisioned_throughput: The provisioned throughput settings for
+ the specified table. The settings can be modified using the
+ UpdateTable operation.
+ For current minimum and maximum provisioned throughput values, see
+ `Limits`_ in the Amazon DynamoDB Developer Guide.
+
+ :type global_secondary_index_updates: list
+ :param global_secondary_index_updates: An array of one or more global
+ secondary index updates (provisioned throughput changes) to apply
+ to the table.
"""
- params = {
- 'TableName': table_name,
- 'ProvisionedThroughput': provisioned_throughput,
- }
+ params = {'TableName': table_name, }
+ if provisioned_throughput is not None:
+ params['ProvisionedThroughput'] = provisioned_throughput
+ if global_secondary_index_updates is not None:
+ params['GlobalSecondaryIndexUpdates'] = global_secondary_index_updates
return self.make_request(action='UpdateTable',
body=json.dumps(params))
6 boto/dynamodb2/table.py
View
@@ -424,7 +424,7 @@ def lookup(self, *args, **kwargs):
with boto.dynamodb. Unlike get_item, it takes hash_key and range_key first,
although you may still specify keyword arguments instead.
- Also unlike the get_item command, if the returned item has no keys
+ Also unlike the get_item command, if the returned item has no keys
(i.e., it does not exist in DynamoDB), a None result is returned, instead
of an empty key object.
@@ -668,6 +668,10 @@ def _build_filters(self, filter_kwargs, using=QUERY_OPERATORS):
lookup['AttributeValueList'].append(
self._dynamizer.encode(value[1])
)
+ # Special-case the ``IN`` case
+ elif field_bits[-1] == 'in':
+ for val in value:
+ lookup['AttributeValueList'].append(self._dynamizer.encode(val))
else:
# Fix up the value for encoding, because it was built to only work
# with ``set``s.
2  boto/ec2/autoscale/__init__.py
View
@@ -785,7 +785,7 @@ def set_desired_capacity(self, group_name, desired_capacity, honor_cooldown=Fals
params = {'AutoScalingGroupName': group_name,
'DesiredCapacity': desired_capacity}
if honor_cooldown:
- params['HonorCooldown'] = json.dumps('True')
+ params['HonorCooldown'] = 'true'
return self.get_status('SetDesiredCapacity', params)
4 boto/ec2/autoscale/group.py
View
@@ -129,8 +129,8 @@ def __init__(self, connection=None, name=None,
:param health_check_type: The service you want the health status from,
Amazon EC2 or Elastic Load Balancer.
- :type launch_config_name: str or LaunchConfiguration
- :param launch_config_name: Name of launch configuration (required).
+ :type launch_config: str or LaunchConfiguration
+ :param launch_config: Name of launch configuration (required).
:type load_balancers: list
:param load_balancers: List of load balancers.
7 boto/ec2/cloudwatch/metric.py
View
@@ -77,13 +77,6 @@ def query(self, start_time, end_time, statistics, unit=None, period=60):
:param statistics: A list of statistics names Valid values:
Average | Sum | SampleCount | Maximum | Minimum
- :type dimensions: dict
- :param dimensions: A dictionary of dimension key/values where
- the key is the dimension name and the value
- is either a scalar value or an iterator
- of values to be associated with that
- dimension.
-
:type unit: string
:param unit: The unit for the metric. Value values are:
Seconds | Microseconds | Milliseconds | Bytes | Kilobytes |
20 boto/ec2/connection.py
View
@@ -752,6 +752,11 @@ def run_instances(self, image_id, min_count=1, max_count=1,
* cc1.4xlarge
* cg1.4xlarge
* cc2.8xlarge
+ * g2.2xlarge
+ * i2.xlarge
+ * i2.2xlarge
+ * i2.4xlarge
+ * i2.8xlarge
:type placement: string
:param placement: The Availability Zone to launch the instance into.
@@ -1397,16 +1402,29 @@ def request_spot_instances(self, price, image_id, count=1, type='one-time',
:type instance_type: string
:param instance_type: The type of instance to run:
+ * t1.micro
* m1.small
+ * m1.medium
* m1.large
* m1.xlarge
+ * m3.xlarge
+ * m3.2xlarge
* c1.medium
* c1.xlarge
* m2.xlarge
* m2.2xlarge
* m2.4xlarge
+ * cr1.8xlarge
+ * hi1.4xlarge
+ * hs1.8xlarge
* cc1.4xlarge
- * t1.micro
+ * cg1.4xlarge
+ * cc2.8xlarge
+ * g2.2xlarge
+ * i2.xlarge
+ * i2.2xlarge
+ * i2.4xlarge
+ * i2.8xlarge
:type placement: string
:param placement: The availability zone in which to launch
4 boto/ec2/elb/__init__.py
View
@@ -159,7 +159,7 @@ def create_load_balancer(self, name, zones, listeners=None, subnets=None,
[SSLCertificateId]) where LoadBalancerPortNumber and
InstancePortNumber are integer values between 1 and 65535,
Protocol is a string containing either 'TCP', 'SSL', HTTP', or
- 'HTTPS'; SSLCertificateID is the ARN of a AWS AIM
+ 'HTTPS'; SSLCertificateID is the ARN of a AWS IAM
certificate, and must be specified when doing HTTPS.
:type subnets: list of strings
@@ -264,7 +264,7 @@ def create_load_balancer_listeners(self, name, listeners=None, complex_listeners
[SSLCertificateId]) where LoadBalancerPortNumber and
InstancePortNumber are integer values between 1 and 65535,
Protocol is a string containing either 'TCP', 'SSL', HTTP', or
- 'HTTPS'; SSLCertificateID is the ARN of a AWS AIM
+ 'HTTPS'; SSLCertificateID is the ARN of a AWS IAM
certificate, and must be specified when doing HTTPS.
:type complex_listeners: List of tuples
5 boto/ec2/image.py
View
@@ -218,6 +218,11 @@ def run(self, min_count=1, max_count=1, key_name=None,
* cc1.4xlarge
* cg1.4xlarge
* cc2.8xlarge
+ * g2.2xlarge
+ * i2.xlarge
+ * i2.2xlarge
+ * i2.4xlarge
+ * i2.8xlarge
:type placement: string
:param placement: The Availability Zone to launch the instance into.
13 boto/elasticache/layer1.py
View
@@ -99,8 +99,8 @@ def authorize_cache_security_group_ingress(self,
verb='POST',
path='/', params=params)
- def create_cache_cluster(self, cache_cluster_id, num_cache_nodes,
- cache_node_type, engine,
+ def create_cache_cluster(self, cache_cluster_id, num_cache_nodes=None,
+ cache_node_type=None, engine=None,
replication_group_id=None, engine_version=None,
cache_parameter_group_name=None,
cache_subnet_group_name=None,
@@ -244,10 +244,13 @@ def create_cache_cluster(self, cache_cluster_id, num_cache_nodes,
"""
params = {
'CacheClusterId': cache_cluster_id,
- 'NumCacheNodes': num_cache_nodes,
- 'CacheNodeType': cache_node_type,
- 'Engine': engine,
}
+ if num_cache_nodes is not None:
+ params['NumCacheNodes'] = num_cache_nodes
+ if cache_node_type is not None:
+ params['CacheNodeType'] = cache_node_type
+ if engine is not None:
+ params['Engine'] = engine
if replication_group_id is not None:
params['ReplicationGroupId'] = replication_group_id
if engine_version is not None:
45 boto/kinesis/__init__.py
View
@@ -0,0 +1,45 @@
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from boto.regioninfo import RegionInfo
+
+
+def regions():
+ """
+ Get all available regions for the Amazon Kinesis service.
+
+ :rtype: list
+ :return: A list of :class:`boto.regioninfo.RegionInfo`
+ """
+ from boto.kinesis.layer1 import KinesisConnection
+
+ return [RegionInfo(name='us-east-1',
+ endpoint='kinesis.us-east-1.amazonaws.com',
+ connection_cls=KinesisConnection),
+ ]
+
+
+def connect_to_region(region_name, **kw_params):
+ for region in regions():
+ if region.name == region_name:
+ return region.connect(**kw_params)
+ return None
51 boto/kinesis/exceptions.py
View
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2012 Thomas Parslow http://almostobsolete.net/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from boto.exception import BotoServerError
+
+
+class ProvisionedThroughputExceededException(BotoServerError):
+ pass
+
+
+class LimitExceededException(BotoServerError):
+ pass
+
+
+class ExpiredIteratorException(BotoServerError):
+ pass
+
+
+class ResourceInUseException(BotoServerError):
+ pass
+
+
+class ResourceNotFoundException(BotoServerError):
+ pass
+
+
+class InvalidArgumentException(BotoServerError):
+ pass
+
+
+class SubscriptionRequiredException(BotoServerError):
+ pass
707 boto/kinesis/layer1.py
View
@@ -0,0 +1,707 @@
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
+import base64
+import boto
+
+from boto.connection import AWSQueryConnection
+from boto.regioninfo import RegionInfo
+from boto.exception import JSONResponseError
+from boto.kinesis import exceptions
+
+
+class KinesisConnection(AWSQueryConnection):
+ """
+ Amazon Kinesis Service API Reference
+ Amazon Kinesis is a managed service that scales elastically for
+ real time processing of streaming big data.
+ """
+ APIVersion = "2013-12-02"
+ DefaultRegionName = "us-east-1"
+ DefaultRegionEndpoint = "kinesis.us-east-1.amazonaws.com"
+ ServiceName = "Kinesis"
+ TargetPrefix = "Kinesis_20131202"
+ ResponseError = JSONResponseError
+
+ _faults = {
+ "ProvisionedThroughputExceededException": exceptions.ProvisionedThroughputExceededException,
+ "LimitExceededException": exceptions.LimitExceededException,
+ "ExpiredIteratorException": exceptions.ExpiredIteratorException,
+ "ResourceInUseException": exceptions.ResourceInUseException,
+ "ResourceNotFoundException": exceptions.ResourceNotFoundException,
+ "InvalidArgumentException": exceptions.InvalidArgumentException,
+ "SubscriptionRequiredException": exceptions.SubscriptionRequiredException
+ }
+
+
+ def __init__(self, **kwargs):
+ region = kwargs.pop('region', None)
+ if not region:
+ region = RegionInfo(self, self.DefaultRegionName,
+ self.DefaultRegionEndpoint)
+ if 'host' not in kwargs:
+ kwargs['host'] = region.endpoint
+ AWSQueryConnection.__init__(self, **kwargs)
+ self.region = region
+
+ def _required_auth_capability(self):
+ return ['hmac-v4']
+
+ def create_stream(self, stream_name, shard_count):
+ """
+ This operation adds a new Amazon Kinesis stream to your AWS
+ account. A stream captures and transports data records that
+ are continuously emitted from different data sources or
+ producers . Scale-out within an Amazon Kinesis stream is
+ explicitly supported by means of shards, which are uniquely
+ identified groups of data records in an Amazon Kinesis stream.
+
+ You specify and control the number of shards that a stream is
+ composed of. Each shard can support up to 5 read transactions
+ per second up to a maximum total of 2 MB of data read per
+ second. Each shard can support up to 1000 write transactions
+ per second up to a maximum total of 1 MB data written per
+ second. You can add shards to a stream if the amount of data
+ input increases and you can remove shards if the amount of
+ data input decreases.
+
+ The stream name identifies the stream. The name is scoped to
+ the AWS account used by the application. It is also scoped by
+ region. That is, two streams in two different accounts can
+ have the same name, and two streams in the same account, but
+ in two different regions, can have the same name.
+
+ `CreateStream` is an asynchronous operation. Upon receiving a
+ `CreateStream` request, Amazon Kinesis immediately returns and
+ sets the stream status to CREATING. After the stream is
+ created, Amazon Kinesis sets the stream status to ACTIVE. You
+ should perform read and write operations only on an ACTIVE
+ stream.
+
+ You receive a `LimitExceededException` when making a
+ `CreateStream` request if you try to do one of the following:
+
+
+ + Have more than five streams in the CREATING state at any
+ point in time.
+ + Create more shards than are authorized for your account.
+
+
+ **Note:** The default limit for an AWS account is two shards
+ per stream. If you need to create a stream with more than two
+ shards, contact AWS Support to increase the limit on your
+ account.
+
+ You can use the `DescribeStream` operation to check the stream
+ status, which is returned in `StreamStatus`.
+
+ `CreateStream` has a limit of 5 transactions per second per
+ account.
+
+ :type stream_name: string
+ :param stream_name: A name to identify the stream. The stream name is
+ scoped to the AWS account used by the application that creates the
+ stream. It is also scoped by region. That is, two streams in two
+ different AWS accounts can have the same name, and two streams in
+ the same AWS account, but in two different regions, can have the
+ same name.
+
+ :type shard_count: integer
+ :param shard_count: The number of shards that the stream will use. The
+ throughput of the stream is a function of the number of shards;
+ more shards are required for greater provisioned throughput.
+ **Note:** The default limit for an AWS account is two shards per
+ stream. If you need to create a stream with more than two shards,
+ contact AWS Support to increase the limit on your account.
+
+ """
+ params = {
+ 'StreamName': stream_name,
+ 'ShardCount': shard_count,
+ }
+ return self.make_request(action='CreateStream',
+ body=json.dumps(params))
+
+ def delete_stream(self, stream_name):
+ """
+ This operation deletes a stream and all of its shards and
+ data. You must shut down any applications that are operating
+ on the stream before you delete the stream. If an application
+ attempts to operate on a deleted stream, it will receive the
+ exception `ResourceNotFoundException`.
+
+ If the stream is in the ACTIVE state, you can delete it. After
+ a `DeleteStream` request, the specified stream is in the
+ DELETING state until Amazon Kinesis completes the deletion.
+
+ **Note:** Amazon Kinesis might continue to accept data read
+ and write operations, such as PutRecord and GetRecords, on a
+ stream in the DELETING state until the stream deletion is
+ complete.
+
+ When you delete a stream, any shards in that stream are also
+ deleted.
+
+ You can use the DescribeStream operation to check the state of
+ the stream, which is returned in `StreamStatus`.
+
+ `DeleteStream` has a limit of 5 transactions per second per
+ account.
+
+ :type stream_name: string
+ :param stream_name: The name of the stream to delete.
+
+ """
+ params = {'StreamName': stream_name, }
+ return self.make_request(action='DeleteStream',
+ body=json.dumps(params))
+
+ def describe_stream(self, stream_name, limit=None,
+ exclusive_start_shard_id=None):
+ """
+ This operation returns the following information about the
+ stream: the current status of the stream, the stream Amazon
+ Resource Name (ARN), and an array of shard objects that
+ comprise the stream. For each shard object there is
+ information about the hash key and sequence number ranges that
+ the shard spans, and the IDs of any earlier shards that played
+ in a role in a MergeShards or SplitShard operation that
+ created the shard. A sequence number is the identifier
+ associated with every record ingested in the Amazon Kinesis
+ stream. The sequence number is assigned by the Amazon Kinesis
+ service when a record is put into the stream.
+
+ You can limit the number of returned shards using the `Limit`
+ parameter. The number of shards in a stream may be too large
+ to return from a single call to `DescribeStream`. You can
+ detect this by using the `HasMoreShards` flag in the returned
+ output. `HasMoreShards` is set to `True` when there is more
+ data available.
+
+ If there are more shards available, you can request more
+ shards by using the shard ID of the last shard returned by the
+ `DescribeStream` request, in the `ExclusiveStartShardId`
+ parameter in a subsequent request to `DescribeStream`.
+ `DescribeStream` is a paginated operation.
+
+ `DescribeStream` has a limit of 10 transactions per second per
+ account.
+
+ :type stream_name: string
+ :param stream_name: The name of the stream to describe.
+
+ :type limit: integer
+ :param limit: The maximum number of shards to return.
+
+ :type exclusive_start_shard_id: string
+ :param exclusive_start_shard_id: The shard ID of the shard to start
+ with for the stream description.
+
+ """
+ params = {'StreamName': stream_name, }
+ if limit is not None:
+ params['Limit'] = limit
+ if exclusive_start_shard_id is not None:
+ params['ExclusiveStartShardId'] = exclusive_start_shard_id
+ return self.make_request(action='DescribeStream',
+ body=json.dumps(params))
+
+ def get_records(self, shard_iterator, limit=None, b64_decode=True):
+ """
+ This operation returns one or more data records from a shard.
+ A `GetRecords` operation request can retrieve up to 10 MB of
+ data.
+
+ You specify a shard iterator for the shard that you want to
+ read data from in the `ShardIterator` parameter. The shard
+ iterator specifies the position in the shard from which you
+ want to start reading data records sequentially. A shard
+ iterator specifies this position using the sequence number of
+ a data record in the shard. For more information about the
+ shard iterator, see GetShardIterator.
+
+ `GetRecords` may return a partial result if the response size
+ limit is exceeded. You will get an error, but not a partial
+ result if the shard's provisioned throughput is exceeded, the
+ shard iterator has expired, or an internal processing failure
+ has occurred. Clients can request a smaller amount of data by
+ specifying a maximum number of returned records using the
+ `Limit` parameter. The `Limit` parameter can be set to an
+ integer value of up to 10,000. If you set the value to an
+ integer greater than 10,000, you will receive
+ `InvalidArgumentException`.
+
+ A new shard iterator is returned by every `GetRecords` request
+ in `NextShardIterator`, which you use in the `ShardIterator`
+ parameter of the next `GetRecords` request. When you
+ repeatedly read from an Amazon Kinesis stream use a
+ GetShardIterator request to get the first shard iterator to
+ use in your first `GetRecords` request and then use the shard
+ iterator returned in `NextShardIterator` for subsequent reads.
+
+ `GetRecords` can return `null` for the `NextShardIterator` to
+ reflect that the shard has been closed and that the requested
+ shard iterator would never have returned more data.
+
+ If no items can be processed because of insufficient
+ provisioned throughput on the shard involved in the request,
+ `GetRecords` throws `ProvisionedThroughputExceededException`.
+
+ :type shard_iterator: string
+ :param shard_iterator: The position in the shard from which you want to
+ start sequentially reading data records.
+
+ :type limit: integer
+ :param limit: The maximum number of records to return, which can be set
+ to a value of up to 10,000.
+
+ :type b64_decode: boolean
+ :param b64_decode: Decode the Base64-encoded ``Data`` field of records.
+
+ """
+ params = {'ShardIterator': shard_iterator, }
+ if limit is not None:
+ params['Limit'] = limit
+
+ response = self.make_request(action='GetRecords',
+ body=json.dumps(params))
+
+ # Base64 decode the data
+ if b64_decode:
+ for record in response.get('Records', []):
+ record['Data'] = base64.b64decode(record['Data'])
+
+ return response
+
+ def get_shard_iterator(self, stream_name, shard_id, shard_iterator_type,
+ starting_sequence_number=None):
+ """
+ This operation returns a shard iterator in `ShardIterator`.
+ The shard iterator specifies the position in the shard from
+ which you want to start reading data records sequentially. A
+ shard iterator specifies this position using the sequence
+ number of a data record in a shard. A sequence number is the
+ identifier associated with every record ingested in the Amazon
+ Kinesis stream. The sequence number is assigned by the Amazon
+ Kinesis service when a record is put into the stream.
+
+ You must specify the shard iterator type in the
+ `GetShardIterator` request. For example, you can set the
+ `ShardIteratorType` parameter to read exactly from the
+ position denoted by a specific sequence number by using the
+ AT_SEQUENCE_NUMBER shard iterator type, or right after the
+ sequence number by using the AFTER_SEQUENCE_NUMBER shard
+ iterator type, using sequence numbers returned by earlier
+ PutRecord, GetRecords or DescribeStream requests. You can
+ specify the shard iterator type TRIM_HORIZON in the request to
+ cause `ShardIterator` to point to the last untrimmed record in
+ the shard in the system, which is the oldest data record in
+ the shard. Or you can point to just after the most recent
+ record in the shard, by using the shard iterator type LATEST,
+ so that you always read the most recent data in the shard.
+
+ **Note:** Each shard iterator expires five minutes after it is
+ returned to the requester.
+
+ When you repeatedly read from an Amazon Kinesis stream use a
+ GetShardIterator request to get the first shard iterator to to
+ use in your first `GetRecords` request and then use the shard
+ iterator returned by the `GetRecords` request in
+ `NextShardIterator` for subsequent reads. A new shard iterator
+ is returned by every `GetRecords` request in
+ `NextShardIterator`, which you use in the `ShardIterator`
+ parameter of the next `GetRecords` request.
+
+ If a `GetShardIterator` request is made too often, you will
+ receive a `ProvisionedThroughputExceededException`. For more
+ information about throughput limits, see the `Amazon Kinesis
+ Developer Guide`_.
+
+ `GetShardIterator` can return `null` for its `ShardIterator`
+ to indicate that the shard has been closed and that the
+ requested iterator will return no more data. A shard can be
+ closed by a SplitShard or MergeShards operation.
+
+ `GetShardIterator` has a limit of 5 transactions per second
+ per account per shard.
+
+ :type stream_name: string
+ :param stream_name: The name of the stream.
+
+ :type shard_id: string
+ :param shard_id: The shard ID of the shard to get the iterator for.
+
+ :type shard_iterator_type: string
+ :param shard_iterator_type:
+ Determines how the shard iterator is used to start reading data records
+ from the shard.
+
+ The following are the valid shard iterator types:
+
+
+ + AT_SEQUENCE_NUMBER - Start reading exactly from the position denoted
+ by a specific sequence number.
+ + AFTER_SEQUENCE_NUMBER - Start reading right after the position
+ denoted by a specific sequence number.
+ + TRIM_HORIZON - Start reading at the last untrimmed record in the
+ shard in the system, which is the oldest data record in the shard.
+ + LATEST - Start reading just after the most recent record in the
+ shard, so that you always read the most recent data in the shard.
+
+ :type starting_sequence_number: string
+ :param starting_sequence_number: The sequence number of the data record
+ in the shard from which to start reading from.
+
+ """
+ params = {
+ 'StreamName': stream_name,
+ 'ShardId': shard_id,
+ 'ShardIteratorType': shard_iterator_type,
+ }
+ if starting_sequence_number is not None:
+ params['StartingSequenceNumber'] = starting_sequence_number
+ return self.make_request(action='GetShardIterator',
+ body=json.dumps(params))
+
+ def list_streams(self, limit=None, exclusive_start_stream_name=None):
+ """
+ This operation returns an array of the names of all the
+ streams that are associated with the AWS account making the
+ `ListStreams` request. A given AWS account can have many
+ streams active at one time.
+
+ The number of streams may be too large to return from a single
+ call to `ListStreams`. You can limit the number of returned
+ streams using the `Limit` parameter. If you do not specify a
+ value for the `Limit` parameter, Amazon Kinesis uses the
+ default limit, which is currently 10.
+
+ You can detect if there are more streams available to list by
+ using the `HasMoreStreams` flag from the returned output. If
+ there are more streams available, you can request more streams
+ by using the name of the last stream returned by the
+ `ListStreams` request in the `ExclusiveStartStreamName`
+ parameter in a subsequent request to `ListStreams`. The group
+ of stream names returned by the subsequent request is then
+ added to the list. You can continue this process until all the
+ stream names have been collected in the list.
+
+ `ListStreams` has a limit of 5 transactions per second per
+ account.
+
+ :type limit: integer
+ :param limit: The maximum number of streams to list.
+
+ :type exclusive_start_stream_name: string
+ :param exclusive_start_stream_name: The name of the stream to start the
+ list with.
+
+ """
+ params = {}
+ if limit is not None:
+ params['Limit'] = limit
+ if exclusive_start_stream_name is not None:
+ params['ExclusiveStartStreamName'] = exclusive_start_stream_name
+ return self.make_request(action='ListStreams',
+ body=json.dumps(params))
+
+ def merge_shards(self, stream_name, shard_to_merge,
+ adjacent_shard_to_merge):
+ """
+ This operation merges two adjacent shards in a stream and
+ combines them into a single shard to reduce the stream's
+ capacity to ingest and transport data. Two shards are
+ considered adjacent if the union of the hash key ranges for
+ the two shards form a contiguous set with no gaps. For
+ example, if you have two shards, one with a hash key range of
+ 276...381 and the other with a hash key range of 382...454,
+ then you could merge these two shards into a single shard that
+ would have a hash key range of 276...454. After the merge, the
+ single child shard receives data for all hash key values
+ covered by the two parent shards.
+
+ `MergeShards` is called when there is a need to reduce the
+ overall capacity of a stream because of excess capacity that
+ is not being used. The operation requires that you specify the
+ shard to be merged and the adjacent shard for a given stream.
+ For more information about merging shards, see the `Amazon
+ Kinesis Developer Guide`_.
+
+ If the stream is in the ACTIVE state, you can call
+ `MergeShards`. If a stream is in CREATING or UPDATING or
+ DELETING states, then Amazon Kinesis returns a
+ `ResourceInUseException`. If the specified stream does not
+ exist, Amazon Kinesis returns a `ResourceNotFoundException`.
+
+ You can use the DescribeStream operation to check the state of
+ the stream, which is returned in `StreamStatus`.
+
+ `MergeShards` is an asynchronous operation. Upon receiving a
+ `MergeShards` request, Amazon Kinesis immediately returns a
+ response and sets the `StreamStatus` to UPDATING. After the
+ operation is completed, Amazon Kinesis sets the `StreamStatus`
+ to ACTIVE. Read and write operations continue to work while
+ the stream is in the UPDATING state.
+
+ You use the DescribeStream operation to determine the shard
+ IDs that are specified in the `MergeShards` request.
+
+ If you try to operate on too many streams in parallel using
+ CreateStream, DeleteStream, `MergeShards` or SplitShard, you
+ will receive a `LimitExceededException`.
+
+ `MergeShards` has limit of 5 transactions per second per
+ account.
+
+ :type stream_name: string
+ :param stream_name: The name of the stream for the merge.
+
+ :type shard_to_merge: string
+ :param shard_to_merge: The shard ID of the shard to combine with the
+ adjacent shard for the merge.
+
+ :type adjacent_shard_to_merge: string
+ :param adjacent_shard_to_merge: The shard ID of the adjacent shard for
+ the merge.
+
+ """
+ params = {
+ 'StreamName': stream_name,
+ 'ShardToMerge': shard_to_merge,
+ 'AdjacentShardToMerge': adjacent_shard_to_merge,
+ }
+ return self.make_request(action='MergeShards',
+ body=json.dumps(params))
+
+ def put_record(self, stream_name, data, partition_key,
+ explicit_hash_key=None,
+ sequence_number_for_ordering=None,
+ exclusive_minimum_sequence_number=None,
+ b64_encode=True):
+ """
+ This operation puts a data record into an Amazon Kinesis
+ stream from a producer. This operation must be called to send
+ data from the producer into the Amazon Kinesis stream for
+ real-time ingestion and subsequent processing. The `PutRecord`
+ operation requires the name of the stream that captures,
+ stores, and transports the data; a partition key; and the data
+ blob itself. The data blob could be a segment from a log file,
+ geographic/location data, website clickstream data, or any
+ other data type.
+
+ The partition key is used to distribute data across shards.
+ Amazon Kinesis segregates the data records that belong to a
+ data stream into multiple shards, using the partition key
+ associated with each data record to determine which shard a
+ given data record belongs to.
+
+ Partition keys are Unicode strings, with a maximum length
+ limit of 256 bytes. An MD5 hash function is used to map
+ partition keys to 128-bit integer values and to map associated
+ data records to shards using the hash key ranges of the
+ shards. You can override hashing the partition key to
+ determine the shard by explicitly specifying a hash value
+ using the `ExplicitHashKey` parameter. For more information,
+ see the `Amazon Kinesis Developer Guide`_.
+
+ `PutRecord` returns the shard ID of where the data record was
+ placed and the sequence number that was assigned to the data
+ record.
+
+ The `SequenceNumberForOrdering` sets the initial sequence
+ number for the partition key. Later `PutRecord` requests to
+ the same partition key (from the same client) will
+ automatically increase from `SequenceNumberForOrdering`,
+ ensuring strict sequential ordering.
+
+ If a `PutRecord` request cannot be processed because of
+ insufficient provisioned throughput on the shard involved in
+ the request, `PutRecord` throws
+ `ProvisionedThroughputExceededException`.
+
+ Data records are accessible for only 24 hours from the time
+ that they are added to an Amazon Kinesis stream.
+
+ :type stream_name: string
+ :param stream_name: The name of the stream to put the data record into.
+
+ :type data: blob
+ :param data: The data blob to put into the record, which will be Base64
+ encoded. The maximum size of the data blob is 50 kilobytes (KB).
+ Set `b64_encode` to disable automatic Base64 encoding.
+
+ :type partition_key: string
+ :param partition_key: Determines which shard in the stream the data
+ record is assigned to. Partition keys are Unicode strings with a
+ maximum length limit of 256 bytes. Amazon Kinesis uses the
+ partition key as input to a hash function that maps the partition
+ key and associated data to a specific shard. Specifically, an MD5
+ hash function is used to map partition keys to 128-bit integer
+ values and to map associated data records to shards. As a result of
+ this hashing mechanism, all data records with the same partition
+ key will map to the same shard within the stream.
+
+ :type explicit_hash_key: string
+ :param explicit_hash_key: The hash value used to explicitly determine
+ the shard the data record is assigned to by overriding the
+ partition key hash.
+
+ :type sequence_number_for_ordering: string
+ :param sequence_number_for_ordering: The sequence number to use as the
+ initial number for the partition key. Subsequent calls to
+ `PutRecord` from the same client and for the same partition key
+ will increase from the `SequenceNumberForOrdering` value.
+
+ :type b64_encode: boolean
+ :param b64_encode: Whether to Base64 encode `data`. Can be set to
+ ``False`` if `data` is already encoded to prevent double encoding.
+
+ """
+ params = {
+ 'StreamName': stream_name,
+ 'Data': data,
+ 'PartitionKey': partition_key,
+ }
+ if explicit_hash_key is not None:
+ params['ExplicitHashKey'] = explicit_hash_key
+ if sequence_number_for_ordering is not None:
+ params['SequenceNumberForOrdering'] = sequence_number_for_ordering
+ if b64_encode:
+ params['Data'] = base64.b64encode(params['Data'])
+ return self.make_request(action='PutRecord',
+ body=json.dumps(params))
+
+ def split_shard(self, stream_name, shard_to_split, new_starting_hash_key):
+ """
+ This operation splits a shard into two new shards in the
+ stream, to increase the stream's capacity to ingest and
+ transport data. `SplitShard` is called when there is a need to
+ increase the overall capacity of stream because of an expected
+ increase in the volume of data records being ingested.
+
+ `SplitShard` can also be used when a given shard appears to be
+ approaching its maximum utilization, for example, when the set
+ of producers sending data into the specific shard are suddenly
+ sending more than previously anticipated. You can also call
+ the `SplitShard` operation to increase stream capacity, so
+ that more Amazon Kinesis applications can simultaneously read
+ data from the stream for real-time processing.
+
+ The `SplitShard` operation requires that you specify the shard
+ to be split and the new hash key, which is the position in the
+ shard where the shard gets split in two. In many cases, the
+ new hash key might simply be the average of the beginning and
+ ending hash key, but it can be any hash key value in the range
+ being mapped into the shard. For more information about
+ splitting shards, see the `Amazon Kinesis Developer Guide`_.
+
+ You can use the DescribeStream operation to determine the
+ shard ID and hash key values for the `ShardToSplit` and
+ `NewStartingHashKey` parameters that are specified in the
+ `SplitShard` request.
+
+ `SplitShard` is an asynchronous operation. Upon receiving a
+ `SplitShard` request, Amazon Kinesis immediately returns a
+ response and sets the stream status to UPDATING. After the
+ operation is completed, Amazon Kinesis sets the stream status
+ to ACTIVE. Read and write operations continue to work while
+ the stream is in the UPDATING state.
+
+ You can use `DescribeStream` to check the status of the
+ stream, which is returned in `StreamStatus`. If the stream is
+ in the ACTIVE state, you can call `SplitShard`. If a stream is
+ in CREATING or UPDATING or DELETING states, then Amazon
+ Kinesis returns a `ResourceInUseException`.
+
+ If the specified stream does not exist, Amazon Kinesis returns
+ a `ResourceNotFoundException`. If you try to create more
+ shards than are authorized for your account, you receive a
+ `LimitExceededException`.
+
+ **Note:** The default limit for an AWS account is two shards
+ per stream. If you need to create a stream with more than two
+ shards, contact AWS Support to increase the limit on your
+ account.
+
+ If you try to operate on too many streams in parallel using
+ CreateStream, DeleteStream, MergeShards or SplitShard, you
+ will receive a `LimitExceededException`.
+
+ `SplitShard` has limit of 5 transactions per second per
+ account.
+
+ :type stream_name: string
+ :param stream_name: The name of the stream for the shard split.
+
+ :type shard_to_split: string
+ :param shard_to_split: The shard ID of the shard to split.
+
+ :type new_starting_hash_key: string
+ :param new_starting_hash_key: A hash key value for the starting hash
+ key of one of the child shards created by the split. The hash key
+ range for a given shard constitutes a set of ordered contiguous
+ positive integers. The value for `NewStartingHashKey` must be in
+ the range of hash keys being mapped into the shard. The
+ `NewStartingHashKey` hash key value and all higher hash key values
+ in hash key range are distributed to one of the child shards. All
+ the lower hash key values in the range are distributed to the other
+ child shard.
+
+ """
+ params = {
+ 'StreamName': stream_name,
+ 'ShardToSplit': shard_to_split,
+ 'NewStartingHashKey': new_starting_hash_key,
+ }
+ return self.make_request(action='SplitShard',
+ body=json.dumps(params))
+
+ def make_request(self, action, body):
+ headers = {
+ 'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
+ 'Host': self.region.endpoint,
+ 'Content-Type': 'application/x-amz-json-1.1',
+ 'Content-Length': str(len(body)),
+ }
+ http_request = self.build_base_http_request(
+ method='POST', path='/', auth_path='/', params={},
+ headers=headers, data=body)
+ response = self._mexe(http_request, sender=None,
+ override_num_retries=10)
+ response_body = response.read()
+ boto.log.debug(response.getheaders())
+ boto.log.debug(response_body)
+ if response.status == 200:
+ if response_body:
+ return json.loads(response_body)
+ else:
+ json_body = json.loads(response_body)
+ fault_name = json_body.get('__type', None)
+ exception_class = self._faults.get(fault_name, self.ResponseError)
+ raise exception_class(response.status, response.reason,
+ body=json_body)
+
2  boto/rds/__init__.py
View
@@ -443,7 +443,7 @@ def create_dbinstance(self,
# Remove any params set to None
for k, v in params.items():
- if not v: del(params[k])
+ if v is None: del(params[k])
return self.get_object('CreateDBInstance', params, DBInstance)
39 boto/rds/dbinstance.py
View
@@ -22,6 +22,7 @@
from boto.rds.dbsecuritygroup import DBSecurityGroup
from boto.rds.parametergroup import ParameterGroup
from boto.rds.statusinfo import StatusInfo
+from boto.rds.dbsubnetgroup import DBSubnetGroup
from boto.rds.vpcsecuritygroupmembership import VPCSecurityGroupMembership
from boto.resultset import ResultSet
@@ -39,6 +40,8 @@ class DBInstance(object):
:ivar engine: The database engine being used
:ivar status: The status of the database in a string. e.g. "available"
:ivar allocated_storage: The size of the disk in gigabytes (int).
+ :ivar auto_minor_version_upgrade: Indicates that minor version patches
+ are applied automatically.
:ivar endpoint: A tuple that describes the hostname and port of
the instance. This is only available when the database is
in status "available".
@@ -75,7 +78,14 @@ class DBInstance(object):
:ivar read_replica_dbinstance_identifiers: List of read replicas
associated with this DB instance.
:ivar status_infos: The status of a Read Replica. If the instance is not a
- for a read replica, this will be blank.
+ for a read replica, this will be blank.
+ :ivar character_set_name: If present, specifies the name of the character
+ set that this instance is associated with.
+ :ivar subnet_group: Specifies information on the subnet group associated
+ with the DB instance, including the name, description, and subnets
+ in the subnet group.
+ :ivar engine_version: Indicates the database engine version.
+ :ivar license_model: License model information for this DB instance.
"""
def __init__(self, connection=None, id=None):
@@ -85,6 +95,7 @@ def __init__(self, connection=None, id=None):
self.engine = None
self.status = None
self.allocated_storage = None
+ self.auto_minor_version_upgrade = None
self.endpoint = None
self.instance_class = None
self.master_username = None
@@ -104,6 +115,10 @@ def __init__(self, connection=None, id=None):
self._port = None