Skip to content

HTTPS clone URL

Subversion checkout URL

You can clone with HTTPS or Subversion.

Download ZIP
Browse files

Merge branch 'release-2.10.0'

* release-2.10.0: (43 commits)
  Bumping version to 2.10.0
  Added release notes for v2.10.0.
  Updated SNS docs.
  Moved EMR to SigV4.
  Added a better comment about why keys shouldn't be included from the previous commit.
  Fixed ``Item.partial_save`` so that calling it on a new record now correctly saves.
  Support passing in a ParameterGroup for param_group arguments
  Updated Redshift for ``copy_cluster_snapshot/delete_cluster_snapshot`` changes.
  Update distribution.py
  Test PEP8 line length fixes
  Add modify_network_interface_attribute unit tests.
  Add a check to ensure a valid boolean, "true" or "false", is sent for boolean values before making a call to the server.
  PEP8 and other style fixes to fit in with existing code.
  New method: modify_network_interface_attribute
  Ensure region info is passed and is an instance of RegionInfo before trying to create a new connection; add unit tests to ensure connect_to_region behavior.
  Remove trailing whitespace
  Cleanup
  Changed needs_save to be more concise.
  PEP8 fixes for example response
  Modify attachment device index to be an integer as stated in the service docs. Add unit test to make sure deviceIndex is passed to network interface attachments and to make sure it gets properly cast to an integer.
  ...
  • Loading branch information...
commit 32761fac621dc30fe345a5b03c37a5e4c9829722 2 parents 15c650e + f779a2f
@toastdriven toastdriven authored
Showing with 4,499 additions and 866 deletions.
  1. +2 −2 README.rst
  2. +11 −10 bin/s3put
  3. +1 −1  boto/__init__.py
  4. +2,814 −563 boto/cacerts/cacerts.txt
  5. +2 −2 boto/cloudfront/distribution.py
  6. +1 −1  boto/cloudsearch/__init__.py
  7. +16 −8 boto/cloudsearch/layer1.py
  8. +12 −4 boto/cloudsearch/layer2.py
  9. +1 −2  boto/cloudsearch/search.py
  10. +3 −0  boto/dynamodb2/__init__.py
  11. +138 −64 boto/dynamodb2/items.py
  12. +1 −1  boto/dynamodb2/table.py
  13. +5 −0 boto/ec2/__init__.py
  14. +5 −0 boto/ec2/autoscale/__init__.py
  15. +7 −0 boto/ec2/autoscale/policy.py
  16. +66 −0 boto/ec2/connection.py
  17. +2 −0  boto/ec2/networkinterface.py
  18. +7 −7 boto/emr/__init__.py
  19. +1 −1  boto/emr/connection.py
  20. +15 −11 boto/rds/__init__.py
  21. +8 −0 boto/redshift/exceptions.py
  22. +154 −41 boto/redshift/layer1.py
  23. +377 −127 boto/sns/connection.py
  24. +23 −0 docs/source/contributing.rst
  25. +1 −0  docs/source/index.rst
  26. +54 −0 docs/source/releasenotes/v2.10.0.rst
  27. +1 −1  tests/integration/cloudsearch/test_cert_verification.py
  28. +75 −0 tests/integration/cloudsearch/test_layers.py
  29. +42 −2 tests/integration/dynamodb2/test_highlevel.py
  30. +35 −0 tests/integration/sns/test_connection.py
  31. +2 −2 tests/integration/sns/test_sns_sqs_subscription.py
  32. +37 −0 tests/unit/cloudsearch/test_exceptions.py
  33. +70 −2 tests/unit/dynamodb2/test_table.py
  34. +76 −0 tests/unit/ec2/autoscale/test_group.py
  35. +223 −0 tests/unit/ec2/test_connection.py
  36. +161 −0 tests/unit/rds/test_connection.py
  37. +7 −14 tests/unit/s3/test_key.py
  38. +43 −0 tests/unit/sns/test_connection.py
View
4 README.rst
@@ -1,9 +1,9 @@
####
boto
####
-boto 2.9.9
+boto 2.10.0
-Released: 24-July-2013
+Released: 13-August-2013
.. image:: https://travis-ci.org/boto/boto.png?branch=develop
:target: https://travis-ci.org/boto/boto
View
21 bin/s3put
@@ -112,9 +112,9 @@ SYNOPSIS
"""
-def usage():
+def usage(status=1):
print usage_string
- sys.exit()
+ sys.exit(status)
def submit_cb(bytes_so_far, total_bytes):
@@ -251,12 +251,12 @@ def main():
'secret_key=', 'no_overwrite', 'reduced', 'header=', 'multipart',
'host='])
except:
- usage()
+ usage(1)
# parse opts
for o, a in opts:
if o in ('-h', '--help'):
- usage()
+ usage(0)
if o in ('-a', '--access_key'):
aws_access_key_id = a
if o in ('-b', '--bucket'):
@@ -297,14 +297,14 @@ def main():
multipart_requested = True
else:
print "multipart upload requested but not capable"
- sys.exit()
+ sys.exit(4)
if len(args) < 1:
- usage()
+ usage(2)
if not bucket_name:
print "bucket name is required!"
- usage()
+ usage(3)
if host:
c = boto.connect_s3(host=host, aws_access_key_id=aws_access_key_id,
@@ -350,9 +350,10 @@ def main():
key_name = get_key_name(fullpath, prefix, key_prefix)
if no_overwrite and key_name in existing_keys_to_check_against:
- if not quiet:
- print 'Skipping %s as it exists in s3' % fullpath
- continue
+ if b.get_key(key_name):
+ if not quiet:
+ print 'Skipping %s as it exists in s3' % fullpath
+ continue
if not quiet:
print 'Copying %s to %s/%s' % (fullpath, bucket_name, key_name)
View
2  boto/__init__.py
@@ -36,7 +36,7 @@
import urlparse
from boto.exception import InvalidUriError
-__version__ = '2.9.9'
+__version__ = '2.10.0'
Version = __version__ # for backware compatibility
UserAgent = 'Boto/%s (%s)' % (__version__, sys.platform)
View
3,377 boto/cacerts/cacerts.txt
2,814 additions, 563 deletions not shown
View
4 boto/cloudfront/distribution.py
@@ -362,14 +362,14 @@ def update(self, enabled=None, cnames=None, comment=None):
def enable(self):
"""
- Deactivate the Distribution. A convenience wrapper around
+ Activate the Distribution. A convenience wrapper around
the update method.
"""
self.update(enabled=True)
def disable(self):
"""
- Activate the Distribution. A convenience wrapper around
+ Deactivate the Distribution. A convenience wrapper around
the update method.
"""
self.update(enabled=False)
View
2  boto/cloudsearch/__init__.py
@@ -21,7 +21,7 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
-from boto.ec2.regioninfo import RegionInfo
+from boto.regioninfo import RegionInfo
def regions():
View
24 boto/cloudsearch/layer1.py
@@ -51,14 +51,22 @@ def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
self.region = region
- AWSQueryConnection.__init__(self, aws_access_key_id,
- aws_secret_access_key,
- is_secure, port, proxy, proxy_port,
- proxy_user, proxy_pass,
- self.region.endpoint, debug,
- https_connection_factory, path,
- security_token,
- validate_certs=validate_certs)
+ AWSQueryConnection.__init__(
+ self,
+ host=self.region.endpoint,
+ aws_access_key_id=aws_access_key_id,
+ aws_secret_access_key=aws_secret_access_key,
+ is_secure=is_secure,
+ port=port,
+ proxy=proxy,
+ proxy_port=proxy_port,
+ proxy_user=proxy_user,
+ proxy_pass=proxy_pass,
+ debug=debug,
+ https_connection_factory=https_connection_factory,
+ path=path,
+ security_token=security_token,
+ validate_certs=validate_certs)
def _required_auth_capability(self):
return ['sign-v2']
View
16 boto/cloudsearch/layer2.py
@@ -32,10 +32,18 @@ def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
host=None, debug=0, session_token=None, region=None,
validate_certs=True):
- self.layer1 = Layer1(aws_access_key_id, aws_secret_access_key,
- is_secure, port, proxy, proxy_port,
- host, debug, session_token, region,
- validate_certs=validate_certs)
+ self.layer1 = Layer1(
+ aws_access_key_id=aws_access_key_id,
+ aws_secret_access_key=aws_secret_access_key,
+ is_secure=is_secure,
+ port=port,
+ proxy=proxy,
+ proxy_port=proxy_port,
+ host=host,
+ debug=debug,
+ security_token=session_token,
+ region=region,
+ validate_certs=validate_certs)
def list_domains(self, domain_names=None):
"""
View
3  boto/cloudsearch/search.py
@@ -37,7 +37,6 @@ class CommitMismatchError(Exception):
class SearchResults(object):
-
def __init__(self, **attrs):
self.rid = attrs['info']['rid']
# self.doc_coverage_pct = attrs['info']['doc-coverage-pct']
@@ -291,7 +290,7 @@ def __call__(self, query):
r = requests.get(url, params=params)
try:
data = json.loads(r.content)
- except json.JSONDecodeError,e:
+ except ValueError, e:
if r.status_code == 403:
msg = ''
import re
View
3  boto/dynamodb2/__init__.py
@@ -50,6 +50,9 @@ def regions():
RegionInfo(name='ap-southeast-1',
endpoint='dynamodb.ap-southeast-1.amazonaws.com',
connection_cls=DynamoDBConnection),
+ RegionInfo(name='ap-southeast-2',
+ endpoint='dynamodb.ap-southeast-2.amazonaws.com',
+ connection_cls=DynamoDBConnection),
RegionInfo(name='sa-east-1',
endpoint='dynamodb.sa-east-1.amazonaws.com',
connection_cls=DynamoDBConnection),
View
202 boto/dynamodb2/items.py
@@ -1,3 +1,5 @@
+from copy import deepcopy
+
from boto.dynamodb2.types import Dynamizer
@@ -18,7 +20,7 @@ class Item(object):
data. It also tries to intelligently track how data has changed throughout
the life of the instance, to be as efficient as possible about updates.
"""
- def __init__(self, table, data=None):
+ def __init__(self, table, data=None, loaded=False):
"""
Constructs an (unsaved) ``Item`` instance.
@@ -32,6 +34,10 @@ def __init__(self, table, data=None):
Optionally accepts a ``data`` parameter, which should be a dictionary
of the fields & values of the item.
+ Optionally accepts a ``loaded`` parameter, which should be a boolean.
+ ``True`` if it was preexisting data loaded from DynamoDB, ``False`` if
+ it's new data from the user. Default is ``False``.
+
Example::
>>> users = Table('users')
@@ -57,41 +63,28 @@ def __init__(self, table, data=None):
"""
self.table = table
- self._data = {}
+ self._loaded = loaded
self._orig_data = {}
- self._is_dirty = False
+ self._data = data
self._dynamizer = Dynamizer()
- if data:
- self._data = data
- self._is_dirty = True
+ if self._data is None:
+ self._data = {}
- for key in data.keys():
- self._orig_data[key] = NEWVALUE
+ if self._loaded:
+ self._orig_data = deepcopy(self._data)
def __getitem__(self, key):
return self._data.get(key, None)
def __setitem__(self, key, value):
- # Stow the original value if present, so we can track what's changed.
- if key in self._data:
- self._orig_data[key] = self._data[key]
- else:
- # Use a marker to indicate we've never seen a value for this key.
- self._orig_data[key] = NEWVALUE
-
self._data[key] = value
- self._is_dirty = True
def __delitem__(self, key):
if not key in self._data:
return
- # Stow the original value, so we can track what's changed.
- value = self._data[key]
del self._data[key]
- self._orig_data[key] = value
- self._is_dirty = True
def keys(self):
return self._data.keys()
@@ -112,10 +105,50 @@ def __iter__(self):
def __contains__(self, key):
return key in self._data
- def needs_save(self):
+ def _determine_alterations(self):
+ """
+ Checks the ``-orig_data`` against the ``_data`` to determine what
+ changes to the data are present.
+
+ Returns a dictionary containing the keys ``adds``, ``changes`` &
+ ``deletes``, containing the updated data.
+ """
+ alterations = {
+ 'adds': {},
+ 'changes': {},
+ 'deletes': [],
+ }
+
+ orig_keys = set(self._orig_data.keys())
+ data_keys = set(self._data.keys())
+
+ # Run through keys we know are in both for changes.
+ for key in orig_keys.intersection(data_keys):
+ if self._data[key] != self._orig_data[key]:
+ if self._is_storable(self._data[key]):
+ alterations['changes'][key] = self._data[key]
+ else:
+ alterations['deletes'].append(key)
+
+ # Run through additions.
+ for key in data_keys.difference(orig_keys):
+ if self._is_storable(self._data[key]):
+ alterations['adds'][key] = self._data[key]
+
+ # Run through deletions.
+ for key in orig_keys.difference(data_keys):
+ alterations['deletes'].append(key)
+
+ return alterations
+
+ def needs_save(self, data=None):
"""
Returns whether or not the data has changed on the ``Item``.
+ Optionally accepts a ``data`` argument, which accepts the output from
+ ``self._determine_alterations()`` if you've already called it. Typically
+ unnecessary to do. Default is ``None``.
+
Example:
>>> user.needs_save()
@@ -125,7 +158,17 @@ def needs_save(self):
True
"""
- return self._is_dirty
+ if data is None:
+ data = self._determine_alterations()
+
+ needs_save = False
+
+ for kind in ['adds', 'changes', 'deletes']:
+ if len(data[kind]):
+ needs_save = True
+ break
+
+ return needs_save
def mark_clean(self):
"""
@@ -143,23 +186,16 @@ def mark_clean(self):
False
"""
- self._orig_data = {}
- self._is_dirty = False
+ self._orig_data = deepcopy(self._data)
def mark_dirty(self):
"""
- Marks an ``Item`` instance as needing to be saved.
-
- Example:
-
- >>> user.needs_save()
- False
- >>> user.mark_dirty()
- >>> user.needs_save()
- True
+ DEPRECATED: Marks an ``Item`` instance as needing to be saved.
+ This method is no longer necessary, as the state tracking on ``Item``
+ has been improved to automatically detect proper state.
"""
- self._is_dirty = True
+ return
def load(self, data):
"""
@@ -175,7 +211,8 @@ def load(self, data):
for field_name, field_value in data.get('Item', {}).items():
self[field_name] = self._dynamizer.decode(field_value)
- self.mark_clean()
+ self._loaded = True
+ self._orig_data = deepcopy(self._data)
def get_keys(self):
"""
@@ -229,30 +266,42 @@ def build_expects(self, fields=None):
raise ValueError("Unknown key %s provided." % key)
# States:
- # * New field (_data & _orig_data w/ marker)
- # * Unchanged field (only _data)
- # * Modified field (_data & _orig_data)
- # * Deleted field (only _orig_data)
- if not key in self._orig_data:
+ # * New field (only in _data)
+ # * Unchanged field (in both _data & _orig_data, same data)
+ # * Modified field (in both _data & _orig_data, different data)
+ # * Deleted field (only in _orig_data)
+ orig_value = self._orig_data.get(key, NEWVALUE)
+ current_value = self._data.get(key, NEWVALUE)
+
+ if orig_value == current_value:
# Existing field unchanged.
- value = self._data[key]
+ value = current_value
else:
if key in self._data:
- if self._orig_data[key] is NEWVALUE:
+ if not key in self._orig_data:
# New field.
expects[key]['Exists'] = False
else:
# Existing field modified.
- value = self._orig_data[key]
+ value = orig_value
else:
# Existing field deleted.
- value = self._orig_data[key]
+ value = orig_value
if value is not None:
expects[key]['Value'] = self._dynamizer.encode(value)
return expects
+ def _is_storable(self, value):
+ # We need to prevent ``None``, empty string & empty set from
+ # heading to DDB, but allow false-y values like 0 & False make it.
+ if not value:
+ if not value in (0, 0.0, False):
+ return False
+
+ return True
+
def prepare_full(self):
"""
Runs through all fields & encodes them to be handed off to DynamoDB
@@ -265,6 +314,9 @@ def prepare_full(self):
final_data = {}
for key, value in self._data.items():
+ if not self._is_storable(value):
+ continue
+
final_data[key] = self._dynamizer.encode(value)
return final_data
@@ -280,22 +332,30 @@ def prepare_partial(self):
# This doesn't save on it's own. Rather, we prepare the datastructure
# and hand-off to the table to handle creation/update.
final_data = {}
+ fields = set()
+ alterations = self._determine_alterations()
- # Loop over ``_orig_data`` so that we only build up data that's changed.
- for key, value in self._orig_data.items():
- if key in self._data:
- # It changed.
- final_data[key] = {
- 'Action': 'PUT',
- 'Value': self._dynamizer.encode(self._data[key])
- }
- else:
- # It was deleted.
- final_data[key] = {
- 'Action': 'DELETE',
- }
+ for key, value in alterations['adds'].items():
+ final_data[key] = {
+ 'Action': 'PUT',
+ 'Value': self._dynamizer.encode(self._data[key])
+ }
+ fields.add(key)
- return final_data
+ for key, value in alterations['changes'].items():
+ final_data[key] = {
+ 'Action': 'PUT',
+ 'Value': self._dynamizer.encode(self._data[key])
+ }
+ fields.add(key)
+
+ for key in alterations['deletes']:
+ final_data[key] = {
+ 'Action': 'DELETE',
+ }
+ fields.add(key)
+
+ return final_data, fields
def partial_save(self):
"""
@@ -316,14 +376,28 @@ def partial_save(self):
>>> user.partial_save()
"""
- if not self.needs_save():
- return False
-
key = self.get_keys()
# Build a new dict of only the data we're changing.
- final_data = self.prepare_partial()
+ final_data, fields = self.prepare_partial()
+
+ if not final_data:
+ return False
+
+ # Remove the key(s) from the ``final_data`` if present.
+ # They should only be present if this is a new item, in which
+ # case we shouldn't be sending as part of the data to update.
+ for fieldname, value in key.items():
+ if fieldname in final_data:
+ del final_data[fieldname]
+
+ try:
+ # It's likely also in ``fields``, so remove it there too.
+ fields.remove(fieldname)
+ except KeyError:
+ pass
+
# Build expectations of only the fields we're planning to update.
- expects = self.build_expects(fields=self._orig_data.keys())
+ expects = self.build_expects(fields=fields)
returned = self.table._update_item(key, final_data, expects=expects)
# Mark the object as clean.
self.mark_clean()
@@ -359,7 +433,7 @@ def save(self, overwrite=False):
>>> user.save(overwrite=True)
"""
- if not self.needs_save():
+ if not self.needs_save() and not overwrite:
return False
final_data = self.prepare_full()
View
2  boto/dynamodb2/table.py
@@ -611,7 +611,7 @@ def _build_filters(self, filter_kwargs, using=QUERY_OPERATORS):
'AttributeValueList': [],
'ComparisonOperator': op,
}
-
+
# Special-case the ``NULL/NOT_NULL`` case.
if field_bits[-1] == 'null':
del lookup['AttributeValueList']
View
5 boto/ec2/__init__.py
@@ -72,9 +72,14 @@ def connect_to_region(region_name, **kw_params):
:return: A connection to the given region, or None if an invalid region
name is given
"""
+ if 'region' in kw_params and isinstance(kw_params['region'], RegionInfo)\
+ and region_name == kw_params['region'].name:
+ return EC2Connection(**kw_params)
+
for region in regions(**kw_params):
if region.name == region_name:
return region.connect(**kw_params)
+
return None
View
5 boto/ec2/autoscale/__init__.py
@@ -254,6 +254,11 @@ def create_scaling_policy(self, scaling_policy):
'AutoScalingGroupName': scaling_policy.as_name,
'PolicyName': scaling_policy.name,
'ScalingAdjustment': scaling_policy.scaling_adjustment}
+
+ if scaling_policy.adjustment_type == "PercentChangeInCapacity" and \
+ scaling_policy.min_adjustment_step is not None:
+ params['MinAdjustmentStep'] = scaling_policy.min_adjustment_step
+
if scaling_policy.cooldown is not None:
params['Cooldown'] = scaling_policy.cooldown
View
7 boto/ec2/autoscale/policy.py
@@ -115,6 +115,10 @@ def __init__(self, connection=None, **kwargs):
:type scaling_adjustment: int
:param scaling_adjustment: Value of adjustment (type specified in `adjustment_type`).
+ :type min_adjustment_step: int
+ :param min_adjustment_step: Value of min adjustment step required to
+ apply the scaling policy (only make sense when use `PercentChangeInCapacity` as adjustment_type.).
+
:type cooldown: int
:param cooldown: Time (in seconds) before Alarm related Scaling Activities can start after the previous Scaling Activity ends.
@@ -125,6 +129,7 @@ def __init__(self, connection=None, **kwargs):
self.scaling_adjustment = kwargs.get('scaling_adjustment', None)
self.cooldown = kwargs.get('cooldown', None)
self.connection = connection
+ self.min_adjustment_step = kwargs.get('min_adjustment_step', None)
def __repr__(self):
return 'ScalingPolicy(%s group:%s adjustment:%s)' % (self.name,
@@ -149,6 +154,8 @@ def endElement(self, name, value, connection):
self.cooldown = int(value)
elif name == 'AdjustmentType':
self.adjustment_type = value
+ elif name == 'MinAdjustmentStep':
+ self.min_adjustment_step = int(value)
def delete(self):
return self.connection.delete_policy(self.name, self.as_name)
View
66 boto/ec2/connection.py
@@ -847,6 +847,72 @@ def get_instance_attribute(self, instance_id, attribute):
return self.get_object('DescribeInstanceAttribute', params,
InstanceAttribute, verb='POST')
+ def modify_network_interface_attribute(self, interface_id, attr, value,
+ attachment_id=None):
+ """
+ Changes an attribute of a network interface.
+
+ :type interface_id: string
+ :param interface_id: The interface id. Looks like 'eni-xxxxxxxx'
+
+ :type attr: string
+ :param attr: The attribute you wish to change.
+
+ Learn more at http://docs.aws.amazon.com/AWSEC2/latest/API\
+ Reference/ApiReference-query-ModifyNetworkInterfaceAttribute.html
+
+ * description - Textual description of interface
+ * groupSet - List of security group ids or group objects
+ * sourceDestCheck - Boolean
+ * deleteOnTermination - Boolean. Must also specify attachment_id
+
+ :type value: string
+ :param value: The new value for the attribute
+
+ :rtype: bool
+ :return: Whether the operation succeeded or not
+
+ :type attachment_id: string
+ :param attachment_id: If you're modifying DeleteOnTermination you must
+ specify the attachment_id.
+ """
+ bool_reqs = (
+ 'deleteontermination',
+ 'sourcedestcheck',
+ )
+ if attr.lower() in bool_reqs:
+ if isinstance(value, bool):
+ if value:
+ value = 'true'
+ else:
+ value = 'false'
+ elif value not in ['true', 'false']:
+ raise ValueError('%s must be a boolean, "true", or "false"!'
+ % attr)
+
+ params = {'NetworkInterfaceId': interface_id}
+
+ # groupSet is handled differently from other arguments
+ if attr.lower() == 'groupset':
+ for idx, sg in enumerate(value):
+ if isinstance(sg, SecurityGroup):
+ sg = sg.id
+ params['SecurityGroupId.%s' % (idx + 1)] = sg
+ elif attr.lower() == 'description':
+ params['Description.Value'] = value
+ elif attr.lower() == 'sourcedestcheck':
+ params['SourceDestCheck.Value'] = value
+ elif attr.lower() == 'deleteontermination':
+ params['Attachment.DeleteOnTermination'] = value
+ if not attachment_id:
+ raise ValueError('You must also specify an attachment_id')
+ params['Attachment.AttachmentId'] = attachment_id
+ else:
+ raise ValueError('Unknown attribute "%s"' % (attr,))
+
+ return self.get_status(
+ 'ModifyNetworkInterfaceAttribute', params, verb='POST')
+
def modify_instance_attribute(self, instance_id, attribute, value):
"""
Changes an attribute of an instance
View
2  boto/ec2/networkinterface.py
@@ -59,6 +59,8 @@ def endElement(self, name, value, connection):
self.id = value
elif name == 'instanceId':
self.instance_id = value
+ elif name == 'deviceIndex':
+ self.device_index = int(value)
elif name == 'instanceOwnerId':
self.instance_owner_id = value
elif name == 'status':
View
14 boto/emr/__init__.py
@@ -43,25 +43,25 @@ def regions():
endpoint='elasticmapreduce.us-east-1.amazonaws.com',
connection_cls=EmrConnection),
RegionInfo(name='us-west-1',
- endpoint='us-west-1.elasticmapreduce.amazonaws.com',
+ endpoint='elasticmapreduce.us-west-1.amazonaws.com',
connection_cls=EmrConnection),
RegionInfo(name='us-west-2',
- endpoint='us-west-2.elasticmapreduce.amazonaws.com',
+ endpoint='elasticmapreduce.us-west-2.amazonaws.com',
connection_cls=EmrConnection),
RegionInfo(name='ap-northeast-1',
- endpoint='ap-northeast-1.elasticmapreduce.amazonaws.com',
+ endpoint='elasticmapreduce.ap-northeast-1.amazonaws.com',
connection_cls=EmrConnection),
RegionInfo(name='ap-southeast-1',
- endpoint='ap-southeast-1.elasticmapreduce.amazonaws.com',
+ endpoint='elasticmapreduce.ap-southeast-1.amazonaws.com',
connection_cls=EmrConnection),
RegionInfo(name='ap-southeast-2',
- endpoint='ap-southeast-2.elasticmapreduce.amazonaws.com',
+ endpoint='elasticmapreduce.ap-southeast-2.amazonaws.com',
connection_cls=EmrConnection),
RegionInfo(name='eu-west-1',
- endpoint='eu-west-1.elasticmapreduce.amazonaws.com',
+ endpoint='elasticmapreduce.eu-west-1.amazonaws.com',
connection_cls=EmrConnection),
RegionInfo(name='sa-east-1',
- endpoint='sa-east-1.elasticmapreduce.amazonaws.com',
+ endpoint='elasticmapreduce.sa-east-1.amazonaws.com',
connection_cls=EmrConnection),
]
View
2  boto/emr/connection.py
@@ -67,7 +67,7 @@ def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
validate_certs=validate_certs)
def _required_auth_capability(self):
- return ['emr']
+ return ['hmac-v4']
def describe_jobflow(self, jobflow_id):
"""
View
26 boto/rds/__init__.py
@@ -277,10 +277,10 @@ def create_dbinstance(self,
* SQL Server:
Not applicable and must be None.
- :type param_group: str
- :param param_group: Name of DBParameterGroup to associate with
- this DBInstance. If no groups are specified
- no parameter groups will be used.
+ :type param_group: str or ParameterGroup object
+ :param param_group: Name of DBParameterGroup or ParameterGroup instance
+ to associate with this DBInstance. If no groups are
+ specified no parameter groups will be used.
:type security_groups: list of str or list of DBSecurityGroup objects
:param security_groups: List of names of DBSecurityGroup to
@@ -399,7 +399,9 @@ def create_dbinstance(self,
'DBInstanceClass': instance_class,
'DBInstanceIdentifier': id,
'DBName': db_name,
- 'DBParameterGroupName': param_group,
+ 'DBParameterGroupName': (param_group.name
+ if isinstance(param_group, ParameterGroup)
+ else param_group),
'DBSubnetGroupName': db_subnet_group_name,
'Engine': engine,
'EngineVersion': engine_version,
@@ -512,10 +514,10 @@ def modify_dbinstance(self, id, param_group=None, security_groups=None,
:type id: str
:param id: Unique identifier for the new instance.
- :type param_group: str
- :param param_group: Name of DBParameterGroup to associate with
- this DBInstance. If no groups are specified
- no parameter groups will be used.
+ :type param_group: str or ParameterGroup object
+ :param param_group: Name of DBParameterGroup or ParameterGroup instance
+ to associate with this DBInstance. If no groups are
+ specified no parameter groups will be used.
:type security_groups: list of str or list of DBSecurityGroup objects
:param security_groups: List of names of DBSecurityGroup to authorize on
@@ -586,7 +588,9 @@ def modify_dbinstance(self, id, param_group=None, security_groups=None,
"""
params = {'DBInstanceIdentifier': id}
if param_group:
- params['DBParameterGroupName'] = param_group
+ params['DBParameterGroupName'] = (param_group.name
+ if isinstance(param_group, ParameterGroup)
+ else param_group)
if security_groups:
l = []
for group in security_groups:
@@ -1424,4 +1428,4 @@ def describe_option_group_options(self, engine_name=None,
params['Marker'] = marker
return self.get_list('DescribeOptionGroupOptions', params, [
('OptionGroupOptions', OptionGroupOption)
- ])
+ ])
View
8 boto/redshift/exceptions.py
@@ -180,3 +180,11 @@ class SubnetAlreadyInUse(JSONResponseError):
class InvalidParameterCombinationFault(JSONResponseError):
pass
+
+
+class AccessToSnapshotDeniedFault(JSONResponseError):
+ pass
+
+
+class UnauthorizedOperationFault(JSONResponseError):
+ pass
View
195 boto/redshift/layer1.py
@@ -89,44 +89,47 @@ class RedshiftConnection(AWSQueryConnection):
_faults = {
"ClusterNotFound": exceptions.ClusterNotFoundFault,
- "InvalidClusterSnapshotState": exceptions.InvalidClusterSnapshotStateFault,
- "ClusterSnapshotNotFound": exceptions.ClusterSnapshotNotFoundFault,
- "ClusterSecurityGroupQuotaExceeded": exceptions.ClusterSecurityGroupQuotaExceededFault,
- "ReservedNodeOfferingNotFound": exceptions.ReservedNodeOfferingNotFoundFault,
- "InvalidSubnet": exceptions.InvalidSubnet,
- "ClusterSubnetGroupQuotaExceeded": exceptions.ClusterSubnetGroupQuotaExceededFault,
- "InvalidClusterState": exceptions.InvalidClusterStateFault,
+ "InvalidClusterSubnetState": exceptions.InvalidClusterSubnetStateFault,
"InvalidClusterParameterGroupState": exceptions.InvalidClusterParameterGroupStateFault,
- "ClusterParameterGroupAlreadyExists": exceptions.ClusterParameterGroupAlreadyExistsFault,
- "InvalidClusterSecurityGroupState": exceptions.InvalidClusterSecurityGroupStateFault,
+ "ReservedNodeQuotaExceeded": exceptions.ReservedNodeQuotaExceededFault,
+ "InvalidClusterState": exceptions.InvalidClusterStateFault,
"InvalidRestore": exceptions.InvalidRestoreFault,
- "AuthorizationNotFound": exceptions.AuthorizationNotFoundFault,
- "ResizeNotFound": exceptions.ResizeNotFoundFault,
+ "ClusterSecurityGroupAlreadyExists": exceptions.ClusterSecurityGroupAlreadyExistsFault,
"NumberOfNodesQuotaExceeded": exceptions.NumberOfNodesQuotaExceededFault,
- "ClusterSnapshotAlreadyExists": exceptions.ClusterSnapshotAlreadyExistsFault,
+ "ReservedNodeOfferingNotFound": exceptions.ReservedNodeOfferingNotFoundFault,
+ "InsufficientClusterCapacity": exceptions.InsufficientClusterCapacityFault,
+ "UnauthorizedOperation": exceptions.UnauthorizedOperationFault,
+ "ClusterQuotaExceeded": exceptions.ClusterQuotaExceededFault,
+ "InvalidVPCNetworkState": exceptions.InvalidVPCNetworkStateFault,
+ "ClusterSnapshotNotFound": exceptions.ClusterSnapshotNotFoundFault,
"AuthorizationQuotaExceeded": exceptions.AuthorizationQuotaExceededFault,
- "AuthorizationAlreadyExists": exceptions.AuthorizationAlreadyExistsFault,
+ "InvalidSubne": exceptions.InvalidSubnet,
+ "ResizeNotFound": exceptions.ResizeNotFoundFault,
+ "ClusterSubnetGroupNotFound": exceptions.ClusterSubnetGroupNotFoundFault,
"ClusterSnapshotQuotaExceeded": exceptions.ClusterSnapshotQuotaExceededFault,
- "ReservedNodeNotFound": exceptions.ReservedNodeNotFoundFault,
- "ReservedNodeAlreadyExists": exceptions.ReservedNodeAlreadyExistsFault,
- "ClusterSecurityGroupAlreadyExists": exceptions.ClusterSecurityGroupAlreadyExistsFault,
- "ClusterParameterGroupNotFound": exceptions.ClusterParameterGroupNotFoundFault,
- "ReservedNodeQuotaExceeded": exceptions.ReservedNodeQuotaExceededFault,
- "ClusterQuotaExceeded": exceptions.ClusterQuotaExceededFault,
+ "AccessToSnapshotDenied": exceptions.AccessToSnapshotDeniedFault,
+ "InvalidClusterSecurityGroupState": exceptions.InvalidClusterSecurityGroupStateFault,
+ "NumberOfNodesPerClusterLimitExceeded": exceptions.NumberOfNodesPerClusterLimitExceededFault,
"ClusterSubnetQuotaExceeded": exceptions.ClusterSubnetQuotaExceededFault,
"UnsupportedOption": exceptions.UnsupportedOptionFault,
- "InvalidVPCNetworkState": exceptions.InvalidVPCNetworkStateFault,
"ClusterSecurityGroupNotFound": exceptions.ClusterSecurityGroupNotFoundFault,
- "InvalidClusterSubnetGroupState": exceptions.InvalidClusterSubnetGroupStateFault,
- "ClusterSubnetGroupAlreadyExists": exceptions.ClusterSubnetGroupAlreadyExistsFault,
- "NumberOfNodesPerClusterLimitExceeded": exceptions.NumberOfNodesPerClusterLimitExceededFault,
- "ClusterSubnetGroupNotFound": exceptions.ClusterSubnetGroupNotFoundFault,
- "ClusterParameterGroupQuotaExceeded": exceptions.ClusterParameterGroupQuotaExceededFault,
"ClusterAlreadyExists": exceptions.ClusterAlreadyExistsFault,
- "InsufficientClusterCapacity": exceptions.InsufficientClusterCapacityFault,
- "InvalidClusterSubnetState": exceptions.InvalidClusterSubnetStateFault,
+ "ClusterSnapshotAlreadyExists": exceptions.ClusterSnapshotAlreadyExistsFault,
+ "ReservedNodeAlreadyExists": exceptions.ReservedNodeAlreadyExistsFault,
+ "ClusterSubnetGroupQuotaExceeded": exceptions.ClusterSubnetGroupQuotaExceededFault,
+ "ClusterParameterGroupNotFound": exceptions.ClusterParameterGroupNotFoundFault,
+ "AuthorizationNotFound": exceptions.AuthorizationNotFoundFault,
+ "ClusterSecurityGroupQuotaExceeded": exceptions.ClusterSecurityGroupQuotaExceededFault,
+ "AuthorizationAlreadyExists": exceptions.AuthorizationAlreadyExistsFault,
+ "InvalidClusterSnapshotState": exceptions.InvalidClusterSnapshotStateFault,
+ "ClusterParameterGroupQuotaExceeded": exceptions.ClusterParameterGroupQuotaExceededFault,
+ "ClusterSubnetGroupAlreadyExists": exceptions.ClusterSubnetGroupAlreadyExistsFault,
+ "ReservedNodeNotFound": exceptions.ReservedNodeNotFoundFault,
+ "InvalidClusterSubnetGroupState": exceptions.InvalidClusterSubnetGroupStateFault,
+ "ClusterParameterGroupAlreadyExists": exceptions.ClusterParameterGroupAlreadyExistsFault,
"SubnetAlreadyInUse": exceptions.SubnetAlreadyInUse,
- "InvalidParameterCombination": exceptions.InvalidParameterCombinationFault,
+ "AccessToSnapshotDenied": exceptions.AccessToSnapshotDeniedFault,
+ "UnauthorizedOperation": exceptions.UnauthorizedOperationFault,
}
@@ -199,8 +202,43 @@ def authorize_cluster_security_group_ingress(self,
verb='POST',
path='/', params=params)
+ def authorize_snapshot_access(self, snapshot_identifier,
+ account_with_restore_access,
+ snapshot_cluster_identifier=None):
+ """
+ Authorizes the specified AWS customer account to restore the
+ specified snapshot.
+
+ For more information about working with snapshots, go to
+ `Amazon Redshift Snapshots`_ in the Amazon Redshift Management
+ Guide.
+
+ :type snapshot_identifier: string
+ :param snapshot_identifier: The identifier of the snapshot the account
+ is authorized to restore.
+
+ :type snapshot_cluster_identifier: string
+ :param snapshot_cluster_identifier:
+
+ :type account_with_restore_access: string
+ :param account_with_restore_access: The identifier of the AWS customer
+ account authorized to restore the specified snapshot.
+
+ """
+ params = {
+ 'SnapshotIdentifier': snapshot_identifier,
+ 'AccountWithRestoreAccess': account_with_restore_access,
+ }
+ if snapshot_cluster_identifier is not None:
+ params['SnapshotClusterIdentifier'] = snapshot_cluster_identifier
+ return self._make_request(
+ action='AuthorizeSnapshotAccess',
+ verb='POST',
+ path='/', params=params)
+
def copy_cluster_snapshot(self, source_snapshot_identifier,
- target_snapshot_identifier):
+ target_snapshot_identifier,
+ source_snapshot_cluster_identifier=None):
"""
Copies the specified automated cluster snapshot to a new
manual cluster snapshot. The source must be an automated
@@ -227,6 +265,9 @@ def copy_cluster_snapshot(self, source_snapshot_identifier,
+ Must be the identifier for a valid automated snapshot whose state is
"available".
+ :type source_snapshot_cluster_identifier: string
+ :param source_snapshot_cluster_identifier:
+
:type target_snapshot_identifier: string
:param target_snapshot_identifier:
The identifier given to the new manual snapshot.
@@ -245,6 +286,8 @@ def copy_cluster_snapshot(self, source_snapshot_identifier,
'SourceSnapshotIdentifier': source_snapshot_identifier,
'TargetSnapshotIdentifier': target_snapshot_identifier,
}
+ if source_snapshot_cluster_identifier is not None:
+ params['SourceSnapshotClusterIdentifier'] = source_snapshot_cluster_identifier
return self._make_request(
action='CopyClusterSnapshot',
verb='POST',
@@ -353,6 +396,8 @@ def create_cluster(self, cluster_identifier, node_type, master_username,
+ Must contain at least one uppercase letter.
+ Must contain at least one lowercase letter.
+ Must contain one number.
+ + Can be any printable ASCII character (ASCII code 33 to 126) except '
+ (single quote), " (double quote), \, /, @, or space.
:type cluster_security_groups: list
:param cluster_security_groups: A list of security groups to be
@@ -396,10 +441,7 @@ def create_cluster(self, cluster_identifier, node_type, master_username,
+ **US-East (Northern Virginia) Region:** 03:00-11:00 UTC
- + **US-West (Northern California) Region:** 06:00-14:00 UTC
- + **EU (Ireland) Region:** 22:00-06:00 UTC
- + **Asia Pacific (Singapore) Region:** 14:00-22:00 UTC
- + **Asia Pacific (Tokyo) Region: ** 17:00-03:00 UTC
+ + **US-West (Oregon) Region** 06:00-14:00 UTC
Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun
@@ -822,15 +864,19 @@ def delete_cluster_security_group(self, cluster_security_group_name):
verb='POST',
path='/', params=params)
- def delete_cluster_snapshot(self, snapshot_identifier):
+ def delete_cluster_snapshot(self, snapshot_identifier,
+ snapshot_cluster_identifier=None):
"""
Deletes the specified manual snapshot. The snapshot must be in
- the "available" state.
+ the "available" state, with no other users authorized to
+ access the snapshot.
Unlike automated snapshots, manual snapshots are retained even
after you delete your cluster. Amazon Redshift does not delete
your manual snapshots. You must delete manual snapshots
- explicitly to avoid getting charged.
+ explicitly to avoid getting charged. If other accounts are
+ authorized to access the snapshot, you must revoke all of the
+ authorizations before you can delete the snapshot.
:type snapshot_identifier: string
:param snapshot_identifier: The unique identifier of the manual
@@ -838,8 +884,13 @@ def delete_cluster_snapshot(self, snapshot_identifier):
Constraints: Must be the name of an existing snapshot that is in the
`available` state.
+ :type snapshot_cluster_identifier: string
+ :param snapshot_cluster_identifier:
+
"""
params = {'SnapshotIdentifier': snapshot_identifier, }
+ if snapshot_cluster_identifier is not None:
+ params['SnapshotClusterIdentifier'] = snapshot_cluster_identifier
return self._make_request(
action='DeleteClusterSnapshot',
verb='POST',
@@ -1021,12 +1072,14 @@ def describe_cluster_snapshots(self, cluster_identifier=None,
snapshot_identifier=None,
snapshot_type=None, start_time=None,
end_time=None, max_records=None,
- marker=None):
+ marker=None, owner_account=None):
"""
Returns one or more snapshot objects, which contain metadata
about your cluster snapshots. By default, this operation
returns information about all snapshots of all clusters that
- are owned by the AWS account.
- are owned by your AWS customer account. No information is
+ returned for snapshots owned by inactive AWS customer
+ accounts.
:type cluster_identifier: string
:param cluster_identifier: The identifier of the cluster for which
@@ -1071,6 +1124,13 @@ def describe_cluster_snapshots(self, cluster_identifier=None,
DescribeClusterSnapshots request to indicate the first snapshot
that the request will return.
+ :type owner_account: string
+ :param owner_account: The AWS customer account used to create or copy
+ the snapshot. Use this field to filter the results to snapshots
+ owned by a particular account. To describe snapshots you own,
+ either specify your AWS customer account, or do not specify the
+ parameter.
+
"""
params = {}
if cluster_identifier is not None:
@@ -1087,6 +1147,8 @@ def describe_cluster_snapshots(self, cluster_identifier=None,
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
+ if owner_account is not None:
+ params['OwnerAccount'] = owner_account
return self._make_request(
action='DescribeClusterSnapshots',
verb='POST',
@@ -1658,6 +1720,8 @@ def modify_cluster(self, cluster_identifier, cluster_type=None,
+ Must contain at least one uppercase letter.
+ Must contain at least one lowercase letter.
+ Must contain one number.
+ + Can be any printable ASCII character (ASCII code 33 to 126) except '
+ (single quote), " (double quote), \, /, @, or space.
:type cluster_parameter_group_name: string
:param cluster_parameter_group_name: The name of the cluster parameter
@@ -1913,11 +1977,13 @@ def reset_cluster_parameter_group(self, parameter_group_name,
path='/', params=params)
def restore_from_cluster_snapshot(self, cluster_identifier,
- snapshot_identifier, port=None,
- availability_zone=None,
+ snapshot_identifier,
+ snapshot_cluster_identifier=None,
+ port=None, availability_zone=None,
allow_version_upgrade=None,
cluster_subnet_group_name=None,
- publicly_accessible=None):
+ publicly_accessible=None,
+ owner_account=None):
"""
Creates a new cluster from a snapshot. Amazon Redshift creates
the resulting cluster with the same configuration as the
@@ -1956,6 +2022,9 @@ def restore_from_cluster_snapshot(self, cluster_identifier,
create the new cluster. This parameter isn't case sensitive.
Example: `my-snapshot-id`
+ :type snapshot_cluster_identifier: string
+ :param snapshot_cluster_identifier:
+
:type port: integer
:param port: The port number on which the cluster accepts connections.
Default: The same port as the original cluster.
@@ -1986,11 +2055,18 @@ def restore_from_cluster_snapshot(self, cluster_identifier,
:param publicly_accessible: If `True`, the cluster can be accessed from
a public network.
+ :type owner_account: string
+ :param owner_account: The AWS customer account used to create or copy
+ the snapshot. Required if you are restoring a snapshot you do not
+ own, optional if you own the snapshot.
+
"""
params = {
'ClusterIdentifier': cluster_identifier,
'SnapshotIdentifier': snapshot_identifier,
}
+ if snapshot_cluster_identifier is not None:
+ params['SnapshotClusterIdentifier'] = snapshot_cluster_identifier
if port is not None:
params['Port'] = port
if availability_zone is not None:
@@ -2003,6 +2079,8 @@ def restore_from_cluster_snapshot(self, cluster_identifier,
if publicly_accessible is not None:
params['PubliclyAccessible'] = str(
publicly_accessible).lower()
+ if owner_account is not None:
+ params['OwnerAccount'] = owner_account
return self._make_request(
action='RestoreFromClusterSnapshot',
verb='POST',
@@ -2060,6 +2138,41 @@ def revoke_cluster_security_group_ingress(self,
verb='POST',
path='/', params=params)
+ def revoke_snapshot_access(self, snapshot_identifier,
+ account_with_restore_access,
+ snapshot_cluster_identifier=None):
+ """
+ Removes the ability of the specified AWS customer account to
+ restore the specified snapshot. If the account is currently
+ restoring the snapshot, the restore will run to completion.
+
+ For more information about working with snapshots, go to
+ `Amazon Redshift Snapshots`_ in the Amazon Redshift Management
+ Guide.
+
+ :type snapshot_identifier: string
+ :param snapshot_identifier: The identifier of the snapshot that the
+ account can no longer access.
+
+ :type snapshot_cluster_identifier: string
+ :param snapshot_cluster_identifier:
+
+ :type account_with_restore_access: string
+ :param account_with_restore_access: The identifier of the AWS customer
+ account that can no longer restore the specified snapshot.
+
+ """
+ params = {
+ 'SnapshotIdentifier': snapshot_identifier,
+ 'AccountWithRestoreAccess': account_with_restore_access,
+ }
+ if snapshot_cluster_identifier is not None:
+ params['SnapshotClusterIdentifier'] = snapshot_cluster_identifier
+ return self._make_request(
+ action='RevokeSnapshotAccess',
+ verb='POST',
+ path='/', params=params)
+
def _make_request(self, action, verb, path, params):
params['ContentType'] = 'JSON'
response = self.make_request(action=action, verb='POST',
View
504 boto/sns/connection.py
@@ -30,7 +30,24 @@
class SNSConnection(AWSQueryConnection):
-
+ """
+ Amazon Simple Notification Service
+ Amazon Simple Notification Service (Amazon SNS) is a web service
+ that enables you to build distributed web-enabled applications.
+ Applications can use Amazon SNS to easily push real-time
+ notification messages to interested subscribers over multiple
+ delivery protocols. For more information about this product see
+ `http://aws.amazon.com/sns`_. For detailed information about
+ Amazon SNS features and their associated API calls, see the
+ `Amazon SNS Developer Guide`_.
+
+ We also provide SDKs that enable you to access Amazon SNS from
+ your preferred programming language. The SDKs contain
+ functionality that automatically takes care of tasks such as:
+ cryptographically signing your service requests, retrying
+ requests, and handling error responses. For a list of available
+ SDKs, go to `Tools for Amazon Web Services`_.
+ """
DefaultRegionName = 'us-east-1'
DefaultRegionEndpoint = 'sns.us-east-1.amazonaws.com'
APIVersion = '2010-03-31'
@@ -64,17 +81,10 @@ def get_all_topics(self, next_token=None):
this method.
"""
- params = {'ContentType': 'JSON'}
+ params = {}
if next_token:
params['NextToken'] = next_token
- response = self.make_request('ListTopics', params, '/', 'GET')
- body = response.read()
- if response.status == 200:
- return json.loads(body)
- else:
- boto.log.error('%s %s' % (response.status, response.reason))
- boto.log.error('%s' % body)
- raise self.ResponseError(response.status, response.reason, body)
+ return self._make_request('ListTopics', params)
def get_topic_attributes(self, topic):
"""
@@ -84,16 +94,8 @@ def get_topic_attributes(self, topic):
:param topic: The ARN of the topic.
"""
- params = {'ContentType': 'JSON',
- 'TopicArn': topic}
- response = self.make_request('GetTopicAttributes', params, '/', 'GET')
- body = response.read()
- if response.status == 200:
- return json.loads(body)
- else:
- boto.log.error('%s %s' % (response.status, response.reason))
- boto.log.error('%s' % body)
- raise self.ResponseError(response.status, response.reason, body)
+ params = {'TopicArn': topic}
+ return self._make_request('GetTopicAttributes', params)
def set_topic_attributes(self, topic, attr_name, attr_value):
"""
@@ -111,18 +113,10 @@ def set_topic_attributes(self, topic, attr_name, attr_value):
:param attr_value: The new value for the attribute.
"""
- params = {'ContentType': 'JSON',
- 'TopicArn': topic,
+ params = {'TopicArn': topic,
'AttributeName': attr_name,
'AttributeValue': attr_value}
- response = self.make_request('SetTopicAttributes', params, '/', 'GET')
- body = response.read()
- if response.status == 200:
- return json.loads(body)
- else:
- boto.log.error('%s %s' % (response.status, response.reason))
- boto.log.error('%s' % body)
- raise self.ResponseError(response.status, response.reason, body)
+ return self._make_request('SetTopicAttributes', params)
def add_permission(self, topic, label, account_ids, actions):
"""
@@ -144,19 +138,11 @@ def add_permission(self, topic, label, account_ids, actions):
specified principal(s).
"""
- params = {'ContentType': 'JSON',
- 'TopicArn': topic,
+ params = {'TopicArn': topic,
'Label': label}
self.build_list_params(params, account_ids, 'AWSAccountId.member')
self.build_list_params(params, actions, 'ActionName.member')
- response = self.make_request('AddPermission', params, '/', 'GET')
- body = response.read()
- if response.status == 200:
- return json.loads(body)
- else:
- boto.log.error('%s %s' % (response.status, response.reason))
- boto.log.error('%s' % body)
- raise self.ResponseError(response.status, response.reason, body)
+ return self._make_request('AddPermission', params)
def remove_permission(self, topic, label):
"""
@@ -170,17 +156,9 @@ def remove_permission(self, topic, label):
to be removed.
"""
- params = {'ContentType': 'JSON',
- 'TopicArn': topic,
+ params = {'TopicArn': topic,
'Label': label}
- response = self.make_request('RemovePermission', params, '/', 'GET')
- body = response.read()
- if response.status == 200:
- return json.loads(body)
- else:
- boto.log.error('%s %s' % (response.status, response.reason))
- boto.log.error('%s' % body)
- raise self.ResponseError(response.status, response.reason, body)
+ return self._make_request('RemovePermission', params)
def create_topic(self, topic):
"""
@@ -190,16 +168,8 @@ def create_topic(self, topic):
:param topic: The name of the new topic.
"""
- params = {'ContentType': 'JSON',
- 'Name': topic}
- response = self.make_request('CreateTopic', params, '/', 'GET')
- body = response.read()
- if response.status == 200:
- return json.loads(body)
- else:
- boto.log.error('%s %s' % (response.status, response.reason))
- boto.log.error('%s' % body)
- raise self.ResponseError(response.status, response.reason, body)
+ params = {'Name': topic}
+ return self._make_request('CreateTopic', params)
def delete_topic(self, topic):
"""
@@ -209,18 +179,11 @@ def delete_topic(self, topic):
:param topic: The ARN of the topic
"""
- params = {'ContentType': 'JSON',
- 'TopicArn': topic}
- response = self.make_request('DeleteTopic', params, '/', 'GET')
- body = response.read()
- if response.status == 200:
- return json.loads(body)
- else:
- boto.log.error('%s %s' % (response.status, response.reason))
- boto.log.error('%s' % body)
- raise self.ResponseError(response.status, response.reason, body)
+ params = {'TopicArn': topic}
+ return self._make_request('DeleteTopic', params, '/', 'GET')
- def publish(self, topic, message, subject=None):
+ def publish(self, topic=None, message=None, subject=None,
+ target_arn=None):
"""
        Publish a message to a topic or endpoint.
@@ -236,20 +199,23 @@ def publish(self, topic, message, subject=None):
:param subject: Optional parameter to be used as the "Subject"
line of the email notifications.
+ :type target_arn: string
+ :param target_arn:
+
"""
- params = {'ContentType': 'JSON',
- 'TopicArn': topic,
- 'Message': message}
- if subject:
+ if message is None:
+ # To be backwards compatible when message did not have
+ # a default value and topic and message were required
+ # args.
+ raise TypeError("'message' is a required parameter")
+ params = {'Message': message}
+ if subject is not None:
params['Subject'] = subject
- response = self.make_request('Publish', params, '/', 'GET')
- body = response.read()
- if response.status == 200:
- return json.loads(body)
- else:
- boto.log.error('%s %s' % (response.status, response.reason))
- boto.log.error('%s' % body)
- raise self.ResponseError(response.status, response.reason, body)
+ if topic is not None:
+ params['TopicArn'] = topic
+ if target_arn is not None:
+ params['TargetArn'] = target_arn
+ return self._make_request('Publish', params)
def subscribe(self, topic, protocol, endpoint):
"""
@@ -272,18 +238,10 @@ def subscribe(self, topic, protocol, endpoint):
* For https, this would be a URL beginning with https
* For sqs, this would be the ARN of an SQS Queue
"""
- params = {'ContentType': 'JSON',
- 'TopicArn': topic,
+ params = {'TopicArn': topic,
'Protocol': protocol,
'Endpoint': endpoint}
- response = self.make_request('Subscribe', params, '/', 'GET')
- body = response.read()
- if response.status == 200:
- return json.loads(body)
- else:
- boto.log.error('%s %s' % (response.status, response.reason))
- boto.log.error('%s' % body)
- raise self.ResponseError(response.status, response.reason, body)
+ return self._make_request('Subscribe', params)
def subscribe_sqs_queue(self, topic, queue):
"""
@@ -356,19 +314,10 @@ def confirm_subscription(self, topic, token,
of the subscription.
"""
- params = {'ContentType': 'JSON',
- 'TopicArn': topic,
- 'Token': token}
+ params = {'TopicArn': topic, 'Token': token}
if authenticate_on_unsubscribe:
params['AuthenticateOnUnsubscribe'] = 'true'
- response = self.make_request('ConfirmSubscription', params, '/', 'GET')
- body = response.read()
- if response.status == 200:
- return json.loads(body)
- else:
- boto.log.error('%s %s' % (response.status, response.reason))
- boto.log.error('%s' % body)
- raise self.ResponseError(response.status, response.reason, body)
+ return self._make_request('ConfirmSubscription', params)
def unsubscribe(self, subscription):
"""
@@ -379,16 +328,8 @@ def unsubscribe(self, subscription):
:param subscription: The ARN of the subscription to be deleted.
"""
- params = {'ContentType': 'JSON',
- 'SubscriptionArn': subscription}
- response = self.make_request('Unsubscribe', params, '/', 'GET')
- body = response.read()
- if response.status == 200:
- return json.loads(body)
- else:
- boto.log.error('%s %s' % (response.status, response.reason))
- boto.log.error('%s' % body)
- raise self.ResponseError(response.status, response.reason, body)
+ params = {'SubscriptionArn': subscription}
+ return self._make_request('Unsubscribe', params)
def get_all_subscriptions(self, next_token=None):
"""
@@ -399,17 +340,10 @@ def get_all_subscriptions(self, next_token=None):
this method.
"""
- params = {'ContentType': 'JSON'}
+ params = {}
if next_token:
params['NextToken'] = next_token
- response = self.make_request('ListSubscriptions', params, '/', 'GET')
- body = response.read()
- if response.status == 200:
- return json.loads(body)
- else:
- boto.log.error('%s %s' % (response.status, response.reason))
- boto.log.error('%s' % body)
- raise self.ResponseError(response.status, response.reason, body)
+ return self._make_request('ListSubscriptions', params)
def get_all_subscriptions_by_topic(self, topic, next_token=None):
"""
@@ -424,13 +358,329 @@ def get_all_subscriptions_by_topic(self, topic, next_token=None):
this method.
"""
- params = {'ContentType': 'JSON',
- 'TopicArn': topic}
+ params = {'TopicArn': topic}
if next_token:
params['NextToken'] = next_token
- response = self.make_request('ListSubscriptionsByTopic', params,
- '/', 'GET')
+ return self._make_request('ListSubscriptionsByTopic', params)
+
+ def create_platform_application(self, name=None, platform=None,
+ attributes=None):
+ """
+ The `CreatePlatformApplication` action creates a platform
+ application object for one of the supported push notification
+ services, such as APNS and GCM, to which devices and mobile
+ apps may register. You must specify PlatformPrincipal and
+ PlatformCredential attributes when using the
+ `CreatePlatformApplication` action. The PlatformPrincipal is
+ received from the notification service. For APNS/APNS_SANDBOX,
+ PlatformPrincipal is "SSL certificate". For GCM,
+ PlatformPrincipal is not applicable. For ADM,
+ PlatformPrincipal is "client id". The PlatformCredential is
+ also received from the notification service. For
+ APNS/APNS_SANDBOX, PlatformCredential is "private key". For
+ GCM, PlatformCredential is "API key". For ADM,
+ PlatformCredential is "client secret". The
+ PlatformApplicationArn that is returned when using
+ `CreatePlatformApplication` is then used as an attribute for
+ the `CreatePlatformEndpoint` action. For more information, see
+ `Using Amazon SNS Mobile Push Notifications`_.
+
+ :type name: string
+ :param name: Application names must be made up of only uppercase and
+ lowercase ASCII letters, numbers, underscores, hyphens, and
+ periods, and must be between 1 and 256 characters long.
+
+ :type platform: string
+ :param platform: The following platforms are supported: ADM (Amazon
+ Device Messaging), APNS (Apple Push Notification Service),
+ APNS_SANDBOX, and GCM (Google Cloud Messaging).
+
+ :type attributes: map
+ :param attributes: For a list of attributes, see
+ `SetPlatformApplicationAttributes`_
+
+ """
+ params = {}
+ if name is not None:
+ params['Name'] = name
+ if platform is not None:
+ params['Platform'] = platform
+ if attributes is not None:
+ params['Attributes'] = attributes
+ return self._make_request(action='CreatePlatformApplication',
+ params=params)
+
+ def set_platform_application_attributes(self,
+ platform_application_arn=None,
+ attributes=None):
+ """
+ The `SetPlatformApplicationAttributes` action sets the
+ attributes of the platform application object for the
+ supported push notification services, such as APNS and GCM.
+ For more information, see `Using Amazon SNS Mobile Push
+ Notifications`_.
+
+ :type platform_application_arn: string
+ :param platform_application_arn: PlatformApplicationArn for
+ SetPlatformApplicationAttributes action.
+
+ :type attributes: map
+ :param attributes:
+ A map of the platform application attributes. Attributes in this map
+ include the following:
+
+
+ + `PlatformCredential` -- The credential received from the notification
+ service. For APNS/APNS_SANDBOX, PlatformCredential is "private
+ key". For GCM, PlatformCredential is "API key". For ADM,
+ PlatformCredential is "client secret".
+ + `PlatformPrincipal` -- The principal received from the notification
+ service. For APNS/APNS_SANDBOX, PlatformPrincipal is "SSL
+ certificate". For GCM, PlatformPrincipal is not applicable. For
+ ADM, PlatformPrincipal is "client id".
+ + `EventEndpointCreated` -- Topic ARN to which EndpointCreated event
+ notifications should be sent.
+ + `EventEndpointDeleted` -- Topic ARN to which EndpointDeleted event
+ notifications should be sent.
+ + `EventEndpointUpdated` -- Topic ARN to which EndpointUpdate event
+ notifications should be sent.
+ + `EventDeliveryFailure` -- Topic ARN to which DeliveryFailure event
+ notifications should be sent upon Direct Publish delivery failure
+ (permanent) to one of the application's endpoints.
+
+ """
+ params = {}
+ if platform_application_arn is not None:
+ params['PlatformApplicationArn'] = platform_application_arn
+ if attributes is not None:
+ params['Attributes'] = attributes
+ return self._make_request(action='SetPlatformApplicationAttributes',
+ params=params)
+
+ def get_platform_application_attributes(self,
+ platform_application_arn=None):
+ """
+ The `GetPlatformApplicationAttributes` action retrieves the
+ attributes of the platform application object for the
+ supported push notification services, such as APNS and GCM.
+ For more information, see `Using Amazon SNS Mobile Push
+ Notifications`_.
+
+ :type platform_application_arn: string
+ :param platform_application_arn: PlatformApplicationArn for
+ GetPlatformApplicationAttributesInput.
+
+ """
+ params = {}
+ if platform_application_arn is not None:
+ params['PlatformApplicationArn'] = platform_application_arn
+ return self._make_request(action='GetPlatformApplicationAttributes',
+ params=params)
+
+ def list_platform_applications(self, next_token=None):
+ """
+ The `ListPlatformApplications` action lists the platform
+ application objects for the supported push notification
+ services, such as APNS and GCM. The results for
+ `ListPlatformApplications` are paginated and return a limited
+ list of applications, up to 100. If additional records are
+ available after the first page results, then a NextToken
+ string will be returned. To receive the next page, you call
+ `ListPlatformApplications` using the NextToken string received
+ from the previous call. When there are no more records to
+ return, NextToken will be null. For more information, see
+ `Using Amazon SNS Mobile Push Notifications`_.
+
+ :type next_token: string
+ :param next_token: NextToken string is used when calling
+ ListPlatformApplications action to retrieve additional records that
+ are available after the first page results.
+
+ """
+ params = {}
+ if next_token is not None:
+ params['NextToken'] = next_token
+ return self._make_request(action='ListPlatformApplications',
+ params=params)
+
+ def list_endpoints_by_platform_application(self,
+ platform_application_arn=None,
+ next_token=None):
+ """
+ The `ListEndpointsByPlatformApplication` action lists the
+ endpoints and endpoint attributes for devices in a supported
+ push notification service, such as GCM and APNS. The results
+ for `ListEndpointsByPlatformApplication` are paginated and
+ return a limited list of endpoints, up to 100. If additional
+ records are available after the first page results, then a
+ NextToken string will be returned. To receive the next page,
+ you call `ListEndpointsByPlatformApplication` again using the
+ NextToken string received from the previous call. When there
+ are no more records to return, NextToken will be null. For
+ more information, see `Using Amazon SNS Mobile Push
+ Notifications`_.
+
+ :type platform_application_arn: string
+ :param platform_application_arn: PlatformApplicationArn for
+ ListEndpointsByPlatformApplicationInput action.
+
+ :type next_token: string
+ :param next_token: NextToken string is used when calling
+ ListEndpointsByPlatformApplication action to retrieve additional
+ records that are available after the first page results.
+
+ """
+ params = {}
+ if platform_application_arn is not None:
+ params['PlatformApplicationArn'] = platform_application_arn
+ if next_token is not None:
+ params['NextToken'] = next_token
+ return self._make_request(action='ListEndpointsByPlatformApplication',
+ params=params)
+
+ def delete_platform_application(self, platform_application_arn=None):
+ """
+ The `DeletePlatformApplication` action deletes a platform
+ application object for one of the supported push notification
+ services, such as APNS and GCM. For more information, see
+ `Using Amazon SNS Mobile Push Notifications`_.
+
+ :type platform_application_arn: string
+ :param platform_application_arn: PlatformApplicationArn of platform
+ application object to delete.
+
+ """
+ params = {}
+ if platform_application_arn is not None:
+ params['PlatformApplicationArn'] = platform_application_arn
+ return self._make_request(action='DeletePlatformApplication',
+ params=params)
+
+ def create_platform_endpoint(self, platform_application_arn=None,
+ token=None, custom_user_data=None,
+ attributes=None):
+ """
+ The `CreatePlatformEndpoint` creates an endpoint for a device
+ and mobile app on one of the supported push notification
+ services, such as GCM and APNS. `CreatePlatformEndpoint`
+ requires the PlatformApplicationArn that is returned from
+ `CreatePlatformApplication`. The EndpointArn that is returned
+ when using `CreatePlatformEndpoint` can then be used by the
+ `Publish` action to send a message to a mobile app or by the
+ `Subscribe` action for subscription to a topic. For more
+ information, see `Using Amazon SNS Mobile Push
+ Notifications`_.
+
+ :type platform_application_arn: string
+ :param platform_application_arn: PlatformApplicationArn returned from
+         CreatePlatformApplication is used to create an endpoint.
+
+ :type token: string
+ :param token: Unique identifier created by the notification service for
+ an app on a device. The specific name for Token will vary,
+ depending on which notification service is being used. For example,
+ when using APNS as the notification service, you need the device
+ token. Alternatively, when using GCM or ADM, the device token
+ equivalent is called the registration ID.
+
+ :type custom_user_data: string
+ :param custom_user_data: Arbitrary user data to associate with the
+ endpoint. SNS does not use this data. The data must be in UTF-8
+ format and less than 2KB.
+
+ :type attributes: map
+ :param attributes: For a list of attributes, see
+ `SetEndpointAttributes`_.
+
+ """
+ params = {}
+ if platform_application_arn is not None:
+ params['PlatformApplicationArn'] = platform_application_arn
+ if token is not None:
+ params['Token'] = token
+ if custom_user_data is not None:
+ params['CustomUserData'] = custom_user_data
+ if attributes is not None:
+ params['Attributes'] = attributes
+ return self._make_request(action='CreatePlatformEndpoint',
+ params=params)
+
+ def delete_endpoint(self, endpoint_arn=None):
+ """
+ The `DeleteEndpoint` action, which is idempotent, deletes the
+ endpoint from SNS. For more information, see `Using Amazon SNS
+ Mobile Push Notifications`_.
+
+ :type endpoint_arn: string
+ :param endpoint_arn: EndpointArn of endpoint to delete.
+
+ """
+ params = {}
+ if endpoint_arn is not None:
+ params['EndpointArn'] = endpoint_arn
+ return self._make_request(action='DeleteEndpoint', params=params)
+
+ def set_endpoint_attributes(self, endpoint_arn=None, attributes=None):
+ """
+ The `SetEndpointAttributes` action sets the attributes for an
+ endpoint for a device on one of the supported push
+ notification services, such as GCM and APNS. For more
+ information, see `Using Amazon SNS Mobile Push
+ Notifications`_.
+
+ :type endpoint_arn: string
+ :param endpoint_arn: EndpointArn used for SetEndpointAttributes action.
+
+ :type attributes: map
+ :param attributes:
+ A map of the endpoint attributes. Attributes in this map include the
+ following:
+
+
+ + `CustomUserData` -- arbitrary user data to associate with the
+ endpoint. SNS does not use this data. The data must be in UTF-8
+ format and less than 2KB.
+ + `Enabled` -- flag that enables/disables delivery to the endpoint.
+ Message Processor will set this to false when a notification
+ service indicates to SNS that the endpoint is invalid. Users can
+ set it back to true, typically after updating Token.
+ + `Token` -- device token, also referred to as a registration id, for
+ an app and mobile device. This is returned from the notification
+ service when an app and mobile device are registered with the
+ notification service.
+
+ """
+ params = {}
+ if endpoint_arn is not None:
+ params['EndpointArn'] = endpoint_arn
+ if attributes is not None:
+ params['Attributes'] = attributes
+ return self._make_request(action='SetEndpointAttributes',
+ params=params)
+
+ def get_endpoint_attributes(self, endpoint_arn=None):
+ """
+ The `GetEndpointAttributes` retrieves the endpoint attributes
+ for a device on one of the supported push notification
+ services, such as GCM and APNS. For more information, see
+ `Using Amazon SNS Mobile Push Notifications`_.
+
+ :type endpoint_arn: string
+ :param endpoint_arn: EndpointArn for GetEndpointAttributes input.
+
+ """
+ params = {}
+ if endpoint_arn is not None:
+ params['EndpointArn'] = endpoint_arn
+ return self._make_request(action='GetEndpointAttributes',
+ params=params)
+
+ def _make_request(self, action, params, path='/', verb='GET'):
+ params['ContentType'] = 'JSON'
+ response = self.make_request(action=action, verb=verb,
+ path=path, params=params)
body = response.read()
+ boto.log.debug(body)
if response.status == 200:
return json.loads(body)
else:
View
23 docs/source/contributing.rst
@@ -202,3 +202,26 @@ and uses `restructured text`_ for the markup language.
.. _virtualenvwrapper: http://www.doughellmann.com/projects/virtualenvwrapper/
.. _sphinx: http://sphinx.pocoo.org/
.. _restructured text: http://sphinx.pocoo.org/rest.html
+
+
+Merging A Branch (Core Devs)
+============================
+
+* All features/bugfixes should go through a review.
+
+ * This includes new features added by core devs themselves. The usual
+ branch/pull-request/merge flow that happens for community contributions
+ should also apply to core.
+
+* Ensure there is proper test coverage. If there's a change in behavior, there
+ should be a test demonstrating the failure before the change & passing with
+ the change.
+
+ * This helps ensure we don't regress in the future as well.
+
+* Merging of pull requests is typically done with
+ ``git merge --no-ff <remote/branch_name>``.
+
+ * GitHub's big green button is probably OK for very small PRs (like doc
+ fixes), but you can't run tests on GH, so most things should get pulled
+ down locally.
View
1  docs/source/index.rst
@@ -111,6 +111,7 @@ Release Notes
.. toctree::
:titlesonly:
+ releasenotes/v2.10.0
releasenotes/v2.9.9
releasenotes/v2.9.8
releasenotes/v2.9.7
View
54 docs/source/releasenotes/v2.10.0.rst
@@ -0,0 +1,54 @@
+boto v2.10.0
+============
+
+:date: 2013/08/13
+
+This release adds Mobile Push Notification support to Amazon Simple Notification
+Service, better reporting for Amazon Redshift, SigV4 authorization for Amazon
+Elastic MapReduce & lots of bugfixes.
+
+
+Features
+--------
+
+* Added support for Mobile Push Notifications to SNS. This enables you to send
+ push notifications to mobile devices (such as iOS or Android) using SNS.
+ (:sha:`ccba574`)
+* Added support for better reporting within Redshift. (:sha:`9d55dd3`)
+* Switched Elastic MapReduce to use SigV4 for authorization. (:sha:`b80aa48`)
+
+
+Bugfixes
+--------
+
+* Added the ``MinAdjustmentType`` parameter to EC2 Autoscaling. (:issue:`1562`,
+ :issue:`1619`, :sha:`1760284`, :sha:`2a11fd9`, :sha:`2d14006` &
+ :sha:`b7f1ae1`)
+* Fixed how DynamoDB tracks changes to data in ``Item`` objects, fixing
+ failures with modified sets not being sent. (:issue:`1565`,
+ :sha:`b111fcf` & :sha:`812f9a6`)
+* Updated the CA certificates Boto ships with. (:issue:`1578`, :sha:`4dfadc8`)
+* Fixed how CloudSearch's ``Layer2`` object gets initialized. (:issue:`1629`,
+ :issue:`1630`, :sha:`40b3652` & :sha:`f797ff9`)
+* Fixed the ``-w`` flag in ``s3put``. (:issue:`1637`, :sha:`0865004` &
+ :sha:`3fe70ca`)
+* Added the ``ap-southeast-2`` endpoint for DynamoDB. (:issue:`1621`,
+ :sha:`501b637`)
+* Fixed test suite to run faster. (:sha:`243a67e`)
+* Fixed how non-JSON responses are caught from CloudSearch. (:issue:`1633`,
+ :issue:`1645`, :sha:`d5a5c01`, :sha:`954a50c`, :sha:`915d8ff` &
+ :sha:`4407fcb`)
+* Fixed how ``DeviceIndex`` is parsed from EC2. (:issue:`1632`, :issue:`1646`,
+ :sha:`ff15e1f`, :sha:`8337a0b` & :sha:`27c9b04`)
+* Fixed EC2's ``connect_to_region`` to respect the ``region`` parameter. (
+ :issue:`1616`, :issue:`1654`, :sha:`9c37256`, :sha:`5950d12` & :sha:`b7eebe8`)
+* Added ``modify_network_interface_attribute`` to EC2 connections.
+ (:issue:`1613`, :issue:`1656`, :sha:`e00b601`, :sha:`5b62f27`, :sha:`126f6e9`,
+ :sha:`bbfed1f` & :sha:`0c61293`)
+* Added support for ``param_group`` within RDS. (:issue:`1639`, :sha:`c47baf0`)
+* Added support for using ``Item.partial_save`` to create new records within
+ DynamoDBv2. (:issue:`1660`, :issue:`1521`, :sha:`bfa469f` & :sha:`58a13d7`)
+* Several documentation improvements/fixes:
+
+ * Updated guideline on how core should merge PRs. (:sha:`80a419c`)
+ * Fixed a typo in a CloudFront docstring. (:issue:`1657`, :sha:`1aa0621`)
View
2  tests/integration/cloudsearch/test_cert_verification.py
@@ -31,7 +31,7 @@
class CertVerificationTest(unittest.TestCase):
- rds = True
+ cloudsearch = True
ssl = True
def test_certs(self):
View
75 tests/integration/cloudsearch/test_layers.py
@@ -0,0 +1,75 @@
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Tests for Layer1 of Cloudsearch
+"""
+import time
+
+from tests.unit import unittest
+from boto.cloudsearch.layer1 import Layer1
+from boto.cloudsearch.layer2 import Layer2
+from boto.regioninfo import RegionInfo
+
+
+class CloudSearchLayer1Test(unittest.TestCase):
+ cloudsearch = True
+
+ def setUp(self):
+ super(CloudSearchLayer1Test, self).setUp()
+ self.layer1 = Layer1()
+ self.domain_name = 'test-%d' % int(time.time())
+
+ def test_create_domain(self):
+ resp = self.layer1.create_domain(self.domain_name)
+ self.addCleanup(self.layer1.delete_domain, self.domain_name)
+ self.assertTrue(resp.get('created', False))
+
+
+class CloudSearchLayer2Test(unittest.TestCase):
+ cloudsearch = True
+
+ def setUp(self):
+ super(CloudSearchLayer2Test, self).setUp()
+ self.layer2 = Layer2()
+ self.domain_name = 'test-%d' % int(time.time())
+
+ def test_create_domain(self):
+ domain = self.layer2.create_domain(self.domain_name)
+ self.addCleanup(domain.delete)
+ self.assertTrue(domain.created, False)
+ self.assertEqual(domain.domain_name, self.domain_name)
+ self.assertEqual(domain.num_searchable_docs, 0)
+
+ def test_initialization_regression(self):
+ us_west_2 = RegionInfo(
+ name='us-west-2',
+ endpoint='cloudsearch.us-west-2.amazonaws.com'
+ )
+ self.layer2 = Layer2(
+ region=us_west_2,
+ host='cloudsearch.us-west-2.amazonaws.com'
+ )
+ self.assertEqual(
+ self.layer2.layer1.host,
+ 'cloudsearch.us-west-2.amazonaws.com'
+ )
View
44 tests/integration/dynamodb2/test_highlevel.py
@@ -28,6 +28,7 @@
from tests.unit import unittest
from boto.dynamodb2 import exceptions
from boto.dynamodb2.fields import HashKey, RangeKey, KeysOnlyIndex
+from boto.dynamodb2.items import Item
from boto.dynamodb2.table import Table
from boto.dynamodb2.types import NUMBER
@@ -146,7 +147,10 @@ def test_integration(self):
self.assertEqual(check_name_again['first_name'], 'Joan')
# Reset it.
- jane.mark_dirty()
+ jane['username'] = 'jane'
+ jane['first_name'] = 'Jane'
+ jane['last_name'] = 'Doe'
+ jane['friend_count'] = 3
self.assertTrue(jane.save(overwrite=True))
# Test the partial update behavior.
@@ -176,9 +180,27 @@ def test_integration(self):
self.assertEqual(partial_jane['first_name'], 'Jacqueline')
# Reset it.
- jane.mark_dirty()
+ jane['username'] = 'jane'
+ jane['first_name'] = 'Jane'
+ jane['last_name'] = 'Doe'
+ jane['friend_count'] = 3
self.assertTrue(jane.save(overwrite=True))
+ # Ensure that partial saves of a brand-new object work.
+ sadie = Item(users, data={
+ 'username': 'sadie',
+ 'first_name': 'Sadie',
+ 'favorite_band': 'Zedd',
+ 'friend_count': 7
+ })
+ self.assertTrue(sadie.partial_save())
+ serverside_sadie = users.get_item(
+ username='sadie',
+ friend_count=7,
+ consistent=True
+ )
+ self.assertEqual(serverside_sadie['first_name'], 'Sadie')
+
# Test the eventually consistent query.
results = users.query(
username__eq='johndoe',
@@ -274,3 +296,21 @@ def test_integration(self):
)
# But it shouldn't break on more complex tables.
res = users.query(username__eq='johndoe')
+
+ # Test putting with/without sets.
+ mau5_created = users.put_item(data