
Merge branch 'release-2.9.6'

* release-2.9.6: (33 commits)
  Bumping version to 2.9.6
  Added release notes for v2.9.6.
  Added SigV4 support to SNS.
  Fix annoying typo in basic DynamoDB example
  Safely re-introduced the change from SHA: dec541.
  Revert "Preserve trailing / when canonicalizing URI path for signature V4"
  Preserve trailing / when canonicalizing URI path for signature V4
  Added a failing testcase for trailing path slashes in SigV4.
  Add new CloudSearch regions. Closes #1465.
  Pass generation query param to boto.gs.Key.open_read()
  Factored out how query args are constructed when fetching all keys.
  Added an exception to signal a retry should occur.
  Added the ``ap-northeast-1`` region to Redshift.
  Update connection.py: get_path when suppress_consec_slashes is False
  Added a failing test for overeager ``suppress_consec_slashes``.
  Added ``retrieve_inventory_job`` to ``Vault``.
  Fixed a bug where 400s w/ timeouts were not honored.
  RangeKey was missing from the import and there was two hashkeys in Table.create
  Deal with empty facets queries on cloudsearch (re #1366)
  Fix AttributeErrors thrown when LoadBalancerZones is used by adding endElement stub
  ...
2 parents 02815ba + 8e6c326 · commit 25be2b7de7065a32439fa13d0e88fc30b4788381 · @toastdriven committed on Jun 18, 2013
Showing with 806 additions and 86 deletions.
  1. +2 −2 README.rst
  2. +1 −1 boto/__init__.py
  3. +4 −1 boto/auth.py
  4. +10 −0 boto/cloudsearch/__init__.py
  5. +2 −1 boto/cloudsearch/search.py
  6. +13 −5 boto/connection.py
  7. +3 −0 boto/dynamodb2/items.py
  8. +23 −5 boto/ec2/autoscale/__init__.py
  9. +18 −0 boto/ec2/autoscale/scheduled.py
  10. +2 −0 boto/ec2/elb/loadbalancer.py
  11. +1 −1 boto/ec2/instancestatus.py
  12. +6 −6 boto/ec2/networkinterface.py
  13. +15 −0 boto/exception.py
  14. +21 −2 boto/glacier/vault.py
  15. +20 −0 boto/glacier/writer.py
  16. +31 −0 boto/gs/key.py
  17. +3 −0 boto/redshift/__init__.py
  18. +25 −14 boto/s3/bucket.py
  19. +8 −4 boto/s3/connection.py
  20. +55 −16 boto/s3/key.py
  21. +1 −1 boto/sns/connection.py
  22. +1 −1 boto/utils.py
  23. +2 −1 docs/source/migrations/dynamodb_v1_to_v2.rst
  24. +1 −1 docs/source/releasenotes/v2.9.5.rst
  25. +56 −0 docs/source/releasenotes/v2.9.6.rst
  26. +6 −0 tests/integration/s3/mock_storage_service.py
  27. +11 −1 tests/integration/s3/test_key.py
  28. 0 tests/integration/storage_uri/__init__.py
  29. +63 −0 tests/integration/storage_uri/test_storage_uri.py
  30. +18 −0 tests/unit/auth/test_sigv4.py
  31. +48 −16 tests/unit/cloudsearch/test_search.py
  32. +10 −0 tests/unit/dynamodb2/test_table.py
  33. +38 −0 tests/unit/ec2/autoscale/test_group.py
  34. 0 tests/unit/ec2/elb/__init__.py
  35. +33 −0 tests/unit/ec2/elb/test_loadbalancer.py
  36. +32 −0 tests/unit/ec2/test_instancestatus.py
  37. +51 −0 tests/unit/glacier/test_vault.py
  38. +45 −0 tests/unit/glacier/test_writer.py
  39. +52 −0 tests/unit/s3/test_bucket.py
  40. +51 −0 tests/unit/s3/test_key.py
  41. +2 −6 tests/unit/sns/test_connection.py
  42. +22 −1 tests/unit/test_connection.py
README.rst
@@ -1,9 +1,9 @@
####
boto
####
-boto 2.9.5
+boto 2.9.6
-Released: 28-May-2013
+Released: 18-June-2013
.. image:: https://travis-ci.org/boto/boto.png?branch=develop
:target: https://travis-ci.org/boto/boto
boto/__init__.py
@@ -36,7 +36,7 @@
import urlparse
from boto.exception import InvalidUriError
-__version__ = '2.9.5'
+__version__ = '2.9.6'
Version = __version__ # for backware compatibility
UserAgent = 'Boto/%s (%s)' % (__version__, sys.platform)
boto/auth.py
@@ -384,10 +384,13 @@ def signed_headers(self, headers_to_sign):
return ';'.join(l)
def canonical_uri(self, http_request):
+ path = http_request.auth_path
# Normalize the path.
- normalized = posixpath.normpath(http_request.auth_path)
+ normalized = posixpath.normpath(path)
# Then urlencode whatever's left.
encoded = urllib.quote(normalized)
+ if len(path) > 1 and path.endswith('/'):
+ encoded += '/'
return encoded
def payload(self, http_request):
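The change above works around posixpath.normpath() dropping a trailing slash, which would alter the SigV4 canonical URI and invalidate signatures for paths that legitimately end in '/'. A minimal standalone sketch of the same logic (the free function name is illustrative, not boto's API):

    import posixpath
    import urllib

    def canonical_uri(path):
        # normpath collapses '//' and '/./' but also strips a trailing '/',
        # so the slash is re-appended when the original path carried one.
        normalized = posixpath.normpath(path)
        encoded = urllib.quote(normalized)
        if len(path) > 1 and path.endswith('/'):
            encoded += '/'
        return encoded

    print(canonical_uri('/vault/jobs/'))   # '/vault/jobs/' -- trailing slash kept
    print(canonical_uri('/vault//jobs'))   # '/vault/jobs'  -- doubled slash collapsed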
boto/cloudsearch/__init__.py
@@ -38,6 +38,16 @@ def regions():
RegionInfo(name='eu-west-1',
endpoint='cloudsearch.eu-west-1.amazonaws.com',
connection_cls=boto.cloudsearch.layer1.Layer1),
+ RegionInfo(name='us-west-1',
+ endpoint='cloudsearch.us-west-1.amazonaws.com',
+ connection_cls=boto.cloudsearch.layer1.Layer1),
+ RegionInfo(name='us-west-2',
+ endpoint='cloudsearch.us-west-2.amazonaws.com',
+ connection_cls=boto.cloudsearch.layer1.Layer1),
+ RegionInfo(name='ap-southeast-1',
+ endpoint='cloudsearch.ap-southeast-1.amazonaws.com',
+ connection_cls=boto.cloudsearch.layer1.Layer1),
+
]
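With the three extra regions registered, callers can connect to them by name. A hedged usage sketch, assuming the usual connect_to_region() helper exposed by boto.cloudsearch and placeholder credentials:

    import boto.cloudsearch

    # Every region the SDK now knows about, including the three added above.
    print([r.name for r in boto.cloudsearch.regions()])

    conn = boto.cloudsearch.connect_to_region(
        'ap-southeast-1',
        aws_access_key_id='<access key>',
        aws_secret_access_key='<secret key>')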
boto/cloudsearch/search.py
@@ -54,7 +54,8 @@ def __init__(self, **attrs):
self.facets = {}
if 'facets' in attrs:
for (facet, values) in attrs['facets'].iteritems():
- self.facets[facet] = dict((k, v) for (k, v) in map(lambda x: (x['value'], x['count']), values['constraints']))
+ if 'constraints' in values:
+ self.facets[facet] = dict((k, v) for (k, v) in map(lambda x: (x['value'], x['count']), values['constraints']))
self.num_pages_needed = ceil(self.hits / self.query.real_size)
boto/connection.py
@@ -67,8 +67,10 @@
import boto.cacerts
from boto import config, UserAgent
-from boto.exception import AWSConnectionError, BotoClientError
+from boto.exception import AWSConnectionError
+from boto.exception import BotoClientError
from boto.exception import BotoServerError
+from boto.exception import PleaseRetryException
from boto.provider import Provider
from boto.resultset import ResultSet
@@ -598,7 +600,7 @@ def get_path(self, path='/'):
# https://groups.google.com/forum/#!topic/boto-dev/-ft0XPUy0y8
# You can override that behavior with the suppress_consec_slashes param.
if not self.suppress_consec_slashes:
- return self.path + re.sub('^/*', "", path)
+ return self.path + re.sub('^(/*)/', "\\1", path)
pos = path.find('?')
if pos >= 0:
params = path[pos:]
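The new pattern in get_path() is deliberately narrower than the old one: instead of stripping every leading slash from path, it removes only the single slash that would duplicate the one self.path already ends with (self.path defaults to '/'). A stdlib-only sketch of the difference:

    import re

    path = '//my-key'   # a key name that intentionally begins with '/'
    old = re.sub('^/*', '', path)          # 'my-key'  -- all leading slashes gone
    new = re.sub('^(/*)/', '\\1', path)    # '/my-key' -- only one slash removed
    print('/' + old)    # '/my-key'  : consecutive slashes silently suppressed
    print('/' + new)    # '//my-key' : consecutive slashes preserved, as requested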
@@ -878,6 +880,11 @@ def _mexe(self, request, sender=None, override_num_retries=None,
scheme == 'https')
response = None
continue
+ except PleaseRetryException, e:
+ boto.log.debug('encountered a retry exception: %s' % e)
+ connection = self.new_http_connection(request.host,
+ self.is_secure)
+ response = e.response
except self.http_exceptions, e:
for unretryable in self.http_unretryable_exceptions:
if isinstance(e, unretryable):
@@ -894,7 +901,7 @@ def _mexe(self, request, sender=None, override_num_retries=None,
# If we made it here, it's because we have exhausted our retries
# and stil haven't succeeded. So, if we have a response object,
# use it to raise an exception.
- # Otherwise, raise the exception that must have already h#appened.
+ # Otherwise, raise the exception that must have already happened.
if response:
raise BotoServerError(response.status, response.reason, body)
elif e:
@@ -930,13 +937,14 @@ def build_base_http_request(self, method, path, auth_path,
def make_request(self, method, path, headers=None, data='', host=None,
auth_path=None, sender=None, override_num_retries=None,
- params=None):
+ params=None, retry_handler=None):
"""Makes a request to the server, with stock multiple-retry logic."""
if params is None:
params = {}
http_request = self.build_base_http_request(method, path, auth_path,
params, headers, data, host)
- return self._mexe(http_request, sender, override_num_retries)
+ return self._mexe(http_request, sender, override_num_retries,
+ retry_handler=retry_handler)
def close(self):
"""(Optional) Close any open HTTP connections. This is non-destructive,
boto/dynamodb2/items.py
@@ -102,6 +102,9 @@ def values(self):
def items(self):
return self._data.items()
+ def get(self, key, default=None):
+ return self._data.get(key, default)
+
def __iter__(self):
for key in self._data:
yield self._data[key]
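The new Item.get() mirrors dict.get(), so an attribute that may be missing can be read without catching KeyError. A hedged usage sketch (table and key names are made up):

    from boto.dynamodb2.table import Table

    users = Table('users')                      # hypothetical table
    item = users.get_item(username='johndoe')   # hypothetical hash key
    nickname = item.get('nickname', 'n/a')      # default instead of KeyError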
boto/ec2/autoscale/__init__.py
@@ -552,9 +552,11 @@ def resume_processes(self, as_group, scaling_processes=None):
'ScalingProcesses')
return self.get_status('ResumeProcesses', params)
- def create_scheduled_group_action(self, as_group, name, time,
+ def create_scheduled_group_action(self, as_group, name, time=None,
desired_capacity=None,
- min_size=None, max_size=None):
+ min_size=None, max_size=None,
+ start_time=None, end_time=None,
+ recurrence=None):
"""
Creates a scheduled scaling action for a Auto Scaling group. If you
leave a parameter unspecified, the corresponding value remains
@@ -567,7 +569,7 @@ def create_scheduled_group_action(self, as_group, name, time,
:param name: Scheduled action name.
:type time: datetime.datetime
- :param time: The time for this action to start.
+ :param time: The time for this action to start. (Depracated)
:type desired_capacity: int
:param desired_capacity: The number of EC2 instances that should
@@ -578,10 +580,26 @@ def create_scheduled_group_action(self, as_group, name, time,
:type max_size: int
:param max_size: The minimum size for the new auto scaling group.
+
+ :type start_time: datetime.datetime
+ :param start_time: The time for this action to start. When StartTime and EndTime are specified with Recurrence, they form the boundaries of when the recurring action will start and stop.
+
+ :type end_time: datetime.datetime
+ :param end_time: The time for this action to end. When StartTime and EndTime are specified with Recurrence, they form the boundaries of when the recurring action will start and stop.
+
+ :type recurrence: string
+ :param recurrence: The time when recurring future actions will start. Start time is specified by the user following the Unix cron syntax format. EXAMPLE: '0 10 * * *'
"""
params = {'AutoScalingGroupName': as_group,
- 'ScheduledActionName': name,
- 'Time': time.isoformat()}
+ 'ScheduledActionName': name}
+ if start_time is not None:
+ params['StartTime'] = start_time.isoformat()
+ if end_time is not None:
+ params['EndTime'] = end_time.isoformat()
+ if recurrence is not None:
+ params['Recurrence'] = recurrence
+ if time:
+ params['Time'] = time.isoformat()
if desired_capacity is not None:
params['DesiredCapacity'] = desired_capacity
if min_size is not None:
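With the extra keyword arguments, a scheduled action can now be expressed either as a one-off time (the old, now-deprecated parameter) or as a recurring window. A hedged usage sketch; the group name, sizes, and dates are illustrative:

    from datetime import datetime
    import boto

    conn = boto.connect_autoscale()
    # Scale 'web-asg' every day at 10:00 UTC, but only between the two dates.
    conn.create_scheduled_group_action(
        'web-asg', 'daily-scale-up',
        min_size=4,
        desired_capacity=6,
        recurrence='0 10 * * *',
        start_time=datetime(2013, 7, 1),
        end_time=datetime(2013, 12, 31))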
boto/ec2/autoscale/scheduled.py
@@ -28,7 +28,11 @@ def __init__(self, connection=None):
self.connection = connection
self.name = None
self.action_arn = None
+ self.as_group = None
self.time = None
+ self.start_time = None
+ self.end_time = None
+ self.recurrence = None
self.desired_capacity = None
self.max_size = None
self.min_size = None
@@ -44,17 +48,31 @@ def endElement(self, name, value, connection):
self.desired_capacity = value
elif name == 'ScheduledActionName':
self.name = value
+ elif name == 'AutoScalingGroupName':
+ self.as_group = value
elif name == 'MaxSize':
self.max_size = int(value)
elif name == 'MinSize':
self.min_size = int(value)
elif name == 'ScheduledActionARN':
self.action_arn = value
+ elif name == 'Recurrence':
+ self.recurrence = value
elif name == 'Time':
try:
self.time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ')
except ValueError:
self.time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
+ elif name == 'StartTime':
+ try:
+ self.start_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ')
+ except ValueError:
+ self.start_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
+ elif name == 'EndTime':
+ try:
+ self.end_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ')
+ except ValueError:
+ self.end_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
else:
setattr(self, name, value)
boto/ec2/elb/loadbalancer.py
@@ -42,6 +42,8 @@ def startElement(self, name, attrs, connection):
if name == 'AvailabilityZones':
return self.zones
+ def endElement(self, name, value, connection):
+ pass
class LoadBalancer(object):
"""
boto/ec2/instancestatus.py
@@ -207,6 +207,6 @@ def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
- if name == 'NextToken':
+ if name == 'nextToken':
self.next_token = value
setattr(self, name, value)
boto/ec2/networkinterface.py
@@ -193,8 +193,8 @@ def __init__(self, *interfaces):
self.extend(interfaces)
def build_list_params(self, params, prefix=''):
- for i, spec in enumerate(self, 1):
- full_prefix = '%sNetworkInterface.%s.' % (prefix, i)
+ for i, spec in enumerate(self):
+ full_prefix = '%sNetworkInterface.%s.' % (prefix, i+1)
if spec.network_interface_id is not None:
params[full_prefix + 'NetworkInterfaceId'] = \
str(spec.network_interface_id)
@@ -215,13 +215,13 @@ def build_list_params(self, params, prefix=''):
params[full_prefix + 'PrivateIpAddress'] = \
str(spec.private_ip_address)
if spec.groups is not None:
- for j, group_id in enumerate(spec.groups, 1):
- query_param_key = '%sSecurityGroupId.%s' % (full_prefix, j)
+ for j, group_id in enumerate(spec.groups):
+ query_param_key = '%sSecurityGroupId.%s' % (full_prefix, j+1)
params[query_param_key] = str(group_id)
if spec.private_ip_addresses is not None:
- for k, ip_addr in enumerate(spec.private_ip_addresses, 1):
+ for k, ip_addr in enumerate(spec.private_ip_addresses):
query_param_key_prefix = (
- '%sPrivateIpAddresses.%s' % (full_prefix, k))
+ '%sPrivateIpAddresses.%s' % (full_prefix, k+1))
params[query_param_key_prefix + '.PrivateIpAddress'] = \
str(ip_addr.private_ip_address)
if ip_addr.primary is not None:
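The enumerate() changes above are behaviour-preserving: enumerate(seq, 1) and enumerate(seq) with i + 1 yield the same 1-based indices, but the two-argument form only exists on Python 2.6+, so the manual form presumably keeps older interpreters supported. For illustration (values are made up):

    params = {}
    groups = ['sg-11111111', 'sg-22222222']   # hypothetical security group ids
    for j, group_id in enumerate(groups):
        # AWS query parameters are 1-based, hence the explicit j + 1.
        params['NetworkInterface.1.SecurityGroupId.%s' % (j + 1)] = str(group_id)
    # -> {'NetworkInterface.1.SecurityGroupId.1': 'sg-11111111',
    #     'NetworkInterface.1.SecurityGroupId.2': 'sg-22222222'}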
boto/exception.py
@@ -474,3 +474,18 @@ class TooManyRecordsException(Exception):
def __init__(self, message):
Exception.__init__(self, message)
self.message = message
+
+
+class PleaseRetryException(Exception):
+ """
+ Indicates a request should be retried.
+ """
+ def __init__(self, message, response=None):
+ self.message = message
+ self.response = response
+
+ def __repr__(self):
+ return 'PleaseRetryException("%s", %s)' % (
+ self.message,
+ self.response
+ )
boto/glacier/vault.py
@@ -315,8 +315,8 @@ def retrieve_inventory(self, sns_topic=None,
sends notification when the job is completed and the output
is ready for you to download.
- :rtype: :class:`boto.glacier.job.Job`
- :return: A Job object representing the retrieval job.
+ :rtype: str
+ :return: The ID of the job
"""
job_data = {'Type': 'inventory-retrieval'}
if sns_topic is not None:
@@ -327,6 +327,25 @@ def retrieve_inventory(self, sns_topic=None,
response = self.layer1.initiate_job(self.name, job_data)
return response['JobId']
+ def retrieve_inventory_job(self, **kwargs):
+ """
+ Identical to ``retrieve_inventory``, but returns a ``Job`` instance
+ instead of just the job ID.
+
+ :type description: str
+ :param description: An optional description for the job.
+
+ :type sns_topic: str
+ :param sns_topic: The Amazon SNS topic ARN where Amazon Glacier
+ sends notification when the job is completed and the output
+ is ready for you to download.
+
+ :rtype: :class:`boto.glacier.job.Job`
+ :return: A Job object representing the retrieval job.
+ """
+ job_id = self.retrieve_inventory(**kwargs)
+ return self.get_job(job_id)
+
def delete_archive(self, archive_id):
"""
This operation deletes an archive from the vault.
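retrieve_inventory_job() is a thin convenience wrapper over retrieve_inventory(). A hedged usage sketch (the vault name is made up):

    import boto

    glacier = boto.connect_glacier()          # Layer2 connection
    vault = glacier.get_vault('my-backups')   # hypothetical vault name

    job_id = vault.retrieve_inventory()       # returns only the job ID string
    job = vault.retrieve_inventory_job()      # returns a boto.glacier.job.Job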
boto/glacier/writer.py
@@ -234,6 +234,26 @@ def get_archive_id(self):
return self.uploader.archive_id
@property
+ def current_tree_hash(self):
+ """
+ Returns the current tree hash for the data that's been written
+ **so far**.
+
+ Only once the writing is complete is the final tree hash returned.
+ """
+ return tree_hash(self.uploader._tree_hashes)
+
+ @property
+ def current_uploaded_size(self):
+ """
+ Returns the current uploaded size for the data that's been written
+ **so far**.
+
+ Only once the writing is complete is the final uploaded size returned.
+ """
+ return self.uploader._uploaded_size
+
+ @property
def upload_id(self):
return self.uploader.upload_id
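The two new properties expose upload progress while a Writer is still open, which is useful for progress reporting before close() fixes the final values. A hedged sketch; the vault name and local file are illustrative:

    import boto

    glacier = boto.connect_glacier()
    vault = glacier.get_vault('my-backups')               # hypothetical vault
    writer = vault.create_archive_writer(description='nightly dump')

    with open('/tmp/dump.tar', 'rb') as fp:               # hypothetical file
        for chunk in iter(lambda: fp.read(4 * 1024 * 1024), ''):
            writer.write(chunk)
            # Bytes of completed parts so far; the running tree hash of the
            # same data is available as writer.current_tree_hash.
            print(writer.current_uploaded_size)

    writer.close()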
boto/gs/key.py
@@ -118,6 +118,37 @@ def handle_addl_headers(self, headers):
elif key == 'x-goog-component-count':
self.component_count = int(value)
+ def open_read(self, headers=None, query_args='',
+ override_num_retries=None, response_headers=None):
+ """
+ Open this key for reading
+
+ :type headers: dict
+ :param headers: Headers to pass in the web request
+
+ :type query_args: string
+ :param query_args: Arguments to pass in the query string
+ (ie, 'torrent')
+
+ :type override_num_retries: int
+ :param override_num_retries: If not None will override configured
+ num_retries parameter for underlying GET.
+
+ :type response_headers: dict
+ :param response_headers: A dictionary containing HTTP
+ headers/values that will override any headers associated
+ with the stored object in the response. See
+ http://goo.gl/EWOPb for details.
+ """
+ # For GCS we need to include the object generation in the query args.
+ # The rest of the processing is handled in the parent class.
+ if self.generation:
+ if query_args:
+ query_args += '&'
+ query_args += 'generation=%s' % self.generation
+ super(Key, self).open_read(headers=headers, query_args=query_args,
+ override_num_retries=override_num_retries,
+ response_headers=response_headers)
def get_file(self, fp, headers=None, cb=None, num_cb=10,
torrent=False, version_id=None, override_num_retries=None,
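The override makes a read of a versioned Google Cloud Storage object stick to the key's generation. A hedged sketch using the storage_uri helper; bucket and object names are made up:

    import boto

    uri = boto.storage_uri('my-bucket/my-object', 'gs')   # hypothetical names
    key = uri.get_key()

    # open_read() now appends generation=<n> to the query string whenever the
    # key carries a generation, pinning the read to that object version.
    key.open_read()
    data = key.read()
    key.close()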