Skip to content
Browse files

Merge branch 'release-2.23.0'

  • Loading branch information...
2 parents 487ea4e + 9f4e4e7 commit 5b3d2053d443fc29271b2045b09adb515949394e @toastdriven toastdriven committed Jan 10, 2014
Showing with 1,769 additions and 622 deletions.
  1. +2 −2 README.rst
  2. +1 −1 boto/__init__.py
  3. +6 −6 boto/cloudfront/distribution.py
  4. +1 −1 boto/cloudfront/identity.py
  5. +1 −1 boto/cloudfront/invalidation.py
  6. +1 −1 boto/cloudsearch/layer1.py
  7. +7 −3 boto/connection.py
  8. +1 −1 boto/core/dictresponse.py
  9. +3 −3 boto/dynamodb/item.py
  10. +3 −0 boto/dynamodb/types.py
  11. +55 −4 boto/dynamodb2/table.py
  12. +9 −5 boto/ec2/cloudwatch/__init__.py
  13. +78 −16 boto/ec2/connection.py
  14. +1 −1 boto/ec2/elb/loadbalancer.py
  15. +1 −1 boto/ecs/__init__.py
  16. +3 −3 boto/ecs/item.py
  17. +2 −0 boto/glacier/concurrent.py
  18. +27 −2 boto/glacier/job.py
  19. +861 −221 boto/glacier/layer1.py
  20. +40 −1 boto/glacier/vault.py
  21. +9 −9 boto/gs/key.py
  22. +5 −7 boto/gs/resumable_upload_handler.py
  23. +1 −1 boto/handler.py
  24. +1 −1 boto/jsonresponse.py
  25. +1 −1 boto/manage/cmdshell.py
  26. +1 −1 boto/manage/server.py
  27. +1 −1 boto/manage/task.py
  28. +8 −8 boto/manage/volume.py
  29. +1 −1 boto/mashups/order.py
  30. +1 −1 boto/mturk/layoutparam.py
  31. +1 −1 boto/mturk/qualification.py
  32. +19 −14 boto/mws/connection.py
  33. +21 −5 boto/provider.py
  34. +1 −1 boto/pyami/config.py
  35. +1 −1 boto/pyami/installers/ubuntu/ebs.py
  36. +5 −5 boto/rds/__init__.py
  37. +1 −1 boto/rds/dbsubnetgroup.py
  38. +1 −1 boto/rds/parametergroup.py
  39. +7 −7 boto/route53/record.py
  40. +2 −2 boto/s3/connection.py
  41. +11 −11 boto/s3/key.py
  42. +1 −1 boto/s3/resumable_download_handler.py
  43. +1 −1 boto/sdb/connection.py
  44. +1 −1 boto/sdb/db/key.py
  45. +9 −9 boto/sdb/db/manager/sdbmanager.py
  46. +3 −3 boto/sdb/db/manager/xmlmanager.py
  47. +1 −1 boto/sdb/db/model.py
  48. +8 −8 boto/sdb/db/property.py
  49. +1 −1 boto/sdb/db/query.py
  50. +9 −9 boto/sdb/db/sequence.py
  51. +1 −1 boto/sdb/item.py
  52. +3 −3 boto/sdb/queryresultset.py
  53. +1 −1 boto/services/message.py
  54. +1 −1 boto/sqs/message.py
  55. +207 −215 boto/support/layer1.py
  56. +1 −0 docs/source/index.rst
  57. +8 −0 docs/source/ref/autoscale.rst
  58. +49 −0 docs/source/releasenotes/v2.23.0.rst
  59. +4 −1 scripts/git-release-notes.py
  60. +10 −10 tests/integration/__init__.py
  61. +10 −0 tests/integration/dynamodb2/test_highlevel.py
  62. +12 −0 tests/unit/dynamodb/test_types.py
  63. +77 −1 tests/unit/dynamodb2/test_table.py
  64. +45 −0 tests/unit/ec2/test_connection.py
  65. +22 −0 tests/unit/glacier/test_job.py
  66. +17 −0 tests/unit/glacier/test_layer2.py
  67. +27 −2 tests/unit/mws/test_connection.py
  68. +38 −0 tests/unit/provider/test_provider.py
View
4 README.rst
@@ -1,9 +1,9 @@
####
boto
####
-boto 2.22.1
+boto 2.23.0
-Released: 6-January-2014
+Released: 10-January-2014
.. image:: https://travis-ci.org/boto/boto.png?branch=develop
:target: https://travis-ci.org/boto/boto
View
2 boto/__init__.py
@@ -37,7 +37,7 @@
import urlparse
from boto.exception import InvalidUriError
-__version__ = '2.22.1'
+__version__ = '2.23.0'
Version = __version__ # for backward compatibility
# http://bugs.python.org/issue7980
View
12 boto/cloudfront/distribution.py
@@ -350,11 +350,11 @@ def update(self, enabled=None, cnames=None, comment=None):
self.config.cnames, self.config.comment,
self.config.trusted_signers,
self.config.default_root_object)
- if enabled != None:
+ if enabled is not None:
new_config.enabled = enabled
- if cnames != None:
+ if cnames is not None:
new_config.cnames = cnames
- if comment != None:
+ if comment is not None:
new_config.comment = comment
self.etag = self.connection.set_distribution_config(self.id, self.etag, new_config)
self.config = new_config
@@ -730,11 +730,11 @@ def update(self, enabled=None, cnames=None, comment=None):
self.config.cnames,
self.config.comment,
self.config.trusted_signers)
- if enabled != None:
+ if enabled is not None:
new_config.enabled = enabled
- if cnames != None:
+ if cnames is not None:
new_config.cnames = cnames
- if comment != None:
+ if comment is not None:
new_config.comment = comment
self.etag = self.connection.set_streaming_distribution_config(self.id,
self.etag,
View
2 boto/cloudfront/identity.py
@@ -52,7 +52,7 @@ def update(self, comment=None):
new_config = OriginAccessIdentityConfig(self.connection,
self.config.caller_reference,
self.config.comment)
- if comment != None:
+ if comment is not None:
new_config.comment = comment
self.etag = self.connection.set_origin_identity_config(self.id, self.etag, new_config)
self.config = new_config
View
2 boto/cloudfront/invalidation.py
@@ -75,7 +75,7 @@ def escape(self, p):
def to_xml(self):
"""Get this batch as XML"""
- assert self.connection != None
+ assert self.connection is not None
s = '<?xml version="1.0" encoding="UTF-8"?>\n'
s += '<InvalidationBatch xmlns="http://cloudfront.amazonaws.com/doc/%s/">\n' % self.connection.Version
for p in self.paths:
View
2 boto/cloudsearch/layer1.py
@@ -88,7 +88,7 @@ def get_response(self, doc_path, action, params, path='/',
for p in doc_path:
inner = inner.get(p)
if not inner:
- return None if list_marker == None else []
+ return None if list_marker is None else []
if isinstance(inner, list):
return inner
else:
View
10 boto/connection.py
@@ -434,6 +434,10 @@ def __init__(self, host, aws_access_key_id=None,
:keyword str aws_secret_access_key: Your AWS Secret Access Key
(provided by Amazon). If none is specified, the value in your
``AWS_SECRET_ACCESS_KEY`` environmental variable is used.
+ :keyword str security_token: The security token associated with
+ temporary credentials issued by STS. Optional unless using
+ temporary credentials. If none is specified, the environment
+ variable ``AWS_SECURITY_TOKEN`` is used if defined.
:type is_secure: boolean
:param is_secure: Whether the connection is over SSL
@@ -680,7 +684,7 @@ def handle_proxy(self, proxy, proxy_port, proxy_user, proxy_pass):
self.proxy_port = self.port
self.no_proxy = os.environ.get('no_proxy', '') or os.environ.get('NO_PROXY', '')
- self.use_proxy = (self.proxy != None)
+ self.use_proxy = (self.proxy is not None)
def get_http_connection(self, host, port, is_secure):
conn = self._pool.get_http_connection(host, port, is_secure)
@@ -982,11 +986,11 @@ def build_base_http_request(self, method, path, auth_path,
path = self.get_path(path)
if auth_path is not None:
auth_path = self.get_path(auth_path)
- if params == None:
+ if params is None:
params = {}
else:
params = params.copy()
- if headers == None:
+ if headers is None:
headers = {}
else:
headers = headers.copy()
View
2 boto/core/dictresponse.py
@@ -47,7 +47,7 @@ def __init__(self, root_node, connection):
def startElement(self, name, attrs):
self.current_text = ''
t = self.nodes[-1][1].startElement(name, attrs, self.connection)
- if t != None:
+ if t is not None:
if isinstance(t, tuple):
self.nodes.append(t)
else:
View
6 boto/dynamodb/item.py
@@ -41,13 +41,13 @@ def __init__(self, table, hash_key=None, range_key=None, attrs=None):
self._updates = None
self._hash_key_name = self.table.schema.hash_key_name
self._range_key_name = self.table.schema.range_key_name
- if attrs == None:
+ if attrs is None:
attrs = {}
- if hash_key == None:
+ if hash_key is None:
hash_key = attrs.get(self._hash_key_name, None)
self[self._hash_key_name] = hash_key
if self._range_key_name:
- if range_key == None:
+ if range_key is None:
range_key = attrs.get(self._range_key_name, None)
self[self._range_key_name] = range_key
self._updates = {}
View
3 boto/dynamodb/types.py
@@ -136,6 +136,9 @@ def dynamize_value(val):
class Binary(object):
def __init__(self, value):
+ if not isinstance(value, basestring):
+ raise TypeError('Value must be a string of binary data!')
+
self.value = value
def encode(self):
View
59 boto/dynamodb2/table.py
@@ -8,6 +8,7 @@
from boto.dynamodb2.layer1 import DynamoDBConnection
from boto.dynamodb2.results import ResultSet, BatchGetResultSet
from boto.dynamodb2.types import Dynamizer, FILTER_OPERATORS, QUERY_OPERATORS
+from boto.exception import JSONResponseError
class Table(object):
@@ -436,7 +437,7 @@ def _encode_keys(self, keys):
return raw_key
- def get_item(self, consistent=False, **kwargs):
+ def get_item(self, consistent=False, attributes=None, **kwargs):
"""
Fetches an item (record) from a table in DynamoDB.
@@ -448,6 +449,10 @@ def get_item(self, consistent=False, **kwargs):
a consistent (but more expensive) read from DynamoDB.
(Default: ``False``)
+ Optionally accepts an ``attributes`` parameter, which should be a
+ list of fieldnames to fetch. (Default: ``None``, which means all fields
+ should be fetched)
+
Returns an ``Item`` instance containing all the data for that record.
Example::
@@ -480,12 +485,52 @@ def get_item(self, consistent=False, **kwargs):
item_data = self.connection.get_item(
self.table_name,
raw_key,
+ attributes_to_get=attributes,
consistent_read=consistent
)
item = Item(self)
item.load(item_data)
return item
+ def has_item(self, **kwargs):
+ """
+ Return whether an item (record) exists within a table in DynamoDB.
+
+ To specify the key of the item you'd like to get, you can specify the
+ key attributes as kwargs.
+
+ Optionally accepts a ``consistent`` parameter, which should be a
+ boolean. If you provide ``True``, it will perform
+ a consistent (but more expensive) read from DynamoDB.
+ (Default: ``False``)
+
+ Optionally accepts an ``attributes`` parameter, which should be a
+ list of fieldnames to fetch. (Default: ``None``, which means all fields
+ should be fetched)
+
+ Returns ``True`` if an ``Item`` is present, ``False`` if not.
+
+ Example::
+
+ # Simple, just hash-key schema.
+ >>> users.has_item(username='johndoe')
+ True
+
+ # Complex schema, item not present.
+ >>> users.has_item(
+ ... username='johndoe',
+ ... date_joined='2014-01-07'
+ ... )
+ False
+
+ """
+ try:
+ self.get_item(**kwargs)
+ except JSONResponseError:
+ return False
+
+ return True
+
def lookup(self, *args, **kwargs):
"""
Look up an entry in DynamoDB. This is mostly backwards compatible
@@ -524,7 +569,6 @@ def new_item(self, *args):
data[self.schema[x].name] = arg
return Item(self, data=data)
-
def put_item(self, data, overwrite=False):
"""
Saves an entire item to DynamoDB.
@@ -969,7 +1013,7 @@ def _query(self, limit=None, index=None, reverse=False, consistent=False,
}
def scan(self, limit=None, segment=None, total_segments=None,
- max_page_size=None, **filter_kwargs):
+ max_page_size=None, attributes=None, **filter_kwargs):
"""
Scans across all items within a DynamoDB table.
@@ -1000,6 +1044,11 @@ def scan(self, limit=None, segment=None, total_segments=None,
the scan from drowning out other queries. (Default: ``None`` -
fetch as many as DynamoDB will return)
+ Optionally accepts an ``attributes`` parameter, which should be a
+ tuple. If you provide any attributes only these will be fetched
+ from DynamoDB. This uses the ``AttributesToGet`` API parameter
+ and sets ``Select`` to ``SPECIFIC_ATTRIBUTES``.
+
Returns a ``ResultSet``, which transparently handles the pagination of
results you get back.
@@ -1034,12 +1083,13 @@ def scan(self, limit=None, segment=None, total_segments=None,
'limit': limit,
'segment': segment,
'total_segments': total_segments,
+ 'attributes': attributes,
})
results.to_call(self._scan, **kwargs)
return results
def _scan(self, limit=None, exclusive_start_key=None, segment=None,
- total_segments=None, **filter_kwargs):
+ total_segments=None, attributes=None, **filter_kwargs):
"""
The internal method that performs the actual scan. Used extensively
by ``ResultSet`` to perform each (paginated) request.
@@ -1048,6 +1098,7 @@ def _scan(self, limit=None, exclusive_start_key=None, segment=None,
'limit': limit,
'segment': segment,
'total_segments': total_segments,
+ 'attributes_to_get': attributes,
}
if exclusive_start_key:
View
14 boto/ec2/cloudwatch/__init__.py
@@ -178,11 +178,11 @@ def aslist(a):
metric_data['StatisticValues.Minimum'] = s['minimum']
metric_data['StatisticValues.SampleCount'] = s['samplecount']
metric_data['StatisticValues.Sum'] = s['sum']
- if value != None:
+ if value is not None:
msg = 'You supplied a value and statistics for a ' + \
'metric.Posting statistics and not value.'
boto.log.warn(msg)
- elif value != None:
+ elif value is not None:
metric_data['Value'] = v
else:
raise Exception('Must specify a value or statistics to put.')
@@ -273,9 +273,13 @@ def list_metrics(self, next_token=None, dimensions=None,
pairs that will be used to filter the results. The key in
the dictionary is the name of a Dimension. The value in
the dictionary is either a scalar value of that Dimension
- name that you want to filter on, a list of values to
- filter on or None if you want all metrics with that
- Dimension name.
+ name that you want to filter on or None if you want all
+ metrics with that Dimension name. To be included in the
+ result a metric must contain all specified dimensions,
+ although the metric may contain additional dimensions beyond
+ the requested metrics. The Dimension names, and values must
+ be strings between 1 and 250 characters long. A maximum of
+ 10 dimensions are allowed.
:type metric_name: str
:param metric_name: The name of the Metric to filter against. If None,
View
94 boto/ec2/connection.py
@@ -1832,6 +1832,36 @@ def assign_private_ip_addresses(self, network_interface_id=None,
return self.get_status('AssignPrivateIpAddresses', params, verb='POST')
+ def _associate_address(self, status, instance_id=None, public_ip=None,
+ allocation_id=None, network_interface_id=None,
+ private_ip_address=None, allow_reassociation=False,
+ dry_run=False):
+ params = {}
+ if instance_id is not None:
+ params['InstanceId'] = instance_id
+ elif network_interface_id is not None:
+ params['NetworkInterfaceId'] = network_interface_id
+
+ if public_ip is not None:
+ params['PublicIp'] = public_ip
+ elif allocation_id is not None:
+ params['AllocationId'] = allocation_id
+
+ if private_ip_address is not None:
+ params['PrivateIpAddress'] = private_ip_address
+
+ if allow_reassociation:
+ params['AllowReassociation'] = 'true'
+
+ if dry_run:
+ params['DryRun'] = 'true'
+
+ if status:
+ return self.get_status('AssociateAddress', params, verb='POST')
+ else:
+ return self.get_object('AssociateAddress', params, Address,
+ verb='POST')
+
def associate_address(self, instance_id=None, public_ip=None,
allocation_id=None, network_interface_id=None,
private_ip_address=None, allow_reassociation=False,
@@ -1874,27 +1904,59 @@ def associate_address(self, instance_id=None, public_ip=None,
:rtype: bool
:return: True if successful
"""
- params = {}
- if instance_id is not None:
- params['InstanceId'] = instance_id
- elif network_interface_id is not None:
- params['NetworkInterfaceId'] = network_interface_id
+ return self._associate_address(True, instance_id=instance_id,
+ public_ip=public_ip, allocation_id=allocation_id,
+ network_interface_id=network_interface_id,
+ private_ip_address=private_ip_address,
+ allow_reassociation=allow_reassociation, dry_run=dry_run)
- if public_ip is not None:
- params['PublicIp'] = public_ip
- elif allocation_id is not None:
- params['AllocationId'] = allocation_id
+ def associate_address_object(self, instance_id=None, public_ip=None,
+ allocation_id=None, network_interface_id=None,
+ private_ip_address=None, allow_reassociation=False,
+ dry_run=False):
+ """
+ Associate an Elastic IP address with a currently running instance.
+ This requires one of ``public_ip`` or ``allocation_id`` depending
+ on if you're associating a VPC address or a plain EC2 address.
- if private_ip_address is not None:
- params['PrivateIpAddress'] = private_ip_address
+ When using an Allocation ID, make sure to pass ``None`` for ``public_ip``
+ as EC2 expects a single parameter and if ``public_ip`` is passed boto
+ will preference that instead of ``allocation_id``.
- if allow_reassociation:
- params['AllowReassociation'] = 'true'
+ :type instance_id: string
+ :param instance_id: The ID of the instance
- if dry_run:
- params['DryRun'] = 'true'
+ :type public_ip: string
+ :param public_ip: The public IP address for EC2 based allocations.
+
+ :type allocation_id: string
+ :param allocation_id: The allocation ID for a VPC-based elastic IP.
+
+ :type network_interface_id: string
+ :param network_interface_id: The network interface ID to which
+ elastic IP is to be assigned to
- return self.get_status('AssociateAddress', params, verb='POST')
+ :type private_ip_address: string
+ :param private_ip_address: The primary or secondary private IP address
+ to associate with the Elastic IP address.
+
+ :type allow_reassociation: bool
+ :param allow_reassociation: Specify this option to allow an Elastic IP
+ address that is already associated with another network interface
+ or instance to be re-associated with the specified instance or
+ interface.
+
+ :type dry_run: bool
+ :param dry_run: Set to True if the operation should not actually run.
+
+ :rtype: class:`boto.ec2.address.Address`
+ :return: The associated address instance
+ """
+ return self._associate_address(False, instance_id=instance_id,
+ public_ip=public_ip, allocation_id=allocation_id,
+ network_interface_id=network_interface_id,
+ private_ip_address=private_ip_address,
+ allow_reassociation=allow_reassociation, dry_run=dry_run)
def disassociate_address(self, public_ip=None, association_id=None,
dry_run=False):
View
2 boto/ec2/elb/loadbalancer.py
@@ -324,7 +324,7 @@ def create_listeners(self, listeners):
listeners)
def create_listener(self, inPort, outPort=None, proto="tcp"):
- if outPort == None:
+ if outPort is None:
outPort = inPort
return self.create_listeners([(inPort, outPort, proto)])
View
2 boto/ecs/__init__.py
@@ -66,7 +66,7 @@ def get_response(self, action, params, page=0, itemSet=None):
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
- if itemSet == None:
+ if itemSet is None:
rs = ItemSet(self, action, params, page)
else:
rs = itemSet
View
6 boto/ecs/item.py
@@ -110,7 +110,7 @@ def __init__(self, connection, action, params, page=0):
def startElement(self, name, attrs, connection):
if name == "Item":
self.curItem = Item(self._connection)
- elif self.curItem != None:
+ elif self.curItem is not None:
self.curItem.startElement(name, attrs, connection)
return None
@@ -123,13 +123,13 @@ def endElement(self, name, value, connection):
self.objs.append(self.curItem)
self._xml.write(self.curItem.to_xml())
self.curItem = None
- elif self.curItem != None:
+ elif self.curItem is not None:
self.curItem.endElement(name, value, connection)
return None
def next(self):
"""Special paging functionality"""
- if self.iter == None:
+ if self.iter is None:
self.iter = iter(self.objs)
try:
return self.iter.next()
View
2 boto/glacier/concurrent.py
@@ -19,6 +19,8 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
+from __future__ import with_statement
+
import os
import math
import threading
View
29 boto/glacier/job.py
@@ -97,9 +97,12 @@ def get_output(self, byte_range=None, validate_checksum=False):
actual_tree_hash, response['TreeHash'], byte_range))
return response
+ def _calc_num_chunks(self, chunk_size):
+ return int(math.ceil(self.archive_size / float(chunk_size)))
+
def download_to_file(self, filename, chunk_size=DefaultPartSize,
verify_hashes=True, retry_exceptions=(socket.error,)):
- """Download an archive to a file.
+ """Download an archive to a file by name.
:type filename: str
:param filename: The name of the file where the archive
@@ -114,11 +117,33 @@ def download_to_file(self, filename, chunk_size=DefaultPartSize,
the tree hashes for each downloaded chunk.
"""
- num_chunks = int(math.ceil(self.archive_size / float(chunk_size)))
+ num_chunks = self._calc_num_chunks(chunk_size)
with open(filename, 'wb') as output_file:
self._download_to_fileob(output_file, num_chunks, chunk_size,
verify_hashes, retry_exceptions)
+ def download_to_fileobj(self, output_file, chunk_size=DefaultPartSize,
+ verify_hashes=True,
+ retry_exceptions=(socket.error,)):
+ """Download an archive to a file object.
+
+ :type output_file: file
+ :param output_file: The file object where the archive
+ contents will be saved.
+
+ :type chunk_size: int
+ :param chunk_size: The chunk size to use when downloading
+ the archive.
+
+ :type verify_hashes: bool
+ :param verify_hashes: Indicates whether or not to verify
+ the tree hashes for each downloaded chunk.
+
+ """
+ num_chunks = self._calc_num_chunks(chunk_size)
+ self._download_to_fileob(output_file, num_chunks, chunk_size,
+ verify_hashes, retry_exceptions)
+
def _download_to_fileob(self, fileobj, num_chunks, chunk_size, verify_hashes,
retry_exceptions):
for i in xrange(num_chunks):
View
1,082 boto/glacier/layer1.py
@@ -33,9 +33,42 @@
class Layer1(AWSAuthConnection):
-
+ """
+ Amazon Glacier is a storage solution for "cold data."
+
+ Amazon Glacier is an extremely low-cost storage service that
+ provides secure, durable and easy-to-use storage for data backup
+ and archival. With Amazon Glacier, customers can store their data
+ cost effectively for months, years, or decades. Amazon Glacier
+ also enables customers to offload the administrative burdens of
+ operating and scaling storage to AWS, so they don't have to worry
+ about capacity planning, hardware provisioning, data replication,
+ hardware failure and recovery, or time-consuming hardware
+ migrations.
+
+ Amazon Glacier is a great storage choice when low storage cost is
+ paramount, your data is rarely retrieved, and retrieval latency of
+ several hours is acceptable. If your application requires fast or
+ frequent access to your data, consider using Amazon S3. For more
+ information, go to `Amazon Simple Storage Service (Amazon S3)`_.
+
+ You can store any kind of data in any format. There is no maximum
+ limit on the total amount of data you can store in Amazon Glacier.
+
+ If you are a first-time user of Amazon Glacier, we recommend that
+ you begin by reading the following sections in the Amazon Glacier
+ Developer Guide :
+
+
+ + `What is Amazon Glacier`_ - This section of the Developer Guide
+ describes the underlying data model, the operations it supports,
+ and the AWS SDKs that you can use to interact with the service.
+ + `Getting Started with Amazon Glacier`_ - The Getting Started
+ section walks you through the process of creating a vault,
+ uploading archives, creating jobs to download archives, retrieving
+ the job output, and deleting archives.
+ """
Version = '2012-06-01'
- """Glacier API version."""
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
account_id='-', is_secure=True, port=None,
@@ -87,35 +120,39 @@ def make_request(self, verb, resource, headers=None,
def list_vaults(self, limit=None, marker=None):
"""
- This operation lists all vaults owned by the calling users
+ This operation lists all vaults owned by the calling user's
account. The list returned in the response is ASCII-sorted by
vault name.
By default, this operation returns up to 1,000 items. If there
- are more vaults to list, the marker field in the response body
- contains the vault Amazon Resource Name (ARN) at which to
- continue the list with a new List Vaults request; otherwise,
- the marker field is null. In your next List Vaults request you
- set the marker parameter to the value Amazon Glacier returned
- in the responses to your previous List Vaults request. You can
- also limit the number of vaults returned in the response by
- specifying the limit parameter in the request.
-
- :type limit: int
- :param limit: The maximum number of items returned in the
- response. If you don't specify a value, the List Vaults
- operation returns up to 1,000 items.
-
- :type marker: str
- :param marker: A string used for pagination. marker specifies
- the vault ARN after which the listing of vaults should
- begin. (The vault specified by marker is not included in
- the returned list.) Get the marker value from a previous
- List Vaults response. You need to include the marker only
- if you are continuing the pagination of results started in
- a previous List Vaults request. Specifying an empty value
- ("") for the marker returns a list of vaults starting
- from the first vault.
+ are more vaults to list, the response `marker` field contains
+ the vault Amazon Resource Name (ARN) at which to continue the
+ list with a new List Vaults request; otherwise, the `marker`
+ field is `null`. To return a list of vaults that begins at a
+ specific vault, set the `marker` request parameter to the
+ vault ARN you obtained from a previous List Vaults request.
+ You can also limit the number of vaults returned in the
+ response by specifying the `limit` parameter in the request.
+
+ An AWS account has full permission to perform all operations
+ (actions). However, AWS Identity and Access Management (IAM)
+ users don't have any permissions by default. You must grant
+ them explicit permission to perform specific actions. For more
+ information, see `Access Control Using AWS Identity and Access
+ Management (IAM)`_.
+
+ For conceptual information and underlying REST API, go to
+ `Retrieving Vault Metadata in Amazon Glacier`_ and `List
+ Vaults `_ in the Amazon Glacier Developer Guide .
+
+ :type marker: string
+ :param marker: A string used for pagination. The marker specifies the
+ vault ARN after which the listing of vaults should begin.
+
+ :type limit: string
+ :param limit: The maximum number of items returned in the response. If
+ you don't specify a value, the List Vaults operation returns up to
+ 1,000 items.
"""
params = {}
if limit:
@@ -127,18 +164,31 @@ def list_vaults(self, limit=None, marker=None):
def describe_vault(self, vault_name):
"""
This operation returns information about a vault, including
- the vault Amazon Resource Name (ARN), the date the vault was
- created, the number of archives contained within the vault,
- and the total size of all the archives in the vault. The
- number of archives and their total size are as of the last
- vault inventory Amazon Glacier generated. Amazon Glacier
- generates vault inventories approximately daily. This means
- that if you add or remove an archive from a vault, and then
- immediately send a Describe Vault request, the response might
- not reflect the changes.
-
- :type vault_name: str
- :param vault_name: The name of the new vault
+ the vault's Amazon Resource Name (ARN), the date the vault was
+ created, the number of archives it contains, and the total
+ size of all the archives in the vault. The number of archives
+ and their total size are as of the last inventory generation.
+ This means that if you add or remove an archive from a vault,
+ and then immediately use Describe Vault, the change in
+ contents will not be immediately reflected. If you want to
+ retrieve the latest inventory of the vault, use InitiateJob.
+ Amazon Glacier generates vault inventories approximately
+ daily. For more information, see `Downloading a Vault
+ Inventory in Amazon Glacier`_.
+
+ An AWS account has full permission to perform all operations
+ (actions). However, AWS Identity and Access Management (IAM)
+ users don't have any permissions by default. You must grant
+ them explicit permission to perform specific actions. For more
+ information, see `Access Control Using AWS Identity and Access
+ Management (IAM)`_.
+
+ For conceptual information and underlying REST API, go to
+ `Retrieving Vault Metadata in Amazon Glacier`_ and `Describe
+ Vault `_ in the Amazon Glacier Developer Guide .
+
+ :type vault_name: string
+ :param vault_name: The name of the vault.
"""
uri = 'vaults/%s' % vault_name
return self.make_request('GET', uri)
@@ -147,23 +197,34 @@ def create_vault(self, vault_name):
"""
This operation creates a new vault with the specified name.
The name of the vault must be unique within a region for an
- AWS account. You can create up to 1,000 vaults per
- account. For information on creating more vaults, go to the
- Amazon Glacier product detail page.
+ AWS account. You can create up to 1,000 vaults per account. If
+ you need to create more vaults, contact Amazon Glacier.
You must use the following guidelines when naming a vault.
- Names can be between 1 and 255 characters long.
- Allowed characters are a–z, A–Z, 0–9, '_' (underscore),
- '-' (hyphen), and '.' (period).
- This operation is idempotent, you can send the same request
- multiple times and it has no further effect after the first
- time Amazon Glacier creates the specified vault.
+ + Names can be between 1 and 255 characters long.
+ + Allowed characters are a-z, A-Z, 0-9, '_' (underscore), '-'
+ (hyphen), and '.' (period).
- :type vault_name: str
- :param vault_name: The name of the new vault
+
+
+ This operation is idempotent.
+
+ An AWS account has full permission to perform all operations
+ (actions). However, AWS Identity and Access Management (IAM)
+ users don't have any permissions by default. You must grant
+ them explicit permission to perform specific actions. For more
+ information, see `Access Control Using AWS Identity and Access
+ Management (IAM)`_.
+
+ For conceptual information and underlying REST API, go to
+ `Creating a Vault in Amazon Glacier`_ and `Create Vault `_ in
+ the Amazon Glacier Developer Guide .
+
+ :type vault_name: string
+ :param vault_name: The name of the vault.
"""
uri = 'vaults/%s' % vault_name
return self.make_request('PUT', uri, ok_responses=(201,),
@@ -172,50 +233,114 @@ def create_vault(self, vault_name):
def delete_vault(self, vault_name):
"""
This operation deletes a vault. Amazon Glacier will delete a
- vault only if there are no archives in the vault as per the
+ vault only if there are no archives in the vault as of the
last inventory and there have been no writes to the vault
since the last inventory. If either of these conditions is not
satisfied, the vault deletion fails (that is, the vault is not
- removed) and Amazon Glacier returns an error.
-
- This operation is idempotent, you can send the same request
- multiple times and it has no further effect after the first
- time Amazon Glacier delete the specified vault.
-
- :type vault_name: str
- :param vault_name: The name of the new vault
+ removed) and Amazon Glacier returns an error. You can use
+ DescribeVault to return the number of archives in a vault, and
+ you can use `Initiate a Job (POST jobs)`_ to initiate a new
+ inventory retrieval for a vault. The inventory contains the
+ archive IDs you use to delete archives using `Delete Archive
+ (DELETE archive)`_.
+
+ This operation is idempotent.
+
+ An AWS account has full permission to perform all operations
+ (actions). However, AWS Identity and Access Management (IAM)
+ users don't have any permissions by default. You must grant
+ them explicit permission to perform specific actions. For more
+ information, see `Access Control Using AWS Identity and Access
+ Management (IAM)`_.
+
+ For conceptual information and underlying REST API, go to
+ `Deleting a Vault in Amazon Glacier`_ and `Delete Vault `_ in
+ the Amazon Glacier Developer Guide .
+
+ :type vault_name: string
+ :param vault_name: The name of the vault.
"""
uri = 'vaults/%s' % vault_name
return self.make_request('DELETE', uri, ok_responses=(204,))
def get_vault_notifications(self, vault_name):
"""
- This operation retrieves the notification-configuration
- subresource set on the vault.
-
- :type vault_name: str
- :param vault_name: The name of the new vault
+ This operation retrieves the `notification-configuration`
+ subresource of the specified vault.
+
+ For information about setting a notification configuration on
+ a vault, see SetVaultNotifications. If a notification
+ configuration for a vault is not set, the operation returns a
+ `404 Not Found` error. For more information about vault
+ notifications, see `Configuring Vault Notifications in Amazon
+ Glacier`_.
+
+ An AWS account has full permission to perform all operations
+ (actions). However, AWS Identity and Access Management (IAM)
+ users don't have any permissions by default. You must grant
+ them explicit permission to perform specific actions. For more
+ information, see `Access Control Using AWS Identity and Access
+ Management (IAM)`_.
+
+ For conceptual information and underlying REST API, go to
+ `Configuring Vault Notifications in Amazon Glacier`_ and `Get
+ Vault Notification Configuration `_ in the Amazon Glacier
+ Developer Guide .
+
+ :type vault_name: string
+ :param vault_name: The name of the vault.
"""
uri = 'vaults/%s/notification-configuration' % vault_name
return self.make_request('GET', uri)
def set_vault_notifications(self, vault_name, notification_config):
"""
- This operation retrieves the notification-configuration
- subresource set on the vault.
-
- :type vault_name: str
- :param vault_name: The name of the new vault
-
- :type notification_config: dict
- :param notification_config: A Python dictionary containing
- an SNS Topic and events for which you want Amazon Glacier
- to send notifications to the topic. Possible events are:
+ This operation configures notifications that will be sent when
+ specific events happen to a vault. By default, you don't get
+ any notifications.
+
+ To configure vault notifications, send a PUT request to the
+ `notification-configuration` subresource of the vault. The
+ request should include a JSON document that provides an Amazon
+ SNS topic and specific events for which you want Amazon
+ Glacier to send notifications to the topic.
+
+ Amazon SNS topics must grant permission to the vault to be
+ allowed to publish notifications to the topic. You can
+ configure a vault to publish a notification for the following
+ vault events:
+
+
+ + **ArchiveRetrievalCompleted** This event occurs when a job
+ that was initiated for an archive retrieval is completed
+ (InitiateJob). The status of the completed job can be
+ "Succeeded" or "Failed". The notification sent to the SNS
+ topic is the same output as returned from DescribeJob.
+ + **InventoryRetrievalCompleted** This event occurs when a job
+ that was initiated for an inventory retrieval is completed
+ (InitiateJob). The status of the completed job can be
+ "Succeeded" or "Failed". The notification sent to the SNS
+ topic is the same output as returned from DescribeJob.
+
+
+ An AWS account has full permission to perform all operations
+ (actions). However, AWS Identity and Access Management (IAM)
+ users don't have any permissions by default. You must grant
+ them explicit permission to perform specific actions. For more
+ information, see `Access Control Using AWS Identity and Access
+ Management (IAM)`_.
+
+ For conceptual information and underlying REST API, go to
+ `Configuring Vault Notifications in Amazon Glacier`_ and `Set
+ Vault Notification Configuration `_ in the Amazon Glacier
+ Developer Guide .
+
+ :type vault_name: string
+ :param vault_name: The name of the vault.
- * ArchiveRetrievalCompleted - occurs when a job that was
- initiated for an archive retrieval is completed.
- * InventoryRetrievalCompleted - occurs when a job that was
- initiated for an inventory retrieval is completed.
+        :type notification_config: dict
+        :param notification_config: Provides options for specifying
+ notification configuration.
The format of the dictionary is:
@@ -229,11 +354,27 @@ def set_vault_notifications(self, vault_name, notification_config):
def delete_vault_notifications(self, vault_name):
"""
- This operation deletes the notification-configuration
- subresource set on the vault.
-
- :type vault_name: str
- :param vault_name: The name of the new vault
+ This operation deletes the notification configuration set for
+        a vault. The operation is eventually consistent; that is, it
+ might take some time for Amazon Glacier to completely disable
+ the notifications and you might still receive some
+ notifications for a short time after you send the delete
+ request.
+
+ An AWS account has full permission to perform all operations
+ (actions). However, AWS Identity and Access Management (IAM)
+ users don't have any permissions by default. You must grant
+ them explicit permission to perform specific actions. For more
+ information, see `Access Control Using AWS Identity and Access
+ Management (IAM)`_.
+
+ For conceptual information and underlying REST API, go to
+ `Configuring Vault Notifications in Amazon Glacier`_ and
+ `Delete Vault Notification Configuration `_ in the Amazon
+ Glacier Developer Guide.
+
+ :type vault_name: string
+ :param vault_name: The name of the vault.
"""
uri = 'vaults/%s/notification-configuration' % vault_name
return self.make_request('DELETE', uri, ok_responses=(204,))
@@ -243,36 +384,80 @@ def delete_vault_notifications(self, vault_name):
def list_jobs(self, vault_name, completed=None, status_code=None,
limit=None, marker=None):
"""
- This operation lists jobs for a vault including jobs that are
+ This operation lists jobs for a vault, including jobs that are
in-progress and jobs that have recently finished.
- :type vault_name: str
+
+ Amazon Glacier retains recently completed jobs for a period
+ before deleting them; however, it eventually removes completed
+ jobs. The output of completed jobs can be retrieved. Retaining
+ completed jobs for a period of time after they have completed
+ enables you to get a job output in the event you miss the job
+ completion notification or your first attempt to download it
+ fails. For example, suppose you start an archive retrieval job
+ to download an archive. After the job completes, you start to
+ download the archive but encounter a network error. In this
+ scenario, you can retry and download the archive while the job
+ exists.
+
+
+ To retrieve an archive or retrieve a vault inventory from
+ Amazon Glacier, you first initiate a job, and after the job
+ completes, you download the data. For an archive retrieval,
+ the output is the archive data, and for an inventory
+ retrieval, it is the inventory list. The List Job operation
+ returns a list of these jobs sorted by job initiation time.
+
+ This List Jobs operation supports pagination. By default, this
+ operation returns up to 1,000 jobs in the response. You should
+ always check the response for a `marker` at which to continue
+ the list; if there are no more items the `marker` is `null`.
+ To return a list of jobs that begins at a specific job, set
+ the `marker` request parameter to the value you obtained from
+ a previous List Jobs request. You can also limit the number of
+ jobs returned in the response by specifying the `limit`
+ parameter in the request.
+
+ Additionally, you can filter the jobs list returned by
+ specifying an optional `statuscode` (InProgress, Succeeded, or
+ Failed) and `completed` (true, false) parameter. The
+ `statuscode` allows you to specify that only jobs that match a
+ specified status are returned. The `completed` parameter
+ allows you to specify that only jobs in a specific completion
+ state are returned.
+
+ An AWS account has full permission to perform all operations
+ (actions). However, AWS Identity and Access Management (IAM)
+ users don't have any permissions by default. You must grant
+ them explicit permission to perform specific actions. For more
+ information, see `Access Control Using AWS Identity and Access
+ Management (IAM)`_.
+
+ For the underlying REST API, go to `List Jobs `_
+
+ :type vault_name: string
:param vault_name: The name of the vault.
- :type completed: boolean
- :param completed: Specifies the state of the jobs to return.
- If a value of True is passed, only completed jobs will
- be returned. If a value of False is passed, only
- uncompleted jobs will be returned. If no value is
- passed, all jobs will be returned.
+        :type limit: int
+ :param limit: Specifies that the response be limited to the specified
+ number of items or fewer. If not specified, the List Jobs operation
+ returns up to 1,000 jobs.
- :type status_code: string
- :param status_code: Specifies the type of job status to return.
- Valid values are: InProgress|Succeeded|Failed. If not
- specified, jobs with all status codes are returned.
+ :type marker: string
+ :param marker: An opaque string used for pagination. This value
+ specifies the job at which the listing of jobs should begin. Get
+ the marker value from a previous List Jobs response. You need only
+ include the marker if you are continuing the pagination of results
+ started in a previous List Jobs request.
- :type limit: int
- :param limit: The maximum number of items returned in the
- response. If you don't specify a value, the List Jobs
- operation returns up to 1,000 items.
+        :type status_code: string
+        :param status_code: Specifies the type of job status to return. You can
+ specify the following values: "InProgress", "Succeeded", or
+ "Failed".
- :type marker: str
- :param marker: An opaque string used for pagination. marker
- specifies the job at which the listing of jobs should
- begin. Get the marker value from a previous List Jobs
- response. You need only include the marker if you are
- continuing the pagination of results started in a previous
- List Jobs request.
+        :type completed: boolean
+        :param completed: Specifies the state of the jobs to return. You can
+            specify `True` or `False`.
"""
params = {}
@@ -292,39 +477,154 @@ def describe_job(self, vault_name, job_id):
This operation returns information about a job you previously
initiated, including the job initiation date, the user who
initiated the job, the job status code/message and the Amazon
- Simple Notification Service (Amazon SNS) topic to notify after
- Amazon Glacier completes the job.
+ SNS topic to notify after Amazon Glacier completes the job.
+ For more information about initiating a job, see InitiateJob.
+
+
+ This operation enables you to check the status of your job.
+ However, it is strongly recommended that you set up an Amazon
+ SNS topic and specify it in your initiate job request so that
+ Amazon Glacier can notify the topic after it completes the
+ job.
- :type vault_name: str
- :param vault_name: The name of the new vault
- :type job_id: str
- :param job_id: The ID of the job.
+ A job ID will not expire for at least 24 hours after Amazon
+ Glacier completes the job.
+
+ An AWS account has full permission to perform all operations
+ (actions). However, AWS Identity and Access Management (IAM)
+ users don't have any permissions by default. You must grant
+ them explicit permission to perform specific actions. For more
+ information, see `Access Control Using AWS Identity and Access
+ Management (IAM)`_.
+
+ For information about the underlying REST API, go to `Working
+ with Archives in Amazon Glacier`_ in the Amazon Glacier
+ Developer Guide .
+
+ :type vault_name: string
+ :param vault_name: The name of the vault.
+
+ :type job_id: string
+ :param job_id: The ID of the job to describe.
"""
uri = 'vaults/%s/jobs/%s' % (vault_name, job_id)
return self.make_request('GET', uri, ok_responses=(200,))
def initiate_job(self, vault_name, job_data):
"""
- This operation initiates a job of the specified
- type. Retrieving an archive or a vault inventory are
- asynchronous operations that require you to initiate a job. It
- is a two-step process:
-
- * Initiate a retrieval job.
- * After the job completes, download the bytes.
-
- The retrieval is executed asynchronously. When you initiate
- a retrieval job, Amazon Glacier creates a job and returns a
- job ID in the response.
-
- :type vault_name: str
- :param vault_name: The name of the new vault
+ This operation initiates a job of the specified type. In this
+ release, you can initiate a job to retrieve either an archive
+ or a vault inventory (a list of archives in a vault).
+
+ Retrieving data from Amazon Glacier is a two-step process:
+
+
+ #. Initiate a retrieval job.
+ #. After the job completes, download the bytes.
+
+
+ The retrieval request is executed asynchronously. When you
+ initiate a retrieval job, Amazon Glacier creates a job and
+ returns a job ID in the response. When Amazon Glacier
+ completes the job, you can get the job output (archive or
+ inventory data). For information about getting job output, see
+ GetJobOutput operation.
+
+ The job must complete before you can get its output. To
+ determine when a job is complete, you have the following
+ options:
+
+
+ + **Use Amazon SNS Notification** You can specify an Amazon
+ Simple Notification Service (Amazon SNS) topic to which Amazon
+ Glacier can post a notification after the job is completed.
+ You can specify an SNS topic per job request. The notification
+ is sent only after Amazon Glacier completes the job. In
+ addition to specifying an SNS topic per job request, you can
+ configure vault notifications for a vault so that job
+ notifications are always sent. For more information, see
+ SetVaultNotifications.
+ + **Get job details** You can make a DescribeJob request to
+ obtain job status information while a job is in progress.
+ However, it is more efficient to use an Amazon SNS
+ notification to determine when a job is complete.
+
+
+
+        The information you get via notification is the same as what
+        you get by calling DescribeJob.
+
+
+ If for a specific event, you add both the notification
+ configuration on the vault and also specify an SNS topic in
+ your initiate job request, Amazon Glacier sends both
+ notifications. For more information, see
+ SetVaultNotifications.
+
+ An AWS account has full permission to perform all operations
+ (actions). However, AWS Identity and Access Management (IAM)
+ users don't have any permissions by default. You must grant
+ them explicit permission to perform specific actions. For more
+ information, see `Access Control Using AWS Identity and Access
+ Management (IAM)`_.
+
+ **About the Vault Inventory**
+
+ Amazon Glacier prepares an inventory for each vault
+ periodically, every 24 hours. When you initiate a job for a
+ vault inventory, Amazon Glacier returns the last inventory for
+ the vault. The inventory data you get might be up to a day or
+ two days old. Also, the initiate inventory job might take some
+ time to complete before you can download the vault inventory.
+ So you do not want to retrieve a vault inventory for each
+ vault operation. However, in some scenarios, you might find
+ the vault inventory useful. For example, when you upload an
+ archive, you can provide an archive description but not an
+ archive name. Amazon Glacier provides you a unique archive ID,
+ an opaque string of characters. So, you might maintain your
+ own database that maps archive names to their corresponding
+ Amazon Glacier assigned archive IDs. You might find the vault
+ inventory useful in the event you need to reconcile
+ information in your database with the actual vault inventory.
+
+ **About Ranged Archive Retrieval**
+
+ You can initiate an archive retrieval for the whole archive or
+ a range of the archive. In the case of ranged archive
+ retrieval, you specify a byte range to return or the whole
+ archive. The range specified must be megabyte (MB) aligned,
+        that is, the range start value must be divisible by 1 MB and
+ range end value plus 1 must be divisible by 1 MB or equal the
+ end of the archive. If the ranged archive retrieval is not
+ megabyte aligned, this operation returns a 400 response.
+ Furthermore, to ensure you get checksum values for data you
+ download using Get Job Output API, the range must be tree hash
+ aligned.
+
+ An AWS account has full permission to perform all operations
+ (actions). However, AWS Identity and Access Management (IAM)
+ users don't have any permissions by default. You must grant
+ them explicit permission to perform specific actions. For more
+ information, see `Access Control Using AWS Identity and Access
+ Management (IAM)`_.
+
+ For conceptual information and the underlying REST API, go to
+ `Initiate a Job`_ and `Downloading a Vault Inventory`_
+
+ :type account_id: string
+ :param account_id: The `AccountId` is the AWS Account ID. You can
+ specify either the AWS Account ID or optionally a '-', in which
+ case Amazon Glacier uses the AWS Account ID associated with the
+ credentials used to sign the request. If you specify your Account
+ ID, do not include hyphens in it.
+
+ :type vault_name: string
+ :param vault_name: The name of the vault.
- :type job_data: dict
- :param job_data: A Python dictionary containing the
- information about the requested job. The dictionary
- can contain the following attributes:
+        :type job_data: dict
+        :param job_data: Provides options for specifying job information.
+ The dictionary can contain the following attributes:
* ArchiveId - The ID of the archive you want to retrieve.
This field is required only if the Type is set to
@@ -340,6 +640,12 @@ def initiate_job(self, vault_name, job_data):
archive-retrieval|inventory-retrieval
* RetrievalByteRange - Optionally specify the range of
bytes to retrieve.
+ * InventoryRetrievalParameters: Optional job parameters
+ * Format - The output format, like "JSON"
+ * StartDate - ISO8601 starting date string
+ * EndDate - ISO8601 ending date string
+ * Limit - Maximum number of entries
+ * Marker - A unique string used for pagination
"""
uri = 'vaults/%s/jobs' % vault_name
@@ -353,27 +659,72 @@ def initiate_job(self, vault_name, job_data):
def get_job_output(self, vault_name, job_id, byte_range=None):
"""
This operation downloads the output of the job you initiated
- using Initiate a Job. Depending on the job type
- you specified when you initiated the job, the output will be
- either the content of an archive or a vault inventory.
-
- You can download all the job output or download a portion of
- the output by specifying a byte range. In the case of an
- archive retrieval job, depending on the byte range you
- specify, Amazon Glacier returns the checksum for the portion
- of the data. You can compute the checksum on the client and
- verify that the values match to ensure the portion you
- downloaded is the correct data.
-
- :type vault_name: str :param
- :param vault_name: The name of the new vault
+ using InitiateJob. Depending on the job type you specified
+ when you initiated the job, the output will be either the
+ content of an archive or a vault inventory.
+
+ A job ID will not expire for at least 24 hours after Amazon
+ Glacier completes the job. That is, you can download the job
+ output within the 24 hours period after Amazon Glacier
+ completes the job.
+
+ If the job output is large, then you can use the `Range`
+ request header to retrieve a portion of the output. This
+ allows you to download the entire output in smaller chunks of
+ bytes. For example, suppose you have 1 GB of job output you
+ want to download and you decide to download 128 MB chunks of
+ data at a time, which is a total of eight Get Job Output
+ requests. You use the following process to download the job
+ output:
+
+
+ #. Download a 128 MB chunk of output by specifying the
+ appropriate byte range using the `Range` header.
+ #. Along with the data, the response includes a checksum of
+ the payload. You compute the checksum of the payload on the
+ client and compare it with the checksum you received in the
+ response to ensure you received all the expected data.
+ #. Repeat steps 1 and 2 for all the eight 128 MB chunks of
+ output data, each time specifying the appropriate byte range.
+ #. After downloading all the parts of the job output, you have
+ a list of eight checksum values. Compute the tree hash of
+ these values to find the checksum of the entire output. Using
+ the Describe Job API, obtain job information of the job that
+ provided you the output. The response includes the checksum of
+ the entire archive stored in Amazon Glacier. You compare this
+ value with the checksum you computed to ensure you have
+ downloaded the entire archive content with no errors.
+
+
+ An AWS account has full permission to perform all operations
+ (actions). However, AWS Identity and Access Management (IAM)
+ users don't have any permissions by default. You must grant
+ them explicit permission to perform specific actions. For more
+ information, see `Access Control Using AWS Identity and Access
+ Management (IAM)`_.
+
+ For conceptual information and the underlying REST API, go to
+ `Downloading a Vault Inventory`_, `Downloading an Archive`_,
+ and `Get Job Output `_
+
+ :type account_id: string
+ :param account_id: The `AccountId` is the AWS Account ID. You can
+ specify either the AWS Account ID or optionally a '-', in which
+ case Amazon Glacier uses the AWS Account ID associated with the
+ credentials used to sign the request. If you specify your Account
+ ID, do not include hyphens in it.
+
+ :type vault_name: string
+ :param vault_name: The name of the vault.
- :type job_id: str
- :param job_id: The ID of the job.
+ :type job_id: string
+ :param job_id: The job ID whose data is downloaded.
- :type byte_range: tuple
- :param range: A tuple of integers specifying the slice (in bytes)
- of the archive you want to receive
+        :type byte_range: tuple
+        :param byte_range: A tuple of integers specifying the slice (in bytes)
+            of the job output to retrieve. For example, to download the first
+            1,048,576 bytes, specify (0, 1048575). By default, this operation
+            downloads the entire output.
"""
response_headers = [('x-amz-sha256-tree-hash', u'TreeHash'),
('Content-Range', u'ContentRange'),
@@ -392,13 +743,50 @@ def get_job_output(self, vault_name, job_id, byte_range=None):
def upload_archive(self, vault_name, archive,
linear_hash, tree_hash, description=None):
"""
- This operation adds an archive to a vault. For a successful
- upload, your data is durably persisted. In response, Amazon
- Glacier returns the archive ID in the x-amz-archive-id header
- of the response. You should save the archive ID returned so
- that you can access the archive later.
+ This operation adds an archive to a vault. This is a
+ synchronous operation, and for a successful upload, your data
+ is durably persisted. Amazon Glacier returns the archive ID in
+ the `x-amz-archive-id` header of the response.
+
+ You must use the archive ID to access your data in Amazon
+ Glacier. After you upload an archive, you should save the
+ archive ID returned so that you can retrieve or delete the
+ archive later. Besides saving the archive ID, you can also
+ index it and give it a friendly name to allow for better
+ searching. You can also use the optional archive description
+ field to specify how the archive is referred to in an external
+ index of archives, such as you might create in Amazon
+ DynamoDB. You can also get the vault inventory to obtain a
+ list of archive IDs in a vault. For more information, see
+ InitiateJob.
+
+ You must provide a SHA256 tree hash of the data you are
+ uploading. For information about computing a SHA256 tree hash,
+ see `Computing Checksums`_.
+
+ You can optionally specify an archive description of up to
+ 1,024 printable ASCII characters. You can get the archive
+ description when you either retrieve the archive or get the
+ vault inventory. For more information, see InitiateJob. Amazon
+ Glacier does not interpret the description in any way. An
+ archive description does not need to be unique. You cannot use
+ the description to retrieve or sort the archive list.
+
+ Archives are immutable. After you upload an archive, you
+ cannot edit the archive or its description.
+
+ An AWS account has full permission to perform all operations
+ (actions). However, AWS Identity and Access Management (IAM)
+ users don't have any permissions by default. You must grant
+ them explicit permission to perform specific actions. For more
+ information, see `Access Control Using AWS Identity and Access
+ Management (IAM)`_.
+
+ For conceptual information and underlying REST API, go to
+ `Uploading an Archive in Amazon Glacier`_ and `Upload
+ Archive`_ in the Amazon Glacier Developer Guide .
- :type vault_name: str :param
+ :type vault_name: str
:param vault_name: The name of the vault
:type archive: bytes
@@ -414,7 +802,8 @@ def upload_archive(self, vault_name, archive,
tree hash, see http://goo.gl/u7chF.
:type description: str
- :param description: An optional description of the archive.
+ :param description: The optional description of the archive you
+ are uploading.
"""
response_headers = [('x-amz-archive-id', u'ArchiveId'),
('Location', u'Location'),
@@ -445,13 +834,39 @@ def _is_file_like(self, archive):
def delete_archive(self, vault_name, archive_id):
"""
- This operation deletes an archive from a vault.
+ This operation deletes an archive from a vault. Subsequent
+ requests to initiate a retrieval of this archive will fail.
+ Archive retrievals that are in progress for this archive ID
+ may or may not succeed according to the following scenarios:
+
+
+ + If the archive retrieval job is actively preparing the data
+ for download when Amazon Glacier receives the delete archive
+        request, the archive retrieval operation might fail.
+ + If the archive retrieval job has successfully prepared the
+ archive for download when Amazon Glacier receives the delete
+ archive request, you will be able to download the output.
- :type vault_name: str
- :param vault_name: The name of the new vault
- :type archive_id: str
- :param archive_id: The ID for the archive to be deleted.
+ This operation is idempotent. Attempting to delete an already-
+ deleted archive does not result in an error.
+
+ An AWS account has full permission to perform all operations
+ (actions). However, AWS Identity and Access Management (IAM)
+ users don't have any permissions by default. You must grant
+ them explicit permission to perform specific actions. For more
+ information, see `Access Control Using AWS Identity and Access
+ Management (IAM)`_.
+
+ For conceptual information and underlying REST API, go to
+ `Deleting an Archive in Amazon Glacier`_ and `Delete Archive`_
+ in the Amazon Glacier Developer Guide .
+
+ :type vault_name: string
+ :param vault_name: The name of the vault.
+
+ :type archive_id: string
+ :param archive_id: The ID of the archive to delete.
"""
uri = 'vaults/%s/archives/%s' % (vault_name, archive_id)
return self.make_request('DELETE', uri, ok_responses=(204,))
@@ -461,21 +876,65 @@ def delete_archive(self, vault_name, archive_id):
def initiate_multipart_upload(self, vault_name, part_size,
description=None):
"""
- Initiate a multipart upload. Amazon Glacier creates a
- multipart upload resource and returns it's ID. You use this
- ID in subsequent multipart upload operations.
+ This operation initiates a multipart upload. Amazon Glacier
+ creates a multipart upload resource and returns its ID in the
+ response. The multipart upload ID is used in subsequent
+ requests to upload parts of an archive (see
+ UploadMultipartPart).
+
+ When you initiate a multipart upload, you specify the part
+ size in number of bytes. The part size must be a megabyte
+        (1024 KB) multiplied by a power of 2, for example, 1048576 (1
+ MB), 2097152 (2 MB), 4194304 (4 MB), 8388608 (8 MB), and so
+ on. The minimum allowable part size is 1 MB, and the maximum
+ is 4 GB.
+
+ Every part you upload to this resource (see
+ UploadMultipartPart), except the last one, must have the same
+ size. The last one can be the same size or smaller. For
+ example, suppose you want to upload a 16.2 MB file. If you
+ initiate the multipart upload with a part size of 4 MB, you
+ will upload four parts of 4 MB each and one part of 0.2 MB.
+
+
+ You don't need to know the size of the archive when you start
+ a multipart upload because Amazon Glacier does not require you
+ to specify the overall archive size.
+
+
+ After you complete the multipart upload, Amazon Glacier
+ removes the multipart upload resource referenced by the ID.
+ Amazon Glacier also removes the multipart upload resource if
+ you cancel the multipart upload or it may be removed if there
+ is no activity for a period of 24 hours.
+
+ An AWS account has full permission to perform all operations
+ (actions). However, AWS Identity and Access Management (IAM)
+ users don't have any permissions by default. You must grant
+ them explicit permission to perform specific actions. For more
+ information, see `Access Control Using AWS Identity and Access
+ Management (IAM)`_.
+
+ For conceptual information and underlying REST API, go to
+ `Uploading Large Archives in Parts (Multipart Upload)`_ and
+ `Initiate Multipart Upload`_ in the Amazon Glacier Developer
+ Guide .
+
+ The part size must be a megabyte (1024 KB) multiplied by a power of
+ 2, for example, 1048576 (1 MB), 2097152 (2 MB), 4194304 (4 MB),
+ 8388608 (8 MB), and so on. The minimum allowable part size is 1 MB,
+ and the maximum is 4 GB (4096 MB).
:type vault_name: str
:param vault_name: The name of the vault.
:type description: str
- :param description: An optional description of the archive.
+ :param description: The archive description that you are uploading in
+ parts.
:type part_size: int
- :param part_size: The size of each part except the last, in bytes.
- The part size must be a multiple of 1024 KB multiplied by
- a power of 2. The minimum allowable part size is 1MB and the
- maximum is 4GB.
+ :param part_size: The size of each part except the last, in bytes. The
+ last part can be smaller than this part size.
"""
response_headers = [('x-amz-multipart-upload-id', u'UploadId'),
('Location', u'Location')]
@@ -491,24 +950,77 @@ def initiate_multipart_upload(self, vault_name, part_size,
def complete_multipart_upload(self, vault_name, upload_id,
sha256_treehash, archive_size):
"""
- Call this to inform Amazon Glacier that all of the archive parts
- have been uploaded and Amazon Glacier can now assemble the archive
- from the uploaded parts.
+ You call this operation to inform Amazon Glacier that all the
+ archive parts have been uploaded and that Amazon Glacier can
+ now assemble the archive from the uploaded parts. After
+ assembling and saving the archive to the vault, Amazon Glacier
+ returns the URI path of the newly created archive resource.
+ Using the URI path, you can then access the archive. After you
+ upload an archive, you should save the archive ID returned to
+ retrieve the archive at a later point. You can also get the
+ vault inventory to obtain a list of archive IDs in a vault.
+ For more information, see InitiateJob.
+
+ In the request, you must include the computed SHA256 tree hash
+ of the entire archive you have uploaded. For information about
+ computing a SHA256 tree hash, see `Computing Checksums`_. On
+ the server side, Amazon Glacier also constructs the SHA256
+ tree hash of the assembled archive. If the values match,
+ Amazon Glacier saves the archive to the vault; otherwise, it
+ returns an error, and the operation fails. The ListParts
+ operation returns a list of parts uploaded for a specific
+ multipart upload. It includes checksum information for each
+ uploaded part that can be used to debug a bad checksum issue.
+
+ Additionally, Amazon Glacier also checks for any missing
+        content ranges when assembling the archive; if missing content
+ ranges are found, Amazon Glacier returns an error and the
+ operation fails.
+
+ Complete Multipart Upload is an idempotent operation. After
+ your first successful complete multipart upload, if you call
+ the operation again within a short period, the operation will
+ succeed and return the same archive ID. This is useful in the
+ event you experience a network issue that causes an aborted
+ connection or receive a 500 server error, in which case you
+ can repeat your Complete Multipart Upload request and get the
+ same archive ID without creating duplicate archives. Note,
+ however, that after the multipart upload completes, you cannot
+ call the List Parts operation and the multipart upload will
+ not appear in List Multipart Uploads response, even if
+ idempotent complete is possible.
+
+ An AWS account has full permission to perform all operations
+ (actions). However, AWS Identity and Access Management (IAM)
+ users don't have any permissions by default. You must grant
+ them explicit permission to perform specific actions. For more
+ information, see `Access Control Using AWS Identity and Access
+ Management (IAM)`_.
+
+ For conceptual information and underlying REST API, go to
+ `Uploading Large Archives in Parts (Multipart Upload)`_ and
+ `Complete Multipart Upload`_ in the Amazon Glacier Developer
+        Guide.
+
+ :type checksum: string
+ :param checksum: The SHA256 tree hash of the entire archive. It is the
+ tree hash of SHA256 tree hash of the individual parts. If the value
+ you specify in the request does not match the SHA256 tree hash of
+ the final assembled archive as computed by Amazon Glacier, Amazon
+ Glacier returns an error and the request fails.
:type vault_name: str
:param vault_name: The name of the vault.
:type upload_id: str
- :param upload_id: The unique ID associated with this upload
- operation.
+ :param upload_id: The upload ID of the multipart upload.
:type sha256_treehash: str
- :param sha256_treehash: The SHA256 tree hash of the entire
- archive. It is the tree hash of SHA256 tree hash of the
- individual parts. If the value you specify in the request
- does not match the SHA256 tree hash of the final assembled
- archive as computed by Amazon Glacier, Amazon Glacier
- returns an error and the request fails.
+ :param sha256_treehash: The SHA256 tree hash of the entire archive.
+ It is the tree hash of SHA256 tree hash of the individual parts.
+ If the value you specify in the request does not match the SHA256
+ tree hash of the final assembled archive as computed by Amazon
+ Glacier, Amazon Glacier returns an error and the request fails.
:type archive_size: int
:param archive_size: The total size, in bytes, of the entire
@@ -527,37 +1039,90 @@ def complete_multipart_upload(self, vault_name, upload_id,
def abort_multipart_upload(self, vault_name, upload_id):
"""
- Call this to abort a multipart upload identified by the upload ID.
-
- :type vault_name: str
+ This operation aborts a multipart upload identified by the
+ upload ID.
+
+ After the Abort Multipart Upload request succeeds, you cannot
+ upload any more parts to the multipart upload or complete the
+ multipart upload. Aborting a completed upload fails. However,
+ aborting an already-aborted upload will succeed, for a short
+ time. For more information about uploading a part and
+ completing a multipart upload, see UploadMultipartPart and
+ CompleteMultipartUpload.
+
+ This operation is idempotent.
+
+ An AWS account has full permission to perform all operations
+ (actions). However, AWS Identity and Access Management (IAM)
+ users don't have any permissions by default. You must grant
+ them explicit permission to perform specific actions. For more
+ information, see `Access Control Using AWS Identity and Access
+ Management (IAM)`_.
+
+ For conceptual information and underlying REST API, go to
+ `Working with Archives in Amazon Glacier`_ and `Abort
+        Multipart Upload`_ in the Amazon Glacier Developer Guide.
+
+ :type vault_name: string
:param vault_name: The name of the vault.
- :type upload_id: str
- :param upload_id: The unique ID associated with this upload
- operation.
+ :type upload_id: string
+ :param upload_id: The upload ID of the multipart upload to delete.
"""
uri = 'vaults/%s/multipart-uploads/%s' % (vault_name, upload_id)
return self.make_request('DELETE', uri, ok_responses=(204,))
def list_multipart_uploads(self, vault_name, limit=None, marker=None):
"""
- Lists in-progress multipart uploads for the specified vault.
-
- :type vault_name: str
+ This operation lists in-progress multipart uploads for the
+ specified vault. An in-progress multipart upload is a
+ multipart upload that has been initiated by an
+ InitiateMultipartUpload request, but has not yet been
+ completed or aborted. The list returned in the List Multipart
+ Upload response has no guaranteed order.
+
+ The List Multipart Uploads operation supports pagination. By
+ default, this operation returns up to 1,000 multipart uploads
+ in the response. You should always check the response for a
+ `marker` at which to continue the list; if there are no more
+ items the `marker` is `null`. To return a list of multipart
+ uploads that begins at a specific upload, set the `marker`
+ request parameter to the value you obtained from a previous
+ List Multipart Upload request. You can also limit the number
+ of uploads returned in the response by specifying the `limit`
+ parameter in the request.
+
+ Note the difference between this operation and listing parts
+ (ListParts). The List Multipart Uploads operation lists all
+ multipart uploads for a vault and does not require a multipart
+ upload ID. The List Parts operation requires a multipart
+ upload ID since parts are associated with a single upload.
+
+ An AWS account has full permission to perform all operations
+ (actions). However, AWS Identity and Access Management (IAM)
+ users don't have any permissions by default. You must grant
+ them explicit permission to perform specific actions. For more
+ information, see `Access Control Using AWS Identity and Access
+ Management (IAM)`_.
+
+ For conceptual information and the underlying REST API, go to
+ `Working with Archives in Amazon Glacier`_ and `List Multipart
+        Uploads`_ in the Amazon Glacier Developer Guide.
+
+ :type vault_name: string
:param vault_name: The name of the vault.
- :type limit: int
- :param limit: The maximum number of items returned in the
- response. If you don't specify a value, the operation
- returns up to 1,000 items.
-
- :type marker: str
- :param marker: An opaque string used for pagination. marker
- specifies the item at which the listing should
- begin. Get the marker value from a previous
- response. You need only include the marker if you are
- continuing the pagination of results started in a previous
- request.
+ :type limit: string
+ :param limit: Specifies the maximum number of uploads returned in the
+ response body. If this value is not specified, the List Uploads
+ operation returns up to 1,000 uploads.
+
+ :type marker: string
+ :param marker: An opaque string used for pagination. This value
+ specifies the upload at which the listing of uploads should begin.
+ Get the marker value from a previous List Uploads response. You
+ need only include the marker if you are continuing the pagination
+ of results started in a previous List Uploads request.
"""
params = {}
if limit:
@@ -569,27 +1134,51 @@ def list_multipart_uploads(self, vault_name, limit=None, marker=None):
def list_parts(self, vault_name, upload_id, limit=None, marker=None):
"""
- Lists in-progress multipart uploads for the specified vault.
-
- :type vault_name: str
+ This operation lists the parts of an archive that have been
+ uploaded in a specific multipart upload. You can make this
+ request at any time during an in-progress multipart upload
+        before you complete the upload (see CompleteMultipartUpload).
+ List Parts returns an error for completed uploads. The list
+ returned in the List Parts response is sorted by part range.
+
+ The List Parts operation supports pagination. By default, this
+ operation returns up to 1,000 uploaded parts in the response.
+ You should always check the response for a `marker` at which
+ to continue the list; if there are no more items the `marker`
+ is `null`. To return a list of parts that begins at a specific
+ part, set the `marker` request parameter to the value you
+ obtained from a previous List Parts request. You can also
+ limit the number of parts returned in the response by
+ specifying the `limit` parameter in the request.
+
+ An AWS account has full permission to perform all operations
+ (actions). However, AWS Identity and Access Management (IAM)
+ users don't have any permissions by default. You must grant
+ them explicit permission to perform specific actions. For more
+ information, see `Access Control Using AWS Identity and Access
+ Management (IAM)`_.
+
+ For conceptual information and the underlying REST API, go to
+ `Working with Archives in Amazon Glacier`_ and `List Parts`_
+        in the Amazon Glacier Developer Guide.
+
+ :type vault_name: string
:param vault_name: The name of the vault.
- :type upload_id: str
- :param upload_id: The unique ID associated with this upload
- operation.
+ :type upload_id: string
+ :param upload_id: The upload ID of the multipart upload.
- :type limit: int
- :param limit: The maximum number of items returned in the
- response. If you don't specify a value, the operation
- returns up to 1,000 items.
+ :type marker: string
+ :param marker: An opaque string used for pagination. This value
+ specifies the part at which the listing of parts should begin. Get
+ the marker value from the response of a previous List Parts
+ response. You need only include the marker if you are continuing
+ the pagination of results started in a previous List Parts request.
- :type marker: str
- :param marker: An opaque string used for pagination. marker
- specifies the item at which the listing should
- begin. Get the marker value from a previous
- response. You need only include the marker if you are
- continuing the pagination of results started in a previous
- request.
+ :type limit: string
+ :param limit: Specifies the maximum number of parts returned in the
+ response body. If this value is not specified, the List Parts
+            operation returns up to 1,000 parts.
"""
params = {}
if limit:
@@ -602,7 +1191,55 @@ def list_parts(self, vault_name, upload_id, limit=None, marker=None):
def upload_part(self, vault_name, upload_id, linear_hash,
tree_hash, byte_range, part_data):
"""
- Lists in-progress multipart uploads for the specified vault.
+ This operation uploads a part of an archive. You can upload
+ archive parts in any order. You can also upload them in
+ parallel. You can upload up to 10,000 parts for a multipart
+ upload.
+
+ Amazon Glacier rejects your upload part request if any of the
+ following conditions is true:
+
+
+        + **SHA256 tree hash does not match** To ensure that part data
+ is not corrupted in transmission, you compute a SHA256 tree
+ hash of the part and include it in your request. Upon
+ receiving the part data, Amazon Glacier also computes a SHA256
+ tree hash. If these hash values don't match, the operation
+ fails. For information about computing a SHA256 tree hash, see
+ `Computing Checksums`_.
+        + **Part size does not match** The size of each part except the
+ last must match the size specified in the corresponding
+ InitiateMultipartUpload request. The size of the last part
+ must be the same size as, or smaller than, the specified size.
+ If you upload a part whose size is smaller than the part size
+ you specified in your initiate multipart upload request and
+ that part is not the last part, then the upload part request
+ will succeed. However, the subsequent Complete Multipart
+ Upload request will fail.
+        + **Range does not align** The byte range value in the request
+ does not align with the part size specified in the
+ corresponding initiate request. For example, if you specify a
+ part size of 4194304 bytes (4 MB), then 0 to 4194303 bytes (4
+ MB - 1) and 4194304 (4 MB) to 8388607 (8 MB - 1) are valid
+ part ranges. However, if you set a range value of 2 MB to 6
+ MB, the range does not align with the part size and the upload
+ will fail.
+
+
+ This operation is idempotent. If you upload the same part
+ multiple times, the data included in the most recent request
+ overwrites the previously uploaded data.
+
+ An AWS account has full permission to perform all operations
+ (actions). However, AWS Identity and Access Management (IAM)
+ users don't have any permissions by default. You must grant
+ them explicit permission to perform specific actions. For more
+ information, see `Access Control Using AWS Identity and Access
+ Management (IAM)`_.
+
+ For conceptual information and underlying REST API, go to
+ `Uploading Large Archives in Parts (Multipart Upload)`_ and
+        `Upload Part`_ in the Amazon Glacier Developer Guide.
:type vault_name: str
:param vault_name: The name of the vault.
@@ -621,8 +1258,11 @@ def upload_part(self, vault_name, upload_id, linear_hash,
operation.
:type byte_range: tuple of ints
- :param byte_range: Identfies the range of bytes in the assembled
- archive that will be uploaded in this part.
+ :param byte_range: Identifies the range of bytes in the assembled
+ archive that will be uploaded in this part. Amazon Glacier uses
+ this information to assemble the archive in the proper sequence.
+ The format of this header follows RFC 2616. An example header is
+ Content-Range:bytes 0-4194303/*.
:type part_data: bytes
:param part_data: The data to be uploaded for the part
View
41 boto/glacier/vault.py
@@ -300,7 +300,9 @@ def retrieve_archive(self, archive_id, sns_topic=None,
return self.get_job(response['JobId'])
def retrieve_inventory(self, sns_topic=None,
- description=None):
+ description=None, byte_range=None,
+ start_date=None, end_date=None,
+ limit=None):
"""
Initiate a inventory retrieval job to list the items in the
vault. You will need to wait for the notification from
@@ -315,6 +317,18 @@ def retrieve_inventory(self, sns_topic=None,
sends notification when the job is completed and the output
is ready for you to download.
+ :type byte_range: str
+ :param byte_range: Range of bytes to retrieve.
+
+ :type start_date: DateTime
+ :param start_date: Beginning of the date range to query.
+
+ :type end_date: DateTime
+ :param end_date: End of the date range to query.
+
+ :type limit: int
+ :param limit: Limits the number of results returned.
+
:rtype: str
:return: The ID of the job
"""
@@ -323,6 +337,19 @@ def retrieve_inventory(self, sns_topic=None,
job_data['SNSTopic'] = sns_topic
if description is not None:
job_data['Description'] = description
+ if byte_range is not None:
+ job_data['RetrievalByteRange'] = byte_range
+ if start_date is not None or end_date is not None or limit is not None:
+ rparams = {}
+
+ if start_date is not None:
+ rparams['StartDate'] = start_date.isoformat()
+ if end_date is not None:
+ rparams['EndDate'] = end_date.isoformat()
+ if limit is not None:
+ rparams['Limit'] = limit
+
+ job_data['InventoryRetrievalParameters'] = rparams
response = self.layer1.initiate_job(self.name, job_data)
return response['JobId']
@@ -340,6 +367,18 @@ def retrieve_inventory_job(self, **kwargs):
sends notification when the job is completed and the output
is ready for you to download.
+ :type byte_range: str
+ :param byte_range: Range of bytes to retrieve.
+
+ :type start_date: DateTime
+ :param start_date: Beginning of the date range to query.
+
+ :type end_date: DateTime
+ :param end_date: End of the date range to query.
+
+ :type limit: int
+ :param limit: Limits the number of results returned.
+
:rtype: :class:`boto.glacier.job.Job`
:return: A Job object representing the retrieval job.
"""
View
18 boto/gs/key.py
@@ -219,7 +219,7 @@ def get_contents_to_file(self, fp, headers=None,
with the stored object in the response. See
http://goo.gl/sMkcC for details.
"""
- if self.bucket != None:
+ if self.bucket is not None:
if res_download_handler:
res_download_handler.get_file(self, fp, headers, cb, num_cb,
torrent=torrent,
@@ -528,7 +528,7 @@ def set_contents_from_file(self, fp, headers=None, replace=True,
if hasattr(fp, 'name'):
self.path = fp.name
- if self.bucket != None:
+ if self.bucket is not None:
if isinstance(fp, KeyFile):
# Avoid EOF seek for KeyFile case as it's very inefficient.
key = fp.getkey()
@@ -552,12 +552,12 @@ def set_contents_from_file(self, fp, headers=None, replace=True,
fp.seek(spos)
size = self.size
- if md5 == None:
+ if md5 is None:
md5 = self.compute_md5(fp, size)
self.md5 = md5[0]
self.base64md5 = md5[1]
- if self.name == None:
+ if self.name is None:
self.name = self.md5
if not replace:
@@ -792,7 +792,7 @@ def set_acl(self, acl_or_str, headers=None, generation=None,
the acl will only be updated if its current metageneration number is
this value.
"""
- if self.bucket != None:
+ if self.bucket is not None:
self.bucket.set_acl(acl_or_str, self.name, headers=headers,
generation=generation,
if_generation=if_generation,
@@ -809,7 +809,7 @@ def get_acl(self, headers=None, generation=None):
:rtype: :class:`.gs.acl.ACL`
"""
- if self.bucket != None:
+ if self.bucket is not None:
return self.bucket.get_acl(self.name, headers=headers,
generation=generation)
@@ -824,7 +824,7 @@ def get_xml_acl(self, headers=None, generation=None):
:rtype: str
"""
- if self.bucket != None:
+ if self.bucket is not None:
return self.bucket.get_xml_acl(self.name, headers=headers,
generation=generation)
@@ -852,7 +852,7 @@ def set_xml_acl(self, acl_str, headers=None, generation=None,
the acl will only be updated if its current metageneration number is
this value.
"""
- if self.bucket != None:
+ if self.bucket is not None:
return self.bucket.set_xml_acl(acl_str, self.name, headers=headers,
generation=generation,
if_generation=if_generation,
@@ -883,7 +883,7 @@ def set_canned_acl(self, acl_str, headers=None, generation=None,