Skip to content

HTTPS clone URL

Subversion checkout URL

You can clone with
or
.
Download ZIP
Browse files

Merge branch 'release-2.14.0'

  • Loading branch information...
commit 5005178d88ef8e4bbe8c30949ffab46e46a716b0 2 parents 3c56d13 + a008516
@danielgtaylor danielgtaylor authored
Showing with 12,671 additions and 2,460 deletions.
  1. +2 −0  .gitignore
  2. +7 −10 README.rst
  3. +1 −1  bin/dynamodb_load
  4. +2 −3 bin/elbadmin
  5. +1 −1  bin/instance_events
  6. +1 −1  bin/list_instances
  7. +75 −23 bin/s3put
  8. +7 −2 boto/__init__.py
  9. +52 −6 boto/auth.py
  10. +2,814 −563 boto/cacerts/cacerts.txt
  11. +8 −5 boto/cloudformation/stack.py
  12. +14 −12 boto/cloudfront/distribution.py
  13. +1 −1  boto/cloudsearch/__init__.py
  14. +17 −9 boto/cloudsearch/layer1.py
  15. +12 −4 boto/cloudsearch/layer2.py
  16. +13 −2 boto/cloudsearch/search.py
  17. +91 −31 boto/connection.py
  18. +3 −0  boto/dynamodb/__init__.py
  19. +6 −0 boto/dynamodb2/__init__.py
  20. +138 −64 boto/dynamodb2/items.py
  21. +13 −4 boto/dynamodb2/layer1.py
  22. +13 −9 boto/dynamodb2/results.py
  23. +52 −14 boto/dynamodb2/table.py
  24. +6 −0 boto/ec2/__init__.py
  25. +25 −8 boto/ec2/address.py
  26. +40 −8 boto/ec2/autoscale/__init__.py
  27. +6 −0 boto/ec2/autoscale/group.py
  28. +7 −0 boto/ec2/autoscale/policy.py
  29. +3 −3 boto/ec2/autoscale/tag.py
  30. +10 −2 boto/ec2/blockdevicemapping.py
  31. +1 −0  boto/ec2/cloudwatch/__init__.py
  32. +2 −2 boto/ec2/cloudwatch/alarm.py
  33. +899 −127 boto/ec2/connection.py
  34. +14 −6 boto/ec2/ec2object.py
  35. +33 −2 boto/ec2/elb/__init__.py
  36. +39 −2 boto/ec2/elb/loadbalancer.py
  37. +21 −1 boto/ec2/elb/policies.py
  38. +58 −24 boto/ec2/image.py
  39. +60 −44 boto/ec2/instance.py
  40. +9 −9 boto/ec2/keypair.py
  41. +46 −6 boto/ec2/networkinterface.py
  42. +7 −4 boto/ec2/placementgroup.py
  43. +124 −2 boto/ec2/reservedinstance.py
  44. +64 −29 boto/ec2/securitygroup.py
  45. +39 −22 boto/ec2/snapshot.py
  46. +6 −4 boto/ec2/spotdatafeedsubscription.py
  47. +5 −2 boto/ec2/spotinstancerequest.py
  48. +36 −16 boto/ec2/volume.py
  49. +3 −0  boto/elasticache/__init__.py
  50. +777 −368 boto/elasticache/layer1.py
  51. +7 −7 boto/emr/__init__.py
  52. +1 −1  boto/emr/connection.py
  53. +2 −2 boto/emr/instance_group.py
  54. +12 −6 boto/exception.py
  55. +10 −2 boto/glacier/layer2.py
  56. +21 −3 boto/gs/bucket.py
  57. +11 −2 boto/gs/key.py
  58. +3 −0  boto/iam/__init__.py
  59. +4 −1 boto/iam/connection.py
  60. +3 −3 boto/manage/server.py
  61. +1 −1  boto/mashups/server.py
  62. +4 −0 boto/mws/response.py
  63. +818 −200 boto/opsworks/layer1.py
  64. +4 −0 boto/provider.py
  65. +1 −1  boto/pyami/installers/ubuntu/ebs.py
  66. +157 −21 boto/rds/__init__.py
  67. +16 −1 boto/rds/dbinstance.py
  68. +69 −0 boto/rds/dbsubnetgroup.py
  69. +85 −0 boto/rds/vpcsecuritygroupmembership.py
  70. +8 −0 boto/redshift/exceptions.py
  71. +154 −41 boto/redshift/layer1.py
  72. +3 −0  boto/route53/record.py
  73. +3 −0  boto/s3/__init__.py
  74. +35 −19 boto/s3/key.py
  75. +1 −1  boto/s3/keyfile.py
  76. +4 −0 boto/s3/multipart.py
  77. +2 −2 boto/sdb/db/manager/__init__.py
  78. +11 −7 boto/sdb/db/manager/sdbmanager.py
  79. +14 −11 boto/sdb/db/model.py
  80. +2 −1  boto/ses/connection.py
  81. +3 −0  boto/sns/__init__.py
  82. +416 −128 boto/sns/connection.py
  83. +2 −0  boto/sqs/__init__.py
  84. +3 −0  boto/sqs/message.py
  85. +4 −0 boto/sts/__init__.py
  86. +6 −2 boto/sts/connection.py
  87. +3 −0  boto/sts/credentials.py
  88. +1 −0  boto/swf/__init__.py
  89. +22 −22 boto/swf/layer1.py
  90. +20 −20 boto/swf/layer2.py
  91. +282 −47 boto/vpc/__init__.py
  92. +5 −2 boto/vpc/vpc.py
  93. +5 −2 boto/vpc/vpnconnection.py
  94. +10 −6 boto/vpc/vpngateway.py
  95. +7 −1 docs/Makefile
  96. +44 −0 docs/source/apps_built_on_boto.rst
  97. +1 −2  docs/source/autoscale_tut.rst
  98. +85 −0 docs/source/commandline.rst
  99. +23 −0 docs/source/contributing.rst
  100. +2 −6 docs/source/dynamodb2_tut.rst
  101. +1 −1  docs/source/ec2_tut.rst
  102. +18 −32 docs/source/index.rst
  103. +7 −0 docs/source/ref/cloudwatch.rst
  104. +2 −15 docs/source/ref/contrib.rst
  105. +20 −6 docs/source/ref/elb.rst
  106. +28 −0 docs/source/ref/opsworks.rst
  107. +0 −7 docs/source/ref/s3.rst
  108. +9 −22 docs/source/ref/sdb_db.rst
  109. +4 −1 docs/source/ref/swf.rst
  110. +21 −0 docs/source/releasenotes/dev.rst
  111. +21 −0 docs/source/releasenotes/releasenotes_template.rst
  112. +1 −1  docs/source/releasenotes/v2.0.0.rst
  113. +54 −0 docs/source/releasenotes/v2.10.0.rst
  114. +62 −0 docs/source/releasenotes/v2.11.0.rst
  115. +32 −0 docs/source/releasenotes/v2.12.0.rst
  116. +40 −0 docs/source/releasenotes/v2.13.0.rst
  117. +39 −0 docs/source/releasenotes/v2.13.2.rst
  118. +11 −0 docs/source/releasenotes/v2.13.3.rst
  119. +63 −0 docs/source/releasenotes/v2.14.0.rst
  120. +1 −1  docs/source/releasenotes/v2.9.8.rst
  121. +50 −0 docs/source/releasenotes/v2.9.9.rst
  122. +1 −1  docs/source/s3_tut.rst
  123. +1 −1  docs/source/sqs_tut.rst
  124. +67 −0 tests/integration/__init__.py
  125. +7 −8 tests/integration/cloudformation/test_cert_verification.py
  126. +8 −9 tests/integration/cloudsearch/test_cert_verification.py
  127. +75 −0 tests/integration/cloudsearch/test_layers.py
  128. +7 −8 tests/integration/dynamodb/test_cert_verification.py
  129. +7 −8 tests/integration/dynamodb2/test_cert_verification.py
  130. +73 −4 tests/integration/dynamodb2/test_highlevel.py
  131. +7 −8 tests/integration/ec2/autoscale/test_cert_verification.py
  132. +7 −8 tests/integration/ec2/cloudwatch/test_cert_verification.py
  133. +7 −8 tests/integration/ec2/elb/test_cert_verification.py
  134. +22 −0 tests/integration/ec2/elb/test_connection.py
  135. +7 −8 tests/integration/ec2/test_cert_verification.py
  136. +50 −1 tests/integration/ec2/test_connection.py
  137. +47 −2 tests/integration/ec2/vpc/test_connection.py
  138. +7 −8 tests/integration/elastictranscoder/test_cert_verification.py
  139. +7 −8 tests/integration/emr/test_cert_verification.py
  140. +7 −8 tests/integration/glacier/test_cert_verification.py
  141. +7 −2 tests/integration/gs/cb_test_harness.py
  142. +62 −33 tests/integration/gs/test_resumable_uploads.py
  143. +2 −2 tests/integration/gs/testcase.py
  144. +2 −1  tests/integration/gs/util.py
  145. +7 −8 tests/integration/iam/test_cert_verification.py
  146. +10 −0 tests/integration/mws/test.py
  147. +7 −8 tests/integration/rds/test_cert_verification.py
  148. +92 −0 tests/integration/rds/test_db_subnet_group.py
  149. +7 −8 tests/integration/redshift/test_cert_verification.py
  150. +7 −8 tests/integration/route53/test_cert_verification.py
  151. +47 −1 tests/integration/route53/test_resourcerecordsets.py
  152. +7 −8 tests/integration/s3/test_cert_verification.py
  153. +11 −0 tests/integration/s3/test_key.py
  154. +20 −0 tests/integration/s3/test_multipart.py
  155. +7 −8 tests/integration/sdb/test_cert_verification.py
  156. +7 −8 tests/integration/ses/test_cert_verification.py
  157. +2 −0  tests/integration/ses/test_connection.py
  158. +7 −8 tests/integration/sns/test_cert_verification.py
  159. +68 −0 tests/integration/sns/test_connection.py
  160. +3 −3 tests/integration/sns/test_sns_sqs_subscription.py
  161. +7 −8 tests/integration/sqs/test_cert_verification.py
  162. +42 −0 tests/integration/sqs/test_connection.py
  163. +7 −8 tests/integration/sts/test_cert_verification.py
  164. +6 −4 tests/integration/sts/test_session_token.py
  165. +7 −8 tests/integration/support/test_cert_verification.py
  166. +7 −8 tests/integration/swf/test_cert_verification.py
  167. +76 −0 tests/unit/auth/test_query.py
  168. +81 −0 tests/unit/auth/test_sigv4.py
  169. +5 −5 tests/unit/cloudformation/test_connection.py
  170. +38 −4 tests/unit/cloudformation/test_stack.py
  171. +21 −0 tests/unit/cloudfront/test_distribution.py
  172. +17 −0 tests/unit/cloudfront/test_signed_urls.py
  173. +3 −12 tests/unit/cloudsearch/test_connection.py
  174. +37 −0 tests/unit/cloudsearch/test_exceptions.py
  175. +35 −3 tests/unit/cloudsearch/test_search.py
  176. +8 −0 tests/unit/dynamodb2/test_layer1.py
  177. +207 −2 tests/unit/dynamodb2/test_table.py
  178. +256 −0 tests/unit/ec2/autoscale/test_group.py
  179. +97 −0 tests/unit/ec2/elb/test_loadbalancer.py
  180. +13 −3 tests/unit/ec2/test_address.py
  181. +54 −0 tests/unit/ec2/test_blockdevicemapping.py
  182. +657 −0 tests/unit/ec2/test_connection.py
  183. +1 −1  tests/unit/ec2/test_instance.py
  184. +104 −44 tests/unit/ec2/test_networkinterface.py
  185. +212 −0 tests/unit/ec2/test_securitygroup.py
  186. +28 −9 tests/unit/ec2/test_volume.py
  187. +57 −0 tests/unit/emr/test_instance_group_args.py
  188. +49 −3 tests/unit/glacier/test_layer2.py
  189. +328 −0 tests/unit/rds/test_connection.py
  190. +7 −14 tests/unit/s3/test_key.py
  191. 0  tests/unit/ses/__init__.py
  192. +82 −0 tests/unit/ses/test_identity.py
  193. +135 −0 tests/unit/sns/test_connection.py
  194. +35 −0 tests/unit/sqs/test_message.py
  195. 0  tests/unit/sts/__init__.py
  196. +88 −0 tests/unit/sts/test_connection.py
  197. +38 −0 tests/unit/sts/test_credentials.py
  198. 0  tests/unit/swf/__init__.py
  199. +87 −0 tests/unit/swf/test_layer2_actors.py
  200. +112 −0 tests/unit/swf/test_layer2_domain.py
  201. +46 −0 tests/unit/swf/test_layer2_types.py
  202. +100 −1 tests/unit/test_connection.py
  203. +45 −3 tests/unit/test_exception.py
  204. +3 −0  tests/unit/vpc/__init__.py
  205. +60 −3 tests/unit/vpc/test_vpc.py
View
2  .gitignore
@@ -12,3 +12,5 @@ MANIFEST
.coverage
*flymake.py
venv
+venv-2.5
+env-2.5
View
17 README.rst
@@ -1,13 +1,13 @@
####
boto
####
-boto 2.9.8
+boto 2.13.3
-Released: 18-July-2013
+Released: 16-September-2013
.. image:: https://travis-ci.org/boto/boto.png?branch=develop
:target: https://travis-ci.org/boto/boto
-
+
.. image:: https://pypip.in/d/boto/badge.png
:target: https://crate.io/packages/boto/
@@ -41,6 +41,7 @@ At the moment, boto supports:
* AWS Elastic Beanstalk
* AWS CloudFormation
* AWS Data Pipeline
+ * AWS Opsworks
* Identity & Access
@@ -49,6 +50,7 @@ At the moment, boto supports:
* Application Services
* Amazon CloudSearch
+ * Amazon Elastic Transcoder
* Amazon Simple Workflow Service (SWF)
* Amazon Simple Queue Service (SQS)
* Amazon Simple Notification Server (SNS)
@@ -89,9 +91,9 @@ Web Services. In addition, boto provides support for other public
services such as Google Storage in addition to private cloud systems
like Eucalyptus, OpenStack and Open Nebula.
-Boto is developed mainly using Python 2.6.6 and Python 2.7.1 on Mac OSX
+Boto is developed mainly using Python 2.6.6 and Python 2.7.3 on Mac OSX
and Ubuntu Maverick. It is known to work on other Linux distributions
-and on Windows. Boto requires no additional libraries or packages
+and on Windows. Most of Boto requires no additional libraries or packages
other than those that are distributed with Python. Efforts are made
to keep boto compatible with Python 2.5.x but no guarantees are made.
@@ -153,11 +155,6 @@ following environment variables to ascertain your credentials:
Credentials and other boto-related settings can also be stored in a
boto config file. See `this`_ for details.
-Copyright (c) 2006-2012 Mitch Garnaat <mitch@garnaat.com>
-Copyright (c) 2010-2011, Eucalyptus Systems, Inc.
-Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
-All rights reserved.
-
.. _pip: http://www.pip-installer.org/
.. _release notes: https://github.com/boto/boto/wiki
.. _github.com: http://github.com/boto/boto
View
2  bin/dynamodb_load
@@ -66,7 +66,7 @@ def load_table(table, in_fd):
data[k] = set(v)
else:
data[k] = v
- table.new_item(attrs=i).put()
+ table.new_item(attrs=data).put()
def dynamodb_load(tables, in_dir, create_tables):
View
5 bin/elbadmin
@@ -118,9 +118,8 @@ def get(elb, name):
instances = [state.instance_id for state in instance_health]
names = {}
- for r in ec2.get_all_instances(instances):
- for i in r.instances:
- names[i.id] = i.tags.get('Name', '')
+ for i in ec2.get_only_instances(instances):
+ names[i.id] = i.tags.get('Name', '')
name_column_width = max([4] + [len(v) for k,v in names.iteritems()]) + 2
View
2  bin/instance_events
@@ -51,7 +51,7 @@ def list(region, headers, order, completed):
ec2 = boto.connect_ec2(region=region)
- reservations = ec2.get_all_instances()
+ reservations = ec2.get_all_reservations()
instanceinfo = {}
events = {}
View
2  bin/list_instances
@@ -76,7 +76,7 @@ def main():
print format_string % headers
print "-" * len(format_string % headers)
- for r in ec2.get_all_instances(filters=filters):
+ for r in ec2.get_all_reservations(filters=filters):
groups = [g.name for g in r.groups]
for i in r.instances:
i.groups = ','.join(groups)
View
98 bin/s3put
@@ -37,7 +37,9 @@ try:
multipart_capable = True
usage_flag_multipart_capable = """ [--multipart]"""
usage_string_multipart_capable = """
- multipart - Upload files as multiple parts. This needs filechunkio."""
+ multipart - Upload files as multiple parts. This needs filechunkio.
+ Requires ListBucket, ListMultipartUploadParts,
+ ListBucketMultipartUploads and PutObject permissions."""
except ImportError as err:
multipart_capable = False
usage_flag_multipart_capable = ""
@@ -46,6 +48,8 @@ except ImportError as err:
'" is missing for multipart support '
+DEFAULT_REGION = 'us-east-1'
+
usage_string = """
SYNOPSIS
s3put [-a/--access_key <access_key>] [-s/--secret_key <secret_key>]
@@ -53,7 +57,8 @@ SYNOPSIS
[-d/--debug <debug_level>] [-i/--ignore <ignore_dirs>]
[-n/--no_op] [-p/--prefix <prefix>] [-k/--key_prefix <key_prefix>]
[-q/--quiet] [-g/--grant grant] [-w/--no_overwrite] [-r/--reduced]
- [--header] [--host <s3_host>]""" + usage_flag_multipart_capable + """ path [path...]
+ [--header] [--region <name>] [--host <s3_host>]""" + \
+ usage_flag_multipart_capable + """ path [path...]
Where
access_key - Your AWS Access Key ID. If not supplied, boto will
@@ -103,6 +108,9 @@ SYNOPSIS
updated.
header - key=value pairs of extra header(s) to pass along in the
request
+ region - Manually set a region for buckets that are not in the US
+ classic region. Normally the region is autodetected, but
+ setting this yourself is more efficient.
host - Hostname override, for using an endpoint other then AWS S3
""" + usage_string_multipart_capable + """
@@ -112,9 +120,9 @@ SYNOPSIS
"""
-def usage():
+def usage(status=1):
print usage_string
- sys.exit()
+ sys.exit(status)
def submit_cb(bytes_so_far, total_bytes):
@@ -168,11 +176,13 @@ def _upload_part(bucketname, aws_key, aws_secret, multipart_id, part_num,
def multipart_upload(bucketname, aws_key, aws_secret, source_path, keyname,
reduced, debug, cb, num_cb, acl='private', headers={},
- guess_mimetype=True, parallel_processes=4):
+ guess_mimetype=True, parallel_processes=4,
+ region=DEFAULT_REGION):
"""
Parallel multipart upload.
"""
- conn = S3Connection(aws_key, aws_secret)
+ conn = boto.s3.connect_to_region(region, aws_access_key_id=aws_key,
+ aws_secret_access_key=aws_secret)
conn.debug = debug
bucket = conn.get_bucket(bucketname)
@@ -242,6 +252,7 @@ def main():
headers = {}
host = None
multipart_requested = False
+ region = None
try:
opts, args = getopt.getopt(
@@ -249,14 +260,14 @@ def main():
['access_key=', 'bucket=', 'callback=', 'debug=', 'help', 'grant=',
'ignore=', 'key_prefix=', 'no_op', 'prefix=', 'quiet',
'secret_key=', 'no_overwrite', 'reduced', 'header=', 'multipart',
- 'host='])
+ 'host=', 'region='])
except:
- usage()
+ usage(1)
# parse opts
for o, a in opts:
if o in ('-h', '--help'):
- usage()
+ usage(0)
if o in ('-a', '--access_key'):
aws_access_key_id = a
if o in ('-b', '--bucket'):
@@ -288,7 +299,7 @@ def main():
if o in ('-r', '--reduced'):
reduced = True
if o in ('--header'):
- (k, v) = a.split("=")
+ (k, v) = a.split("=", 1)
headers[k] = v
if o in ('--host'):
host = a
@@ -297,23 +308,62 @@ def main():
multipart_requested = True
else:
print "multipart upload requested but not capable"
- sys.exit()
+ sys.exit(4)
+ if o in ('--region'):
+ regions = boto.s3.regions()
+ for region_info in regions:
+ if region_info.name == a:
+ region = a
+ break
+ else:
+ raise ValueError('Invalid region %s specified' % a)
if len(args) < 1:
- usage()
+ usage(2)
if not bucket_name:
print "bucket name is required!"
- usage()
+ usage(3)
+
+ connect_args = {
+ 'aws_access_key_id': aws_access_key_id,
+ 'aws_secret_access_key': aws_secret_access_key
+ }
if host:
- c = boto.connect_s3(host=host, aws_access_key_id=aws_access_key_id,
- aws_secret_access_key=aws_secret_access_key)
- else:
- c = boto.connect_s3(aws_access_key_id=aws_access_key_id,
- aws_secret_access_key=aws_secret_access_key)
+ connect_args['host'] = host
+
+ c = boto.s3.connect_to_region(region or DEFAULT_REGION, **connect_args)
c.debug = debug
- b = c.get_bucket(bucket_name)
+ b = c.get_bucket(bucket_name, validate=False)
+
+ # Attempt to determine location and warn if no --host or --region
+ # arguments were passed. Then try to automagically figure out
+ # what should have been passed and fix it.
+ if host is None and region is None:
+ try:
+ location = b.get_location()
+
+ # Classic region will be '', any other will have a name
+ if location:
+ print 'Bucket exists in %s but no host or region given!' % location
+
+ # Override for EU, which is really Ireland according to the docs
+ if location == 'EU':
+ location = 'eu-west-1'
+
+ print 'Automatically setting region to %s' % location
+
+ # Here we create a new connection, and then take the existing
+ # bucket and set it to use the new connection
+ c = boto.s3.connect_to_region(location, **connect_args)
+ c.debug = debug
+ b.connection = c
+ except Exception, e:
+ if debug > 0:
+ print e
+ print 'Could not get bucket region info, skipping...'
+
existing_keys_to_check_against = []
files_to_check_for_upload = []
@@ -350,9 +400,10 @@ def main():
key_name = get_key_name(fullpath, prefix, key_prefix)
if no_overwrite and key_name in existing_keys_to_check_against:
- if not quiet:
- print 'Skipping %s as it exists in s3' % fullpath
- continue
+ if b.get_key(key_name):
+ if not quiet:
+ print 'Skipping %s as it exists in s3' % fullpath
+ continue
if not quiet:
print 'Copying %s to %s/%s' % (fullpath, bucket_name, key_name)
@@ -364,7 +415,8 @@ def main():
multipart_upload(bucket_name, aws_access_key_id,
aws_secret_access_key, fullpath, key_name,
reduced, debug, cb, num_cb,
- grant or 'private', headers)
+ grant or 'private', headers,
+ region=region or DEFAULT_REGION)
else:
singlepart_upload(b, key_name, fullpath, cb=cb, num_cb=num_cb,
policy=grant, reduced_redundancy=reduced,
View
9 boto/__init__.py
@@ -36,10 +36,15 @@
import urlparse
from boto.exception import InvalidUriError
-__version__ = '2.9.8'
+__version__ = '2.14.0'
Version = __version__ # for backware compatibility
-UserAgent = 'Boto/%s (%s)' % (__version__, sys.platform)
+UserAgent = 'Boto/%s Python/%s %s/%s' % (
+ __version__,
+ platform.python_version(),
+ platform.system(),
+ platform.release()
+)
config = Config()
# Regex to disallow buckets violating charset or not [3..255] chars total.
View
58 boto/auth.py
@@ -385,8 +385,9 @@ def signed_headers(self, headers_to_sign):
def canonical_uri(self, http_request):
path = http_request.auth_path
- # Normalize the path.
- normalized = posixpath.normpath(path)
+ # Normalize the path
+ # in windows normpath('/') will be '\\' so we chane it back to '/'
+ normalized = posixpath.normpath(path).replace('\\','/')
# Then urlencode whatever's left.
encoded = urllib.quote(normalized)
if len(path) > 1 and path.endswith('/'):
@@ -430,11 +431,17 @@ def credential_scope(self, http_request):
parts = http_request.host.split('.')
if self.region_name is not None:
region_name = self.region_name
- else:
- if len(parts) == 3:
- region_name = 'us-east-1'
+ elif len(parts) > 1:
+ if parts[1] == 'us-gov':
+ region_name = 'us-gov-west-1'
else:
- region_name = parts[1]
+ if len(parts) == 3:
+ region_name = 'us-east-1'
+ else:
+ region_name = parts[1]
+ else:
+ region_name = parts[0]
+
if self.service_name is not None:
service_name = self.service_name
else:
@@ -509,6 +516,45 @@ def add_auth(self, req, **kwargs):
req.headers['Authorization'] = ','.join(l)
+class QueryAuthHandler(AuthHandler):
+ """
+ Provides pure query construction (no actual signing).
+
+ Mostly useful for STS' ``assume_role_with_web_identity``.
+
+ Does **NOT** escape query string values!
+ """
+
+ capability = ['pure-query']
+
+ def _escape_value(self, value):
+ # Would normally be ``return urllib.quote(value)``.
+ return value
+
+ def _build_query_string(self, params):
+ keys = params.keys()
+ keys.sort(cmp=lambda x, y: cmp(x.lower(), y.lower()))
+ pairs = []
+ for key in keys:
+ val = boto.utils.get_utf8_value(params[key])
+ pairs.append(key + '=' + self._escape_value(val))
+ return '&'.join(pairs)
+
+ def add_auth(self, http_request, **kwargs):
+ headers = http_request.headers
+ params = http_request.params
+ qs = self._build_query_string(
+ http_request.params
+ )
+ boto.log.debug('query_string: %s' % qs)
+ headers['Content-Type'] = 'application/json; charset=UTF-8'
+ http_request.body = ''
+ # if this is a retried request, the qs from the previous try will
+ # already be there, we need to get rid of that and rebuild it
+ http_request.path = http_request.path.split('?')[0]
+ http_request.path = http_request.path + '?' + qs
+
+
class QuerySignatureHelper(HmacKeys):
"""
Helper for Query signature based Auth handler.
View
3,377 boto/cacerts/cacerts.txt
2,814 additions, 563 deletions not shown
View
13 boto/cloudformation/stack.py 100644 → 100755
@@ -48,7 +48,10 @@ def endElement(self, name, value, connection):
elif name == "Description":
self.description = value
elif name == "DisableRollback":
- self.disable_rollback = bool(value)
+ if str(value).lower() == 'true':
+ self.disable_rollback = True
+ else:
+ self.disable_rollback = False
elif name == 'StackId':
self.stack_id = value
elif name == 'StackName':
@@ -292,7 +295,7 @@ def __repr__(self):
class StackResourceSummary(object):
def __init__(self, connection=None):
self.connection = connection
- self.last_updated_timestamp = None
+ self.last_updated_time = None
self.logical_resource_id = None
self.physical_resource_id = None
self.resource_status = None
@@ -303,14 +306,14 @@ def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
- if name == "LastUpdatedTimestamp":
+ if name == "LastUpdatedTime":
try:
- self.last_updated_timestamp = datetime.strptime(
+ self.last_updated_time = datetime.strptime(
value,
'%Y-%m-%dT%H:%M:%SZ'
)
except ValueError:
- self.last_updated_timestamp = datetime.strptime(
+ self.last_updated_time = datetime.strptime(
value,
'%Y-%m-%dT%H:%M:%S.%fZ'
)
View
26 boto/cloudfront/distribution.py
@@ -30,7 +30,7 @@
from boto.cloudfront.origin import S3Origin, CustomOrigin
from boto.s3.acl import ACL
-class DistributionConfig:
+class DistributionConfig(object):
def __init__(self, connection=None, origin=None, enabled=False,
caller_reference='', cnames=None, comment='',
@@ -100,7 +100,7 @@ def __init__(self, connection=None, origin=None, enabled=False,
self.cnames = cnames
self.comment = comment
self.trusted_signers = trusted_signers
- self.logging = None
+ self.logging = logging
self.default_root_object = default_root_object
def to_xml(self):
@@ -214,7 +214,7 @@ def to_xml(self):
s += '</StreamingDistributionConfig>\n'
return s
-class DistributionSummary:
+class DistributionSummary(object):
def __init__(self, connection=None, domain_name='', id='',
last_modified_time=None, status='', origin=None,
@@ -279,7 +279,7 @@ class StreamingDistributionSummary(DistributionSummary):
def get_distribution(self):
return self.connection.get_streaming_distribution_info(self.id)
-class Distribution:
+class Distribution(object):
def __init__(self, connection=None, config=None, domain_name='',
id='', last_modified_time=None, status=''):
@@ -362,14 +362,14 @@ def update(self, enabled=None, cnames=None, comment=None):
def enable(self):
"""
- Deactivate the Distribution. A convenience wrapper around
+ Activate the Distribution. A convenience wrapper around
the update method.
"""
self.update(enabled=True)
def disable(self):
"""
- Activate the Distribution. A convenience wrapper around
+ Deactivate the Distribution. A convenience wrapper around
the update method.
"""
self.update(enabled=False)
@@ -654,12 +654,14 @@ def _sign_string(message, private_key_file=None, private_key_string=None):
raise ValueError("Only specify the private_key_file or the private_key_string not both")
if not private_key_file and not private_key_string:
raise ValueError("You must specify one of private_key_file or private_key_string")
- # If private_key_file is a file, read its contents. Otherwise, open it and then read it
- if isinstance(private_key_file, file):
- private_key_string = private_key_file.read()
- elif private_key_file:
- with open(private_key_file, 'r') as file_handle:
- private_key_string = file_handle.read()
+ # If private_key_file is a file name, open it and read it
+ if private_key_string is None:
+ if isinstance(private_key_file, basestring):
+ with open(private_key_file, 'r') as file_handle:
+ private_key_string = file_handle.read()
+ # Otherwise, treat it like a file
+ else:
+ private_key_string = private_key_file.read()
# Sign it!
private_key = rsa.PrivateKey.load_pkcs1(private_key_string)
View
2  boto/cloudsearch/__init__.py
@@ -21,7 +21,7 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
-from boto.ec2.regioninfo import RegionInfo
+from boto.regioninfo import RegionInfo
def regions():
View
26 boto/cloudsearch/layer1.py
@@ -51,17 +51,25 @@ def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
self.region = region
- AWSQueryConnection.__init__(self, aws_access_key_id,
- aws_secret_access_key,
- is_secure, port, proxy, proxy_port,
- proxy_user, proxy_pass,
- self.region.endpoint, debug,
- https_connection_factory, path,
- security_token,
- validate_certs=validate_certs)
+ AWSQueryConnection.__init__(
+ self,
+ host=self.region.endpoint,
+ aws_access_key_id=aws_access_key_id,
+ aws_secret_access_key=aws_secret_access_key,
+ is_secure=is_secure,
+ port=port,
+ proxy=proxy,
+ proxy_port=proxy_port,
+ proxy_user=proxy_user,
+ proxy_pass=proxy_pass,
+ debug=debug,
+ https_connection_factory=https_connection_factory,
+ path=path,
+ security_token=security_token,
+ validate_certs=validate_certs)
def _required_auth_capability(self):
- return ['sign-v2']
+ return ['hmac-v4']
def get_response(self, doc_path, action, params, path='/',
parent=None, verb='GET', list_marker=None):
View
16 boto/cloudsearch/layer2.py
@@ -32,10 +32,18 @@ def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
host=None, debug=0, session_token=None, region=None,
validate_certs=True):
- self.layer1 = Layer1(aws_access_key_id, aws_secret_access_key,
- is_secure, port, proxy, proxy_port,
- host, debug, session_token, region,
- validate_certs=validate_certs)
+ self.layer1 = Layer1(
+ aws_access_key_id=aws_access_key_id,
+ aws_secret_access_key=aws_secret_access_key,
+ is_secure=is_secure,
+ port=port,
+ proxy=proxy,
+ proxy_port=proxy_port,
+ host=host,
+ debug=debug,
+ security_token=session_token,
+ region=region,
+ validate_certs=validate_certs)
def list_domains(self, domain_names=None):
"""
View
15 boto/cloudsearch/search.py
@@ -37,7 +37,6 @@ class CommitMismatchError(Exception):
class SearchResults(object):
-
def __init__(self, **attrs):
self.rid = attrs['info']['rid']
# self.doc_coverage_pct = attrs['info']['doc-coverage-pct']
@@ -289,7 +288,19 @@ def __call__(self, query):
params = query.to_params()
r = requests.get(url, params=params)
- data = json.loads(r.content)
+ try:
+ data = json.loads(r.content)
+ except ValueError, e:
+ if r.status_code == 403:
+ msg = ''
+ import re
+ g = re.search('<html><body><h1>403 Forbidden</h1>([^<]+)<', r.content)
+ try:
+ msg = ': %s' % (g.groups()[0].strip())
+ except AttributeError:
+ pass
+ raise SearchServiceException('Authentication error from Amazon%s' % msg)
+ raise SearchServiceException("Got non-json response from Amazon")
data['query'] = query
data['search_service'] = self
View
122 boto/connection.py
@@ -101,7 +101,7 @@
class HostConnectionPool(object):
"""
- A pool of connections for one remote (host,is_secure).
+ A pool of connections for one remote (host,port,is_secure).
When connections are added to the pool, they are put into a
pending queue. The _mexe method returns connections to the pool
@@ -145,7 +145,7 @@ def put(self, conn):
def get(self):
"""
Returns the next connection in this pool that is ready to be
- reused. Returns None of there aren't any.
+ reused. Returns None if there aren't any.
"""
# Discard ready connections that are too old.
self.clean()
@@ -234,7 +234,7 @@ class ConnectionPool(object):
STALE_DURATION = 60.0
def __init__(self):
- # Mapping from (host,is_secure) to HostConnectionPool.
+ # Mapping from (host,port,is_secure) to HostConnectionPool.
# If a pool becomes empty, it is removed.
self.host_to_pool = {}
# The last time the pool was cleaned.
@@ -259,7 +259,7 @@ def size(self):
"""
return sum(pool.size() for pool in self.host_to_pool.values())
- def get_http_connection(self, host, is_secure):
+ def get_http_connection(self, host, port, is_secure):
"""
Gets a connection from the pool for the named host. Returns
None if there is no connection that can be reused. It's the caller's
@@ -268,18 +268,18 @@ def get_http_connection(self, host, is_secure):
"""
self.clean()
with self.mutex:
- key = (host, is_secure)
+ key = (host, port, is_secure)
if key not in self.host_to_pool:
return None
return self.host_to_pool[key].get()
- def put_http_connection(self, host, is_secure, conn):
+ def put_http_connection(self, host, port, is_secure, conn):
"""
Adds a connection to the pool of connections that can be
reused for the named host.
"""
with self.mutex:
- key = (host, is_secure)
+ key = (host, port, is_secure)
if key not in self.host_to_pool:
self.host_to_pool[key] = HostConnectionPool()
self.host_to_pool[key].put(conn)
@@ -517,6 +517,7 @@ def __init__(self, host, aws_access_key_id=None,
self.port = port
else:
self.port = PORTS_BY_SECURITY[is_secure]
+ self.host_header = None
# Timeout used to tell httplib how long to wait for socket timeouts.
# Default is to leave timeout unchanged, which will in turn result in
@@ -541,14 +542,16 @@ def __init__(self, host, aws_access_key_id=None,
aws_secret_access_key,
security_token)
- # Allow config file to override default host and port.
+ # Allow config file to override default host, port, and host header.
if self.provider.host:
self.host = self.provider.host
if self.provider.port:
self.port = self.provider.port
+ if self.provider.host_header:
+ self.host_header = self.provider.host_header
self._pool = ConnectionPool()
- self._connection = (self.server_name(), self.is_secure)
+ self._connection = (self.host, self.port, self.is_secure)
self._last_rs = None
self._auth_handler = auth.get_auth_handler(
host, config, self.provider, self._required_auth_capability())
@@ -673,60 +676,92 @@ def handle_proxy(self, proxy, proxy_port, proxy_user, proxy_pass):
print "http_proxy environment variable does not specify " \
"a port, using default"
self.proxy_port = self.port
+
+ self.no_proxy = os.environ.get('no_proxy', '') or os.environ.get('NO_PROXY', '')
self.use_proxy = (self.proxy != None)
- def get_http_connection(self, host, is_secure):
- conn = self._pool.get_http_connection(host, is_secure)
+ def get_http_connection(self, host, port, is_secure):
+ conn = self._pool.get_http_connection(host, port, is_secure)
if conn is not None:
return conn
else:
- return self.new_http_connection(host, is_secure)
+ return self.new_http_connection(host, port, is_secure)
+
+ def skip_proxy(self, host):
+ if not self.no_proxy:
+ return False
+
+ if self.no_proxy == "*":
+ return True
+
+ hostonly = host
+ hostonly = host.split(':')[0]
+
+ for name in self.no_proxy.split(','):
+ if name and (hostonly.endswith(name) or host.endswith(name)):
+ return True
- def new_http_connection(self, host, is_secure):
- if self.use_proxy and not is_secure:
- host = '%s:%d' % (self.proxy, int(self.proxy_port))
+ return False
+
+ def new_http_connection(self, host, port, is_secure):
if host is None:
host = self.server_name()
+
+ # Make sure the host is really just the host, not including
+ # the port number
+ host = host.split(':', 1)[0]
+
+ http_connection_kwargs = self.http_connection_kwargs.copy()
+
+ # Connection factories below expect a port keyword argument
+ http_connection_kwargs['port'] = port
+
+ # Override host with proxy settings if needed
+ if self.use_proxy and not is_secure and \
+ not self.skip_proxy(host):
+ host = self.proxy
+ http_connection_kwargs['port'] = int(self.proxy_port)
+
if is_secure:
boto.log.debug(
'establishing HTTPS connection: host=%s, kwargs=%s',
- host, self.http_connection_kwargs)
- if self.use_proxy:
+ host, http_connection_kwargs)
+ if self.use_proxy and not self.skip_proxy(host):
connection = self.proxy_ssl(host, is_secure and 443 or 80)
elif self.https_connection_factory:
connection = self.https_connection_factory(host)
elif self.https_validate_certificates and HAVE_HTTPS_CONNECTION:
connection = https_connection.CertValidatingHTTPSConnection(
host, ca_certs=self.ca_certificates_file,
- **self.http_connection_kwargs)
+ **http_connection_kwargs)
else:
connection = httplib.HTTPSConnection(host,
- **self.http_connection_kwargs)
+ **http_connection_kwargs)
else:
boto.log.debug('establishing HTTP connection: kwargs=%s' %
- self.http_connection_kwargs)
+ http_connection_kwargs)
if self.https_connection_factory:
# even though the factory says https, this is too handy
# to not be able to allow overriding for http also.
connection = self.https_connection_factory(host,
- **self.http_connection_kwargs)
+ **http_connection_kwargs)
else:
connection = httplib.HTTPConnection(host,
- **self.http_connection_kwargs)
+ **http_connection_kwargs)
if self.debug > 1:
connection.set_debuglevel(self.debug)
# self.connection must be maintained for backwards-compatibility
# however, it must be dynamically pulled from the connection pool
# set a private variable which will enable that
if host.split(':')[0] == self.host and is_secure == self.is_secure:
- self._connection = (host, is_secure)
+ self._connection = (host, port, is_secure)
# Set the response class of the http connection to use our custom
# class.
connection.response_class = HTTPResponse
return connection
- def put_http_connection(self, host, is_secure, connection):
- self._pool.put_http_connection(host, is_secure, connection)
+ def put_http_connection(self, host, port, is_secure, connection):
+ self._pool.put_http_connection(host, port, is_secure, connection)
def proxy_ssl(self, host=None, port=None):
if host and port:
@@ -819,6 +854,7 @@ def _mexe(self, request, sender=None, override_num_retries=None,
boto.log.debug('Data: %s' % request.body)
boto.log.debug('Headers: %s' % request.headers)
boto.log.debug('Host: %s' % request.host)
+ boto.log.debug('Port: %s' % request.port)
boto.log.debug('Params: %s' % request.params)
response = None
body = None
@@ -828,7 +864,8 @@ def _mexe(self, request, sender=None, override_num_retries=None,
else:
num_retries = override_num_retries
i = 0
- connection = self.get_http_connection(request.host, self.is_secure)
+ connection = self.get_http_connection(request.host, request.port,
+ self.is_secure)
while i <= num_retries:
# Use binary exponential backoff to desynchronize client requests.
next_sleep = random.random() * (2 ** i)
@@ -836,6 +873,12 @@ def _mexe(self, request, sender=None, override_num_retries=None,
# we now re-sign each request before it is retried
boto.log.debug('Token: %s' % self.provider.security_token)
request.authorize(connection=self)
+ # Only force header for non-s3 connections, because s3 uses
+ # an older signing method + bucket resource URLs that include
+ # the port info. All others should now be up to date and
+ # not include the port.
+ if 's3' not in self._required_auth_capability():
+ request.headers['Host'] = self.host.split(':', 1)[0]
if callable(sender):
response = sender(connection, request.method, request.path,
request.body, request.headers)
@@ -865,24 +908,38 @@ def _mexe(self, request, sender=None, override_num_retries=None,
body = response.read()
elif response.status < 300 or response.status >= 400 or \
not location:
- self.put_http_connection(request.host, self.is_secure,
- connection)
+ # don't return connection to the pool if response contains
+ # Connection:close header, because the connection has been
+ # closed and default reconnect behavior may do something
+ # different than new_http_connection. Also, it's probably
+ # less efficient to try to reuse a closed connection.
+ conn_header_value = response.getheader('connection')
+ if conn_header_value == 'close':
+ connection.close()
+ else:
+ self.put_http_connection(request.host, request.port,
+ self.is_secure, connection)
return response
else:
scheme, request.host, request.path, \
params, query, fragment = urlparse.urlparse(location)
if query:
request.path += '?' + query
+ # urlparse can return both host and port in netloc, so if
+ # that's the case we need to split them up properly
+ if ':' in request.host:
+ request.host, request.port = request.host.split(':', 1)
msg = 'Redirecting: %s' % scheme + '://'
msg += request.host + request.path
boto.log.debug(msg)
connection = self.get_http_connection(request.host,
+ request.port,
scheme == 'https')
response = None
continue
except PleaseRetryException, e:
boto.log.debug('encountered a retry exception: %s' % e)
- connection = self.new_http_connection(request.host,
+ connection = self.new_http_connection(request.host, request.port,
self.is_secure)
response = e.response
except self.http_exceptions, e:
@@ -894,7 +951,7 @@ def _mexe(self, request, sender=None, override_num_retries=None,
raise e
boto.log.debug('encountered %s exception, reconnecting' % \
e.__class__.__name__)
- connection = self.new_http_connection(request.host,
+ connection = self.new_http_connection(request.host, request.port,
self.is_secure)
time.sleep(next_sleep)
i += 1
@@ -923,6 +980,9 @@ def build_base_http_request(self, method, path, auth_path,
headers = {}
else:
headers = headers.copy()
+ if (self.host_header and
+ not boto.utils.find_matching_headers('host', headers)):
+ headers['host'] = self.host_header
host = host or self.host
if self.use_proxy:
if not auth_path:
@@ -981,7 +1041,7 @@ def get_utf8_value(self, value):
def make_request(self, action, params=None, path='/', verb='GET'):
http_request = self.build_base_http_request(verb, path, None,
params, {}, '',
- self.server_name())
+ self.host)
if action:
http_request.params['Action'] = action
if self.APIVersion:
View
3  boto/dynamodb/__init__.py
@@ -35,6 +35,9 @@ def regions():
return [RegionInfo(name='us-east-1',
endpoint='dynamodb.us-east-1.amazonaws.com',
connection_cls=boto.dynamodb.layer2.Layer2),
+ RegionInfo(name='us-gov-west-1',
+ endpoint='dynamodb.us-gov-west-1.amazonaws.com',
+ connection_cls=boto.dynamodb.layer2.Layer2),
RegionInfo(name='us-west-1',
endpoint='dynamodb.us-west-1.amazonaws.com',
connection_cls=boto.dynamodb.layer2.Layer2),
View
6 boto/dynamodb2/__init__.py
@@ -35,6 +35,9 @@ def regions():
return [RegionInfo(name='us-east-1',
endpoint='dynamodb.us-east-1.amazonaws.com',
connection_cls=DynamoDBConnection),
+ RegionInfo(name='us-gov-west-1',
+ endpoint='dynamodb.us-gov-west-1.amazonaws.com',
+ connection_cls=DynamoDBConnection),
RegionInfo(name='us-west-1',
endpoint='dynamodb.us-west-1.amazonaws.com',
connection_cls=DynamoDBConnection),
@@ -50,6 +53,9 @@ def regions():
RegionInfo(name='ap-southeast-1',
endpoint='dynamodb.ap-southeast-1.amazonaws.com',
connection_cls=DynamoDBConnection),
+ RegionInfo(name='ap-southeast-2',
+ endpoint='dynamodb.ap-southeast-2.amazonaws.com',
+ connection_cls=DynamoDBConnection),
RegionInfo(name='sa-east-1',
endpoint='dynamodb.sa-east-1.amazonaws.com',
connection_cls=DynamoDBConnection),
View
202 boto/dynamodb2/items.py
@@ -1,3 +1,5 @@
+from copy import deepcopy
+
from boto.dynamodb2.types import Dynamizer
@@ -18,7 +20,7 @@ class Item(object):
data. It also tries to intelligently track how data has changed throughout
the life of the instance, to be as efficient as possible about updates.
"""
- def __init__(self, table, data=None):
+ def __init__(self, table, data=None, loaded=False):
"""
Constructs an (unsaved) ``Item`` instance.
@@ -32,6 +34,10 @@ def __init__(self, table, data=None):
Optionally accepts a ``data`` parameter, which should be a dictionary
of the fields & values of the item.
+ Optionally accepts a ``loaded`` parameter, which should be a boolean.
+ ``True`` if it was preexisting data loaded from DynamoDB, ``False`` if
+ it's new data from the user. Default is ``False``.
+
Example::
>>> users = Table('users')
@@ -57,41 +63,28 @@ def __init__(self, table, data=None):
"""
self.table = table
- self._data = {}
+ self._loaded = loaded
self._orig_data = {}
- self._is_dirty = False
+ self._data = data
self._dynamizer = Dynamizer()
- if data:
- self._data = data
- self._is_dirty = True
+ if self._data is None:
+ self._data = {}
- for key in data.keys():
- self._orig_data[key] = NEWVALUE
+ if self._loaded:
+ self._orig_data = deepcopy(self._data)
def __getitem__(self, key):
return self._data.get(key, None)
def __setitem__(self, key, value):
- # Stow the original value if present, so we can track what's changed.
- if key in self._data:
- self._orig_data[key] = self._data[key]
- else:
- # Use a marker to indicate we've never seen a value for this key.
- self._orig_data[key] = NEWVALUE
-
self._data[key] = value
- self._is_dirty = True
def __delitem__(self, key):
if not key in self._data:
return
- # Stow the original value, so we can track what's changed.
- value = self._data[key]
del self._data[key]
- self._orig_data[key] = value
- self._is_dirty = True
def keys(self):
return self._data.keys()
@@ -112,10 +105,50 @@ def __iter__(self):
def __contains__(self, key):
return key in self._data
- def needs_save(self):
+ def _determine_alterations(self):
+ """
+ Checks the ``_orig_data`` against the ``_data`` to determine what
+ changes to the data are present.
+
+ Returns a dictionary containing the keys ``adds``, ``changes`` &
+ ``deletes``, containing the updated data.
+ """
+ alterations = {
+ 'adds': {},
+ 'changes': {},
+ 'deletes': [],
+ }
+
+ orig_keys = set(self._orig_data.keys())
+ data_keys = set(self._data.keys())
+
+ # Run through keys we know are in both for changes.
+ for key in orig_keys.intersection(data_keys):
+ if self._data[key] != self._orig_data[key]:
+ if self._is_storable(self._data[key]):
+ alterations['changes'][key] = self._data[key]
+ else:
+ alterations['deletes'].append(key)
+
+ # Run through additions.
+ for key in data_keys.difference(orig_keys):
+ if self._is_storable(self._data[key]):
+ alterations['adds'][key] = self._data[key]
+
+ # Run through deletions.
+ for key in orig_keys.difference(data_keys):
+ alterations['deletes'].append(key)
+
+ return alterations
+
+ def needs_save(self, data=None):
"""
Returns whether or not the data has changed on the ``Item``.
+ Optionally accepts a ``data`` argument, which accepts the output from
+ ``self._determine_alterations()`` if you've already called it. Typically
+ unnecessary to do. Default is ``None``.
+
Example:
>>> user.needs_save()
@@ -125,7 +158,17 @@ def needs_save(self):
True
"""
- return self._is_dirty
+ if data is None:
+ data = self._determine_alterations()
+
+ needs_save = False
+
+ for kind in ['adds', 'changes', 'deletes']:
+ if len(data[kind]):
+ needs_save = True
+ break
+
+ return needs_save
def mark_clean(self):
"""
@@ -143,23 +186,16 @@ def mark_clean(self):
False
"""
- self._orig_data = {}
- self._is_dirty = False
+ self._orig_data = deepcopy(self._data)
def mark_dirty(self):
"""
- Marks an ``Item`` instance as needing to be saved.
-
- Example:
-
- >>> user.needs_save()
- False
- >>> user.mark_dirty()
- >>> user.needs_save()
- True
+ DEPRECATED: Marks an ``Item`` instance as needing to be saved.
+ This method is no longer necessary, as the state tracking on ``Item``
+ has been improved to automatically detect proper state.
"""
- self._is_dirty = True
+ return
def load(self, data):
"""
@@ -175,7 +211,8 @@ def load(self, data):
for field_name, field_value in data.get('Item', {}).items():
self[field_name] = self._dynamizer.decode(field_value)
- self.mark_clean()
+ self._loaded = True
+ self._orig_data = deepcopy(self._data)
def get_keys(self):
"""
@@ -229,30 +266,42 @@ def build_expects(self, fields=None):
raise ValueError("Unknown key %s provided." % key)
# States:
- # * New field (_data & _orig_data w/ marker)
- # * Unchanged field (only _data)
- # * Modified field (_data & _orig_data)
- # * Deleted field (only _orig_data)
- if not key in self._orig_data:
+ # * New field (only in _data)
+ # * Unchanged field (in both _data & _orig_data, same data)
+ # * Modified field (in both _data & _orig_data, different data)
+ # * Deleted field (only in _orig_data)
+ orig_value = self._orig_data.get(key, NEWVALUE)
+ current_value = self._data.get(key, NEWVALUE)
+
+ if orig_value == current_value:
# Existing field unchanged.
- value = self._data[key]
+ value = current_value
else:
if key in self._data:
- if self._orig_data[key] is NEWVALUE:
+ if not key in self._orig_data:
# New field.
expects[key]['Exists'] = False
else:
# Existing field modified.
- value = self._orig_data[key]
+ value = orig_value
else:
# Existing field deleted.
- value = self._orig_data[key]
+ value = orig_value
if value is not None:
expects[key]['Value'] = self._dynamizer.encode(value)
return expects
+ def _is_storable(self, value):
+ # We need to prevent ``None``, empty string & empty set from
+ # heading to DDB, but allow falsy values like 0 & False to make it.
+ if not value:
+ if not value in (0, 0.0, False):
+ return False
+
+ return True
+
def prepare_full(self):
"""
Runs through all fields & encodes them to be handed off to DynamoDB
@@ -265,6 +314,9 @@ def prepare_full(self):
final_data = {}
for key, value in self._data.items():
+ if not self._is_storable(value):
+ continue
+
final_data[key] = self._dynamizer.encode(value)
return final_data
@@ -280,22 +332,30 @@ def prepare_partial(self):
# This doesn't save on it's own. Rather, we prepare the datastructure
# and hand-off to the table to handle creation/update.
final_data = {}
+ fields = set()
+ alterations = self._determine_alterations()
- # Loop over ``_orig_data`` so that we only build up data that's changed.
- for key, value in self._orig_data.items():
- if key in self._data:
- # It changed.
- final_data[key] = {
- 'Action': 'PUT',
- 'Value': self._dynamizer.encode(self._data[key])
- }
- else:
- # It was deleted.
- final_data[key] = {
- 'Action': 'DELETE',
- }
+ for key, value in alterations['adds'].items():
+ final_data[key] = {
+ 'Action': 'PUT',
+ 'Value': self._dynamizer.encode(self._data[key])
+ }
+ fields.add(key)
- return final_data
+ for key, value in alterations['changes'].items():
+ final_data[key] = {
+ 'Action': 'PUT',
+ 'Value': self._dynamizer.encode(self._data[key])
+ }
+ fields.add(key)
+
+ for key in alterations['deletes']:
+ final_data[key] = {
+ 'Action': 'DELETE',
+ }
+ fields.add(key)
+
+ return final_data, fields
def partial_save(self):
"""
@@ -316,14 +376,28 @@ def partial_save(self):
>>> user.partial_save()
"""
- if not self.needs_save():
- return False
-
key = self.get_keys()
# Build a new dict of only the data we're changing.
- final_data = self.prepare_partial()
+ final_data, fields = self.prepare_partial()
+
+ if not final_data:
+ return False
+
+ # Remove the key(s) from the ``final_data`` if present.
+ # They should only be present if this is a new item, in which
+ # case we shouldn't be sending as part of the data to update.
+ for fieldname, value in key.items():
+ if fieldname in final_data:
+ del final_data[fieldname]
+
+ try:
+ # It's likely also in ``fields``, so remove it there too.
+ fields.remove(fieldname)
+ except KeyError:
+ pass
+
# Build expectations of only the fields we're planning to update.
- expects = self.build_expects(fields=self._orig_data.keys())
+ expects = self.build_expects(fields=fields)
returned = self.table._update_item(key, final_data, expects=expects)
# Mark the object as clean.
self.mark_clean()
@@ -359,7 +433,7 @@ def save(self, overwrite=False):
>>> user.save(overwrite=True)
"""
- if not self.needs_save():
+ if not self.needs_save() and not overwrite:
return False
final_data = self.prepare_full()
View
17 boto/dynamodb2/layer1.py
@@ -21,7 +21,11 @@
#
from binascii import crc32
-import json
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
import boto
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
@@ -67,7 +71,11 @@ def __init__(self, **kwargs):
if reg.name == region_name:
region = reg
break
- kwargs['host'] = region.endpoint
+
+ # Only set host if it isn't manually overwritten
+ if 'host' not in kwargs:
+ kwargs['host'] = region.endpoint
+
AWSQueryConnection.__init__(self, **kwargs)
self.region = region
self._validate_checksums = boto.config.getbool(
@@ -1467,13 +1475,13 @@ def update_table(self, table_name, provisioned_throughput):
def make_request(self, action, body):
headers = {
'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
- 'Host': self.region.endpoint,
+ 'Host': self.host,
'Content-Type': 'application/x-amz-json-1.0',
'Content-Length': str(len(body)),
}
http_request = self.build_base_http_request(
method='POST', path='/', auth_path='/', params={},
- headers=headers, data=body)
+ headers=headers, data=body, host=self.host)
response = self._mexe(http_request, sender=None,
override_num_retries=self.NumberRetries,
retry_handler=self._retry_handler)
@@ -1491,6 +1499,7 @@ def make_request(self, action, body):
def _retry_handler(self, response, i, next_sleep):
status = None
+ boto.log.debug("Saw HTTP status: %s" % response.status)
if response.status == 400:
response_body = response.read()
boto.log.debug(response_body)
View
22 boto/dynamodb2/results.py
@@ -58,6 +58,12 @@ def next(self):
self.fetch_more()
+ # It's possible that previous call to ``fetch_more`` may not return
+ # anything useful but there may be more results. Loop until we get
+ # something back, making sure we guard for no results left.
+ while not len(self._results) and self._results_left:
+ self.fetch_more()
+
if self._offset < len(self._results):
return self._results[self._offset]
else:
@@ -106,16 +112,11 @@ def fetch_more(self):
kwargs[self.first_key] = self._last_key_seen
results = self.the_callable(*args, **kwargs)
-
- if not len(results.get('results', [])):
- self._results_left = False
- return
-
- self._results.extend(results['results'])
+ new_results = results.get('results', [])
self._last_key_seen = results.get('last_key', None)
- if self._last_key_seen is None:
- self._results_left = False
+ if len(new_results):
+ self._results.extend(results['results'])
# Decrease the limit, if it's present.
if self.call_kwargs.get('limit'):
@@ -124,7 +125,10 @@ def fetch_more(self):
# results to look for
if 0 == self.call_kwargs['limit']:
self._results_left = False
-
+
+ if self._last_key_seen is None:
+ self._results_left = False
+
class BatchGetResultSet(ResultSet):
def __init__(self, *args, **kwargs):
View
66 boto/dynamodb2/table.py
@@ -1,3 +1,4 @@
+import boto
from boto.dynamodb2 import exceptions
from boto.dynamodb2.fields import (HashKey, RangeKey,
AllIndex, KeysOnlyIndex, IncludeIndex)
@@ -57,7 +58,7 @@ def __init__(self, table_name, schema=None, throughput=None, indexes=None,
>>> conn = Table('users')
# The full, minimum-extra-calls case.
- >>> from boto.dynamodb2.layer1 import DynamoDBConnection
+ >>> from boto import dynamodb2
>>> users = Table('users', schema=[
... HashKey('username'),
... RangeKey('date_joined', data_type=NUMBER)
@@ -69,11 +70,10 @@ def __init__(self, table_name, schema=None, throughput=None, indexes=None,
... RangeKey('date_joined')
... ]),
... ],
- ... connection=DynamoDBConnection(
- ... aws_access_key_id='key',
- ... aws_secret_access_key='key',
- ... region='us-west-2'
- ... ))
+ ... connection=dynamodb2.connect_to_region('us-west-2',
+ ... aws_access_key_id='key',
+ ... aws_secret_access_key='key',
+ ... ))
"""
self.table_name = table_name
@@ -133,7 +133,7 @@ def create(cls, table_name, schema, throughput=None, indexes=None,
Example::
- >>> users = Table.create_table('users', schema=[
+ >>> users = Table.create('users', schema=[
... HashKey('username'),
... RangeKey('date_joined', data_type=NUMBER)
... ], throughput={
@@ -611,7 +611,7 @@ def _build_filters(self, filter_kwargs, using=QUERY_OPERATORS):
'AttributeValueList': [],
'ComparisonOperator': op,
}
-
+
# Special-case the ``NULL/NOT_NULL`` case.
if field_bits[-1] == 'null':
del lookup['AttributeValueList']
@@ -1071,17 +1071,19 @@ def __init__(self, table):
self.table = table
self._to_put = []
self._to_delete = []
+ self._unprocessed = []
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
- if not self._to_put and not self._to_delete:
- return False
+ if self._to_put or self._to_delete:
+ # Flush anything that's left.
+ self.flush()
- # Flush anything that's left.
- self.flush()
- return True
+ if self._unprocessed:
+ # Finally, handle anything that wasn't processed.
+ self.resend_unprocessed()
def put_item(self, data, overwrite=False):
self._to_put.append(data)
@@ -1123,7 +1125,43 @@ def flush(self):
}
})
- self.table.connection.batch_write_item(batch_data)
+ resp = self.table.connection.batch_write_item(batch_data)
+ self.handle_unprocessed(resp)
+
self._to_put = []
self._to_delete = []
return True
+
+ def handle_unprocessed(self, resp):
+ if len(resp.get('UnprocessedItems', [])):
+ table_name = self.table.table_name
+ unprocessed = resp['UnprocessedItems'].get(table_name, [])
+
+ # Some items have not been processed. Stow them for now &
+ # re-attempt processing on ``__exit__``.
+ msg = "%s items were unprocessed. Storing for later."
+ boto.log.info(msg % len(unprocessed))
+ self._unprocessed.extend(unprocessed)
+
+ def resend_unprocessed(self):
+ # If there are unprocessed records (for instance, the user was over
+ # their throughput limitations), iterate over them & send until they're
+ # all there.
+ boto.log.info(
+ "Re-sending %s unprocessed items." % len(self._unprocessed)
+ )
+
+ while len(self._unprocessed):
+ # Again, do 25 at a time.
+ to_resend = self._unprocessed[:25]
+ # Remove them from the list.
+ self._unprocessed = self._unprocessed[25:]
+ batch_data = {
+ self.table.table_name: to_resend
+ }
+ boto.log.info("Sending %s items" % len(to_resend))
+ resp = self.table.connection.batch_write_item(batch_data)
+ self.handle_unprocessed(resp)
+ boto.log.info(
+ "%s unprocessed items left" % len(self._unprocessed)
+ )
View
6 boto/ec2/__init__.py
@@ -29,6 +29,7 @@
RegionData = {
'us-east-1': 'ec2.us-east-1.amazonaws.com',
+ 'us-gov-west-1': 'ec2.us-gov-west-1.amazonaws.com',
'us-west-1': 'ec2.us-west-1.amazonaws.com',
'us-west-2': 'ec2.us-west-2.amazonaws.com',
'sa-east-1': 'ec2.sa-east-1.amazonaws.com',
@@ -72,9 +73,14 @@ def connect_to_region(region_name, **kw_params):
:return: A connection to the given region, or None if an invalid region
name is given
"""
+ if 'region' in kw_params and isinstance(kw_params['region'], RegionInfo)\
+ and region_name == kw_params['region'].name:
+ return EC2Connection(**kw_params)
+
for region in regions(**kw_params):
if region.name == region_name:
return region.connect(**kw_params)
+
return None
View
33 boto/ec2/address.py
@@ -71,33 +71,50 @@ def endElement(self, name, value, connection):
else:
setattr(self, name, value)
- def release(self):
+ def release(self, dry_run=False):
"""
Free up this Elastic IP address.
:see: :meth:`boto.ec2.connection.EC2Connection.release_address`
"""
if self.allocation_id:
- return self.connection.release_address(None, self.allocation_id)
+ return self.connection.release_address(
+ None,
+ self.allocation_id,
+ dry_run=dry_run)
else:
- return self.connection.release_address(self.public_ip)
+ return self.connection.release_address(
+ self.public_ip,
+ dry_run=dry_run
+ )
delete = release
- def associate(self, instance_id):
+ def associate(self, instance_id, dry_run=False):
"""
Associate this Elastic IP address with a currently running instance.
:see: :meth:`boto.ec2.connection.EC2Connection.associate_address`
"""
- return self.connection.associate_address(instance_id, self.public_ip)
+ return self.connection.associate_address(
+ instance_id,
+ self.public_ip,
+ dry_run=dry_run
+ )
- def disassociate(self):
+ def disassociate(self, dry_run=False):
"""
Disassociate this Elastic IP address from a currently running instance.
:see: :meth:`boto.ec2.connection.EC2Connection.disassociate_address`
"""
if self.association_id:
- return self.connection.disassociate_address(None, self.association_id)
+ return self.connection.disassociate_address(
+ None,
+ self.association_id,
+ dry_run=dry_run
+ )
else:
- return self.connection.disassociate_address(self.public_ip)
+ return self.connection.disassociate_address(
+ self.public_ip,
+ dry_run=dry_run
+ )
View
48 boto/ec2/autoscale/__init__.py
@@ -47,6 +47,7 @@
RegionData = {
'us-east-1': 'autoscaling.us-east-1.amazonaws.com',
+ 'us-gov-west-1': 'autoscaling.us-gov-west-1.amazonaws.com',
'us-west-1': 'autoscaling.us-west-1.amazonaws.com',
'us-west-2': 'autoscaling.us-west-2.amazonaws.com',
'sa-east-1': 'autoscaling.sa-east-1.amazonaws.com',
@@ -224,8 +225,7 @@ def create_launch_configuration(self, launch_config):
if launch_config.ramdisk_id:
params['RamdiskId'] = launch_config.ramdisk_id
if launch_config.block_device_mappings:
- self.build_list_params(params, launch_config.block_device_mappings,
- 'BlockDeviceMappings')
+ [x.autoscale_build_list_params(params) for x in launch_config.block_device_mappings]
if launch_config.security_groups:
self.build_list_params(params, launch_config.security_groups,
'SecurityGroups')
@@ -255,6 +255,11 @@ def create_scaling_policy(self, scaling_policy):
'AutoScalingGroupName': scaling_policy.as_name,
'PolicyName': scaling_policy.name,
'ScalingAdjustment': scaling_policy.scaling_adjustment}
+
+ if scaling_policy.adjustment_type == "PercentChangeInCapacity" and \
+ scaling_policy.min_adjustment_step is not None:
+ params['MinAdjustmentStep'] = scaling_policy.min_adjustment_step
+
if scaling_policy.cooldown is not None:
params['Cooldown'] = scaling_policy.cooldown
@@ -487,15 +492,19 @@ def get_all_policies(self, as_group=None, policy_names=None,
If no group name or list of policy names are provided, all
available policies are returned.
- :type as_name: str
- :param as_name: The name of the
+ :type as_group: str
+ :param as_group: The name of the
:class:`boto.ec2.autoscale.group.AutoScalingGroup` to filter for.
- :type names: list
- :param names: List of policy names which should be searched for.
+ :type policy_names: list
+ :param policy_names: List of policy names which should be searched for.
:type max_records: int
:param max_records: Maximum amount of groups to return.
+
+ :type next_token: str
+ :param next_token: If you have more results than can be returned
+ at once, pass in this parameter to page through all results.
"""
params = {}
if as_group:
@@ -676,9 +685,9 @@ def put_notification_configuration(self, autoscale_group, topic, notification_ty
Configures an Auto Scaling group to send notifications when
specified events take place.
- :type as_group: str or
+ :type autoscale_group: str or
:class:`boto.ec2.autoscale.group.AutoScalingGroup` object
- :param as_group: The Auto Scaling group to put notification
+ :param autoscale_group: The Auto Scaling group to put notification
configuration on.
:type topic: str
@@ -699,6 +708,29 @@ def put_notification_configuration(self, autoscale_group, topic, notification_ty
self.build_list_params(params, notification_types, 'NotificationTypes')
return self.get_status('PutNotificationConfiguration', params)
+ def delete_notification_configuration(self, autoscale_group, topic):
+ """
+ Deletes notifications created by put_notification_configuration.
+
+ :type autoscale_group: str or
+ :class:`boto.ec2.autoscale.group.AutoScalingGroup` object
+ :param autoscale_group: The Auto Scaling group to delete notification
+ configuration from.
+
+ :type topic: str
+ :param topic: The Amazon Resource Name (ARN) of the Amazon Simple
+ Notification Service (SNS) topic.
+ """
+
+ name = autoscale_group
+ if isinstance(autoscale_group, AutoScalingGroup):
+ name = autoscale_group.name
+
+ params = {'AutoScalingGroupName': name,
+ 'TopicARN': topic}
+
+ return self.get_status('DeleteNotificationConfiguration', params)
+
def set_instance_health(self, instance_id, health_status,
should_respect_grace_period=True):
"""
View
6