Merge branch 'release-2.7.0'

2 parents 7073e7a + 4f6fde8, commit f325c6e3e7db5f1c2638108fb40c2a34a5470f27, committed by @garnaat on Jan 10, 2013
Showing with 10,545 additions and 2,150 deletions.
  1. +18 −0 LICENSE
  2. +34 −15 README.rst
  3. +20 −0 bin/cfadmin
  4. +22 −4 bin/elbadmin
  5. +20 −14 bin/fetch_file
  6. +15 −15 bin/glacier
  7. +9 −1 bin/list_instances
  8. +419 −0 bin/mturk
  9. +0 −323 bin/s3multiput
  10. +228 −79 bin/s3put
  11. +1 −1 boto/__init__.py
  12. +63 −33 boto/auth.py
  13. +69 −0 boto/beanstalk/__init__.py
  14. +3 −1 boto/cloudformation/__init__.py
  15. +10 −8 boto/cloudformation/connection.py
  16. +7 −1 boto/cloudformation/stack.py
  17. +25 −30 boto/cloudfront/distribution.py
  18. +1 −4 boto/cloudsearch/document.py
  19. +1 −4 boto/cloudsearch/domain.py
  20. +3 −4 boto/cloudsearch/optionstatus.py
  21. +28 −0 boto/compat.py
  22. +29 −6 boto/connection.py
  23. +0 −52 boto/contrib/m2helpers.py
  24. 0 boto/datapipeline/__init__.py
  25. +42 −0 boto/datapipeline/exceptions.py
  26. +546 −0 boto/datapipeline/layer1.py
  27. +3 −0 boto/dynamodb/__init__.py
  28. +16 −3 boto/dynamodb/batch.py
  29. +19 −0 boto/dynamodb/exceptions.py
  30. +47 −26 boto/dynamodb/layer1.py
  31. +175 −103 boto/dynamodb/layer2.py
  32. +38 −0 boto/dynamodb/schema.py
  33. +88 −38 boto/dynamodb/table.py
  34. +201 −13 boto/dynamodb/types.py
  35. +20 −2 boto/ec2/__init__.py
  36. +16 −1 boto/ec2/autoscale/__init__.py
  37. +11 −4 boto/ec2/autoscale/group.py
  38. +11 −0 boto/ec2/autoscale/policy.py
  39. +10 −8 boto/ec2/cloudwatch/__init__.py
  40. +1 −5 boto/ec2/cloudwatch/alarm.py
  41. +78 −21 boto/ec2/connection.py
  42. +24 −12 boto/ec2/elb/__init__.py
  43. +6 −4 boto/ec2/elb/healthcheck.py
  44. +0 −3 boto/ec2/elb/instancestate.py
  45. +7 −6 boto/ec2/elb/listelement.py
  46. +4 −5 boto/ec2/elb/listener.py
  47. +14 −0 boto/ec2/elb/loadbalancer.py
  48. +40 −4 boto/ec2/instance.py
  49. +91 −7 boto/ec2/networkinterface.py
  50. +3 −0 boto/ec2/reservedinstance.py
  51. +73 −0 boto/ec2/spotinstancerequest.py
  52. +3 −0 boto/emr/__init__.py
  53. +18 −0 boto/emr/connection.py
  54. +1 −0 boto/emr/emrobject.py
  55. +25 −13 boto/emr/step.py
  56. +15 −34 boto/exception.py
  57. +241 −45 boto/glacier/concurrent.py
  58. +8 −4 boto/glacier/exceptions.py
  59. +22 −6 boto/glacier/job.py
  60. +19 −7 boto/glacier/layer1.py
  61. +3 −2 boto/glacier/response.py
  62. +144 −0 boto/glacier/utils.py
  63. +132 −17 boto/glacier/vault.py
  64. +174 −102 boto/glacier/writer.py
  65. +181 −11 boto/gs/bucket.py
  66. +64 −0 boto/gs/bucketlistresultset.py
  67. +24 −15 boto/gs/connection.py
  68. +44 −0 boto/gs/key.py
  69. +0 −2 boto/handler.py
  70. +1 −1 boto/https_connection.py
  71. +1 −5 boto/iam/connection.py
  72. +6 −6 boto/manage/cmdshell.py
  73. +257 −158 boto/mturk/connection.py
  74. +55 −0 boto/mturk/layoutparam.py
  75. +88 −45 boto/mturk/question.py
  76. +48 −12 boto/mws/connection.py
  77. +11 −2 boto/mws/response.py
  78. +18 −1 boto/provider.py
  79. +3 −11 boto/pyami/config.py
  80. +43 −7 boto/rds/__init__.py
  81. +57 −12 boto/rds/dbinstance.py
  82. +24 −3 boto/rds/dbsnapshot.py
  83. +3 −1 boto/resultset.py
  84. +74 −14 boto/route53/connection.py
  85. +16 −1 boto/route53/record.py
  86. +42 −0 boto/route53/status.py
  87. +412 −0 boto/route53/zone.py
  88. +3 −0 boto/s3/__init__.py
  89. +29 −19 boto/s3/bucket.py
  90. +0 −2 boto/s3/bucketlistresultset.py
  91. +49 −27 boto/s3/connection.py
  92. +0 −2 boto/s3/deletemarker.py
  93. +207 −11 boto/s3/key.py
  94. +118 −13 boto/s3/lifecycle.py
  95. +37 −22 boto/s3/multipart.py
  96. +3 −0 boto/s3/tagging.py
  97. +3 −1 boto/sdb/__init__.py
  98. +22 −16 boto/sdb/db/test_db.py
  99. +78 −3 boto/ses/connection.py
  100. +3 −0 boto/sns/__init__.py
  101. +11 −10 boto/sns/connection.py
  102. +3 −1 boto/sqs/__init__.py
  103. +23 −10 boto/sqs/connection.py
  104. +4 −6 boto/sqs/jsonmessage.py
  105. +35 −14 boto/sqs/queue.py
  106. +129 −21 boto/storage_uri.py
  107. +54 −1 boto/sts/connection.py
  108. +47 −8 boto/sts/credentials.py
  109. +247 −288 boto/swf/layer1.py
  110. +342 −0 boto/swf/layer2.py
  111. +90 −45 boto/utils.py
  112. +216 −30 boto/vpc/__init__.py
  113. +39 −10 docs/source/autoscale_tut.rst
  114. +15 −0 docs/source/boto_config_tut.rst
  115. +107 −8 docs/source/dynamodb_tut.rst
  116. +1 −1 docs/source/elb_tut.rst
  117. +7 −0 docs/source/index.rst
  118. +26 −0 docs/source/ref/beanstalk.rst
  119. +26 −0 docs/source/ref/datapipeline.rst
  120. +6 −1 docs/source/ref/dynamodb.rst
  121. +23 −16 docs/source/ref/ec2.rst
  122. +8 −1 docs/source/ref/glacier.rst
  123. +2 −0 docs/source/ref/index.rst
  124. +7 −0 docs/source/ref/mturk.rst
  125. +33 −0 docs/source/ref/mws.rst
  126. +102 −3 docs/source/s3_tut.rst
  127. +2 −2 requirements.txt
  128. +10 −4 setup.py
  129. +110 −0 tests/integration/cloudformation/test_connection.py
  130. +122 −0 tests/integration/datapipeline/test_layer1.py
  131. +75 −21 tests/integration/dynamodb/test_layer2.py
  132. +84 −0 tests/integration/dynamodb/test_table.py
  133. +2 −0 tests/integration/ec2/cloudwatch/test_connection.py
  134. +3 −3 tests/integration/ec2/test_cert_verification.py
  135. 0 tests/integration/ec2/vpc/__init__.py
  136. +95 −0 tests/integration/ec2/vpc/test_connection.py
  137. +44 −0 tests/integration/glacier/test_layer1.py
  138. 0 tests/integration/gs/__init__.py
  139. +4 −4 tests/integration/{s3/cb_test_harnass.py → gs/cb_test_harness.py}
  140. +1 −16 tests/integration/{s3/test_gsconnection.py → gs/test_connection.py}
  141. +28 −23 tests/integration/{s3 → gs}/test_resumable_downloads.py
  142. +37 −32 tests/integration/{s3 → gs}/test_resumable_uploads.py
  143. +198 −0 tests/integration/gs/test_versioning.py
  144. +15 −0 tests/integration/gs/util.py
  145. +4 −1 tests/integration/mws/test.py
  146. +132 −0 tests/integration/route53/test_zone.py
  147. +5 −4 tests/integration/s3/mock_storage_service.py
  148. +95 −3 tests/integration/s3/test_bucket.py
  149. +33 −10 tests/integration/s3/test_key.py
  150. +66 −1 tests/integration/sqs/test_connection.py
  151. +5 −1 tests/unit/__init__.py
  152. 0 tests/unit/auth/__init__.py
  153. +73 −0 tests/unit/auth/test_sigv4.py
  154. +63 −0 tests/unit/cloudformation/test_stack.py
  155. 0 tests/unit/dynamodb/__init__.py
  156. +103 −0 tests/unit/dynamodb/test_batch.py
  157. +119 −0 tests/unit/dynamodb/test_layer2.py
  158. +82 −0 tests/unit/dynamodb/test_types.py
  159. 0 tests/unit/ec2/autoscale/__init__.py
  160. +162 −0 tests/unit/ec2/autoscale/test_group.py
  161. +110 −9 tests/unit/ec2/test_connection.py
  162. +153 −8 tests/unit/ec2/test_instance.py
  163. +140 −0 tests/unit/ec2/test_networkinterface.py
  164. +120 −0 tests/unit/glacier/test_concurrent.py
  165. +60 −0 tests/unit/glacier/test_job.py
  166. +22 −2 tests/unit/glacier/test_layer1.py
  167. +120 −1 tests/unit/glacier/test_layer2.py
  168. +111 −0 tests/unit/glacier/test_utils.py
  169. +68 −0 tests/unit/glacier/test_vault.py
  170. +177 −18 tests/unit/glacier/test_writer.py
  171. +27 −0 tests/unit/provider/test_provider.py
  172. 0 tests/unit/rds/__init__.py
  173. +131 −0 tests/unit/rds/test_connection.py
  174. +75 −0 tests/unit/s3/test_key.py
  175. +97 −0 tests/unit/s3/test_lifecycle.py
  176. +10 −0 tests/unit/s3/test_tagging.py
  177. 0 tests/unit/sns/__init__.py
  178. +99 −0 tests/unit/sns/test_connection.py
  179. 0 tests/unit/sqs/__init__.py
  180. +98 −0 tests/unit/sqs/test_connection.py
  181. +40 −0 tests/unit/sqs/test_queue.py
  182. +74 −0 tests/unit/sts/test_connection.py
18 LICENSE
@@ -0,0 +1,18 @@
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish, dis-
+tribute, sublicense, and/or sell copies of the Software, and to permit
+persons to whom the Software is furnished to do so, subject to the fol-
+lowing conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+IN THE SOFTWARE.
49 README.rst
@@ -1,8 +1,8 @@
####
boto
####
-boto 2.6.0
-19-Sep-2012
+boto 2.7.0
+10-Jan-2013
.. image:: https://secure.travis-ci.org/boto/boto.png?branch=develop
:target: https://secure.travis-ci.org/boto/boto
@@ -15,40 +15,66 @@ Boto is a Python package that provides interfaces to Amazon Web Services.
At the moment, boto supports:
* Compute
+
* Amazon Elastic Compute Cloud (EC2)
* Amazon Elastic Map Reduce (EMR)
* AutoScaling
- * Elastic Load Balancing (ELB)
+
* Content Delivery
+
* Amazon CloudFront
+
* Database
+
* Amazon Relational Data Service (RDS)
* Amazon DynamoDB
* Amazon SimpleDB
+
* Deployment and Management
- * AWS Identity and Access Management (IAM)
- * Amazon CloudWatch
+
* AWS Elastic Beanstalk
* AWS CloudFormation
+ * AWS Data Pipeline
+
+* Identity & Access
+
+ * AWS Identity and Access Management (IAM)
+
* Application Services
+
* Amazon CloudSearch
* Amazon Simple Workflow Service (SWF)
* Amazon Simple Queue Service (SQS)
 * Amazon Simple Notification Service (SNS)
* Amazon Simple Email Service (SES)
+
+* Monitoring
+
+ * Amazon CloudWatch
+
* Networking
+
* Amazon Route53
* Amazon Virtual Private Cloud (VPC)
+ * Elastic Load Balancing (ELB)
+
* Payments and Billing
+
* Amazon Flexible Payment Service (FPS)
+
* Storage
+
* Amazon Simple Storage Service (S3)
* Amazon Glacier
* Amazon Elastic Block Store (EBS)
* Google Cloud Storage
+
* Workforce
+
* Amazon Mechanical Turk
+
* Other
+
* Marketplace Web Services
The goal of boto is to support the full breadth and depth of Amazon
@@ -87,16 +113,6 @@ ChangeLogs
To see what has changed over time in boto, you can check out the
`release notes`_ in the wiki.
-*********************************
-Special Note for Python 3.x Users
-*********************************
-
-If you are interested in trying out boto with Python 3.x, check out the
-`neo`_ branch. This is under active development and the goal is a version
-of boto that works in Python 2.6, 2.7, and 3.x. Not everything is working
-just yet but many things are and it's worth a look if you are an active
-Python 3.x user.
-
***************************
Finding Out More About Boto
***************************
@@ -113,6 +129,8 @@ Boto releases can be found on the `Python Cheese Shop`_.
Join our IRC channel `#boto` on FreeNode.
Webchat IRC channel: http://webchat.freenode.net/?channels=boto
+Join the `boto-users Google Group`_.
+
*************************
Getting Started with Boto
*************************
@@ -141,3 +159,4 @@ All rights reserved.
.. _this: http://code.google.com/p/boto/wiki/BotoConfig
.. _gitflow: http://nvie.com/posts/a-successful-git-branching-model/
.. _neo: https://github.com/boto/boto/tree/neo
+.. _boto-users Google Group: https://groups.google.com/forum/?fromgroups#!forum/boto-users
20 bin/cfadmin
@@ -65,6 +65,26 @@ def invalidate(cf, origin_or_id, *paths):
sys.exit(1)
cf.create_invalidation_request(dist.id, paths)
+def listinvalidations(cf, origin_or_id):
+ """List invalidation requests for a given origin"""
+ dist = None
+ for d in cf.get_all_distributions():
+ if d.id == origin_or_id or d.origin.dns_name == origin_or_id:
+ dist = d
+ break
+ if not dist:
+ print "Distribution not found: %s" % origin_or_id
+ sys.exit(1)
+ results = cf.get_invalidation_requests(dist.id)
+ if results:
+ for result in results:
+ if result.status == "InProgress":
+ result = result.get_invalidation_request()
+ print result.id, result.status, result.paths
+ else:
+ print result.id, result.status
+
+
if __name__ == "__main__":
import boto
import sys
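
The new listinvalidations subcommand is a thin wrapper over two CloudFront calls. As a rough standalone sketch (assuming configured credentials and at least one existing distribution; the loop body mirrors the command above), the same listing can be done directly against the boto API:

    # Rough equivalent of the listinvalidations command above.
    import boto

    cf = boto.connect_cloudfront()
    for d in cf.get_all_distributions():
        for result in cf.get_invalidation_requests(d.id):
            if result.status == "InProgress":
                # In-progress requests are re-fetched to get their paths.
                result = result.get_invalidation_request()
                print result.id, result.status, result.paths
            else:
                print result.id, result.status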
26 bin/elbadmin
@@ -107,12 +107,30 @@ def get(elb, name):
print
+ # Make map of all instance Id's to Name tags
+ ec2 = boto.connect_ec2()
+
+ instance_health = b.get_instance_health()
+ instances = [state.instance_id for state in instance_health]
+
+ names = {}
+ for r in ec2.get_all_instances(instances):
+ for i in r.instances:
+ names[i.id] = i.tags.get('Name', '')
+
+ name_column_width = max([4] + [len(v) for k,v in names.iteritems()]) + 2
+
print "Instances"
print "---------"
- print "%-12s %-15s %s" % ("ID", "STATE", "DESCRIPTION")
- for state in b.get_instance_health():
- print "%-12s %-15s %s" % (state.instance_id, state.state,
- state.description)
+ print "%-12s %-15s %-*s %s" % ("ID",
+ "STATE",
+ name_column_width, "NAME",
+ "DESCRIPTION")
+ for state in instance_health:
+ print "%-12s %-15s %-*s %s" % (state.instance_id,
+ state.state,
+ name_column_width, names[state.instance_id],
+ state.description)
print
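
The NAME column above is sized dynamically: its width is the length of the longest Name tag, floored at 4 so the "NAME" header always fits, plus two characters of padding, and the %-*s specifier consumes that width as an extra argument. A small worked illustration with made-up instance IDs:

    # Worked example of the dynamic NAME column width computed above.
    names = {'i-12345678': 'web-frontend', 'i-87654321': ''}
    # Floor of 4 keeps room for the "NAME" header; +2 adds padding.
    name_column_width = max([4] + [len(v) for k, v in names.iteritems()]) + 2
    print "%-12s %-*s %s" % ('i-12345678', name_column_width,
                             names['i-12345678'], 'InService')
    # -> i-12345678   web-frontend   InService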
34 bin/fetch_file
@@ -15,23 +15,29 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
if __name__ == "__main__":
- from optparse import OptionParser
- parser = OptionParser(version="0.1", usage="Usage: %prog [options] url")
- parser.add_option("-o", "--out-file", help="Output file", dest="outfile")
+ from optparse import OptionParser
+ usage = """%prog [options] URI
+Fetch a URI using the boto library and (by default) pipe contents to STDOUT
+The URI can be either an HTTP URL, or "s3://bucket_name/key_name"
+"""
+ parser = OptionParser(version="0.1", usage=usage)
+ parser.add_option("-o", "--out-file",
+ help="File to receive output instead of STDOUT",
+ dest="outfile")
- (options, args) = parser.parse_args()
- if len(args) < 1:
- parser.print_help()
- exit(1)
- from boto.utils import fetch_file
- f = fetch_file(args[0])
- if options.outfile:
- open(options.outfile, "w").write(f.read())
- else:
- print f.read()
+ (options, args) = parser.parse_args()
+ if len(args) < 1:
+ parser.print_help()
+ exit(1)
+ from boto.utils import fetch_file
+ f = fetch_file(args[0])
+ if options.outfile:
+ open(options.outfile, "w").write(f.read())
+ else:
+ print f.read()
30 bin/glacier
@@ -51,15 +51,15 @@ glacier <command> [args]
created
Common args:
- access_key - Your AWS Access Key ID. If not supplied, boto will
- use the value of the environment variable
- AWS_ACCESS_KEY_ID
- secret_key - Your AWS Secret Access Key. If not supplied, boto
- will use the value of the environment variable
- AWS_SECRET_ACCESS_KEY
- region - AWS region to use. Possible vaules: us-east-1, us-west-1,
- us-west-2, ap-northeast-1, eu-west-1.
- Default: us-east-1
+ --access_key - Your AWS Access Key ID. If not supplied, boto will
+ use the value of the environment variable
+ AWS_ACCESS_KEY_ID
+ --secret_key - Your AWS Secret Access Key. If not supplied, boto
+ will use the value of the environment variable
+ AWS_SECRET_ACCESS_KEY
+ --region - AWS region to use. Possible values: us-east-1, us-west-1,
+ us-west-2, ap-northeast-1, eu-west-1.
+ Default: us-east-1
Vaults operations:
@@ -91,18 +91,18 @@ def connect(region, debug_level=0, access_key=None, secret_key=None):
def list_vaults(region, access_key=None, secret_key=None):
- layer2 = connect(region, access_key, secret_key)
+ layer2 = connect(region, access_key = access_key, secret_key = secret_key)
for vault in layer2.list_vaults():
print vault.arn
def list_jobs(vault_name, region, access_key=None, secret_key=None):
- layer2 = connect(region, access_key, secret_key)
+ layer2 = connect(region, access_key = access_key, secret_key = secret_key)
print layer2.layer1.list_jobs(vault_name)
def upload_files(vault_name, filenames, region, access_key=None, secret_key=None):
- layer2 = connect(region, access_key, secret_key)
+ layer2 = connect(region, access_key = access_key, secret_key = secret_key)
layer2.create_vault(vault_name)
glacier_vault = layer2.get_vault(vault_name)
for filename in filenames:
@@ -131,11 +131,11 @@ def main():
access_key = secret_key = None
region = 'us-east-1'
for option, value in opts:
- if option in ('a', '--access_key'):
+ if option in ('-a', '--access_key'):
access_key = value
- elif option in ('s', '--secret_key'):
+ elif option in ('-s', '--secret_key'):
secret_key = value
- elif option in ('r', '--region'):
+ elif option in ('-r', '--region'):
region = value
# handle each command
if command == 'vaults':
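
The switch to keyword arguments in the connect() calls is a genuine bug fix, not a style change: connect() takes debug_level as its second positional parameter, so the old positional form bound the access key to debug_level and the secret key to access_key. A stripped-down signature reproduces both bindings:

    # Same parameter order as the script's connect() helper.
    def connect(region, debug_level=0, access_key=None, secret_key=None):
        return (debug_level, access_key, secret_key)

    print connect('us-east-1', 'AKIA...', 'SEKRIT')
    # -> ('AKIA...', 'SEKRIT', None)   old call: key lands in debug_level
    print connect('us-east-1', access_key='AKIA...', secret_key='SEKRIT')
    # -> (0, 'AKIA...', 'SEKRIT')      fixed call: keywords bind correctly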
10 bin/list_instances
@@ -35,8 +35,10 @@ def main():
parser.add_option("-r", "--region", help="Region (default us-east-1)", dest="region", default="us-east-1")
parser.add_option("-H", "--headers", help="Set headers (use 'T:tagname' for including tags)", default=None, action="store", dest="headers", metavar="ID,Zone,Groups,Hostname,State,T:Name")
parser.add_option("-t", "--tab", help="Tab delimited, skip header - useful in shell scripts", action="store_true", default=False)
+ parser.add_option("-f", "--filter", help="Filter option sent to DescribeInstances API call, format is key1=value1,key2=value2,...", default=None)
(options, args) = parser.parse_args()
+
# Connect the region
for r in regions():
if r.name == options.region:
@@ -62,13 +64,19 @@ def main():
format_string += "%%-%ds" % HEADERS[h]['length']
+ # Parse filters (if any)
+ if options.filter:
+ filters = dict([entry.split('=') for entry in options.filter.split(',')])
+ else:
+ filters = {}
+
# List and print
if not options.tab:
print format_string % headers
print "-" * len(format_string % headers)
- for r in ec2.get_all_instances():
+ for r in ec2.get_all_instances(filters=filters):
groups = [g.name for g in r.groups]
for i in r.instances:
i.groups = ','.join(groups)
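
The new -f/--filter option turns 'key1=value1,key2=value2' into a dict and hands it to DescribeInstances unchanged. A quick sketch with a hypothetical filter string (note that a value containing '=' splits into three pieces, which makes dict() raise ValueError):

    # Hypothetical filter string for the new -f/--filter option.
    option_filter = 'instance-state-name=running,tag:Name=web'
    filters = dict([entry.split('=') for entry in option_filter.split(',')])
    print filters
    # -> {'instance-state-name': 'running', 'tag:Name': 'web'}
    # The dict is then passed straight through:
    #     ec2.get_all_instances(filters=filters)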
419 bin/mturk
@@ -0,0 +1,419 @@
+#!/usr/bin/env python
+# Copyright 2012 Kodi Arfer
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+
+import argparse # Hence, Python 2.7 is required.
+import sys
+import os.path
+import string
+import inspect
+import datetime, calendar
+import json
+import boto.mturk.connection, boto.mturk.price, boto.mturk.question, boto.mturk.qualification
+
+# --------------------------------------------------
+# Globals
+# -------------------------------------------------
+
+interactive = False
+con = None
+mturk_website = None
+
+default_nicknames_path = os.path.expanduser('~/.boto_mturkcli_hit_nicknames')
+nicknames = {}
+nickname_pool = set(string.ascii_lowercase)
+
+example_config_file = '''Example configuration file:
+
+ {
+ "title": "Pick your favorite color",
+ "description": "In this task, you are asked to pick your favorite color.",
+ "reward": 0.50,
+ "assignments": 10,
+ "duration": "20 min",
+ "keywords": ["color", "favorites", "survey"],
+ "lifetime": "7 d",
+ "approval_delay": "14 d",
+ "qualifications": [
+ "PercentAssignmentsApproved > 90",
+ "Locale == US"
+ ],
+ "question_url": "http://example.com/myhit",
+ "question_frame_height": 450
+ }'''
+
+# --------------------------------------------------
+# Subroutines
+# --------------------------------------------------
+
+def unjson(path):
+ with open(path) as o:
+ return json.load(o)
+
+def add_argparse_arguments(parser):
+ parser.add_argument('-P', '--production',
+ dest = 'sandbox', action = 'store_false', default = True,
+ help = 'use the production site (default: use the sandbox)')
+ parser.add_argument('--nicknames',
+ dest = 'nicknames_path', metavar = 'PATH',
+ default = default_nicknames_path,
+ help = 'where to store HIT nicknames (default: {})'.format(
+ default_nicknames_path))
+
+def init_by_args(args):
+ init(args.sandbox, args.nicknames_path)
+
+def init(sandbox = False, nicknames_path = default_nicknames_path):
+ global con, mturk_website, nicknames, original_nicknames
+
+ mturk_website = 'workersandbox.mturk.com' if sandbox else 'www.mturk.com'
+ con = boto.mturk.connection.MTurkConnection(
+ host = 'mechanicalturk.sandbox.amazonaws.com' if sandbox else 'mechanicalturk.amazonaws.com')
+
+ try:
+ nicknames = unjson(nicknames_path)
+ except IOError:
+ nicknames = {}
+ original_nicknames = nicknames.copy()
+
+def save_nicknames(nicknames_path = default_nicknames_path):
+ if nicknames != original_nicknames:
+ with open(nicknames_path, 'w') as o:
+ json.dump(nicknames, o, sort_keys = True, indent = 4)
+ print >>o
+
+time_units = dict(s = 1, min = 60, h = 60 * 60, d = 24 * 60 * 60)
+def parse_duration(s):
+ '''Parses durations like "2 d", "48 h", "2880 min",
+"172800 s", or "172800".'''
+ x = s.split()
+ return int(x[0]) * time_units['s' if len(x) == 1 else x[1]]
+def display_duration(n):
+ for unit, m in sorted(time_units.items(), key = lambda x: -x[1]):
+ if n % m == 0:
+ return '{} {}'.format(n / m, unit)
+
+def parse_qualification(s):
+ '''Parses qualifications like "PercentAssignmentsApproved > 90" and "Locale == US".'''
+ name, comparator, value = s.split()
+ f = dict(
+ PercentAssignmentsApproved = boto.mturk.qualification.PercentAssignmentsApprovedRequirement,
+ Locale = boto.mturk.qualification.LocaleRequirement)[name]
+ comparator = {v : k for k, v in dict(
+ LessThan = '<', LessThanOrEqualTo = '<=',
+ GreaterThan = '>', GreaterThanOrEqualTo = '>=',
+ EqualTo = '==', NotEqualTo = '!=').items()}[comparator]
+ return f(comparator, value, required_to_preview = False)
+
+def preview_url(hit):
+ return 'https://{}/mturk/preview?groupId={}'.format(
+ mturk_website, hit.HITTypeId)
+
+def parse_timestamp(s):
+ '''Takes a timestamp like "2012-11-24T16:34:41Z".
+
+Returns a datetime object in the local time zone.'''
+ return datetime.datetime.fromtimestamp(
+ calendar.timegm(
+ datetime.datetime.strptime(s, '%Y-%m-%dT%H:%M:%SZ').timetuple()))
+
+def get_hitid(nickname_or_hitid):
+ return nicknames.get(nickname_or_hitid) or nickname_or_hitid
+
+def get_nickname(hitid):
+ for k, v in nicknames.items():
+ if v == hitid:
+ return k
+ return None
+
+def display_datetime(dt):
+ return dt.strftime('%e %b %Y, %l:%M %P')
+
+def display_hit(hit, verbose = False):
+ et = parse_timestamp(hit.Expiration)
+ return '\n'.join([
+ '{} - {} ({}, {}, {})'.format(
+ get_nickname(hit.HITId),
+ hit.Title,
+ hit.FormattedPrice,
+ display_duration(int(hit.AssignmentDurationInSeconds)),
+ hit.HITStatus),
+ 'HIT ID: ' + hit.HITId,
+ 'Type ID: ' + hit.HITTypeId,
+ 'Group ID: ' + hit.HITGroupId,
+ 'Preview: ' + preview_url(hit),
+ 'Created {} {}'.format(
+ display_datetime(parse_timestamp(hit.CreationTime)),
+ 'Expired' if et <= datetime.datetime.now() else
+ 'Expires ' + display_datetime(et)),
+ 'Assignments: {} -- {} avail, {} pending, {} reviewable, {} reviewed'.format(
+ hit.MaxAssignments,
+ hit.NumberOfAssignmentsAvailable,
+ hit.NumberOfAssignmentsPending,
+ int(hit.MaxAssignments) - (int(hit.NumberOfAssignmentsAvailable) + int(hit.NumberOfAssignmentsPending) + int(hit.NumberOfAssignmentsCompleted)),
+ hit.NumberOfAssignmentsCompleted)
+ if hasattr(hit, 'NumberOfAssignmentsAvailable')
+ else 'Assignments: {} total'.format(hit.MaxAssignments),
+ # For some reason, SearchHITs includes the
+ # NumberOfAssignmentsFoobar fields but GetHIT doesn't.
+ ] + ([] if not verbose else [
+ '\nDescription: ' + hit.Description,
+ '\nKeywords: ' + hit.Keywords
+ ])) + '\n'
+
+def digest_assignment(a):
+ return dict(
+ answers = {str(x.qid): str(x.fields[0]) for x in a.answers[0]},
+ **{k: str(getattr(a, k)) for k in (
+ 'AcceptTime', 'SubmitTime',
+ 'HITId', 'AssignmentId', 'WorkerId',
+ 'AssignmentStatus')})
+
+# --------------------------------------------------
+# Commands
+# --------------------------------------------------
+
+def get_balance():
+ return con.get_account_balance()
+
+def show_hit(hit):
+ return display_hit(con.get_hit(hit)[0], verbose = True)
+
+def list_hits():
+ 'Lists your 10 most recently created HITs, with the most recent last.'
+ return '\n'.join(reversed(map(display_hit, con.search_hits(
+ sort_by = 'CreationTime',
+ sort_direction = 'Descending',
+ page_size = 10))))
+
+def make_hit(title, description, keywords, reward, question_url, question_frame_height, duration, assignments, approval_delay, lifetime, qualifications = []):
+ r = con.create_hit(
+ title = title,
+ description = description,
+ keywords = con.get_keywords_as_string(keywords),
+ reward = con.get_price_as_price(reward),
+ question = boto.mturk.question.ExternalQuestion(
+ question_url,
+ question_frame_height),
+ duration = parse_duration(duration),
+ qualifications = boto.mturk.qualification.Qualifications(
+ map(parse_qualification, qualifications)),
+ max_assignments = assignments,
+ approval_delay = parse_duration(approval_delay),
+ lifetime = parse_duration(lifetime))
+ nick = None
+ available_nicks = nickname_pool - set(nicknames.keys())
+ if available_nicks:
+ nick = min(available_nicks)
+ nicknames[nick] = r[0].HITId
+ if interactive:
+ print 'Nickname:', nick
+ print 'HIT ID:', r[0].HITId
+ print 'Preview:', preview_url(r[0])
+ else:
+ return r[0]
+
+def extend_hit(hit, assignments_increment = None, expiration_increment = None):
+ con.extend_hit(hit, assignments_increment, expiration_increment)
+
+def expire_hit(hit):
+ con.expire_hit(hit)
+
+def delete_hit(hit):
+ '''Deletes a HIT using DisableHIT.
+
+Unreviewed assignments get automatically approved. Unsubmitted
+assignments get automatically approved upon submission.
+
+The API docs say DisableHIT doesn't work with Reviewable HITs,
+but apparently, it does.'''
+ con.disable_hit(hit)
+ global nicknames
+ nicknames = {k: v for k, v in nicknames.items() if v != hit}
+
+def list_assignments(hit, only_reviewable = False):
+ assignments = map(digest_assignment, con.get_assignments(
+ hit_id = hit,
+ page_size = 100,
+ status = 'Submitted' if only_reviewable else None))
+ if interactive:
+ print json.dumps(assignments, sort_keys = True, indent = 4)
+ print ' '.join([a['AssignmentId'] for a in assignments])
+ print ' '.join([a['WorkerId'] + ',' + a['AssignmentId'] for a in assignments])
+ else:
+ return assignments
+
+def grant_bonus(message, amount, pairs):
+ for worker, assignment in pairs:
+ con.grant_bonus(worker, assignment, con.get_price_as_price(amount), message)
+ if interactive: print 'Bonused', worker
+
+def approve_assignments(message, assignments):
+ for a in assignments:
+ con.approve_assignment(a, message)
+ if interactive: print 'Approved', a
+
+def reject_assignments(message, assignments):
+ for a in assignments:
+ con.reject_assignment(a, message)
+ if interactive: print 'Rejected', a
+
+def unreject_assignments(message, assignments):
+ for a in assignments:
+ con.approve_rejected_assignment(a, message)
+ if interactive: print 'Unrejected', a
+
+# --------------------------------------------------
+# Mainline code
+# --------------------------------------------------
+
+if __name__ == '__main__':
+ interactive = True
+
+ parser = argparse.ArgumentParser()
+ add_argparse_arguments(parser)
+ subs = parser.add_subparsers()
+
+ sub = subs.add_parser('bal',
+ help = 'display your prepaid balance')
+ sub.set_defaults(f = get_balance, a = lambda: [])
+
+ sub = subs.add_parser('hit',
+ help = 'get information about a HIT')
+ sub.add_argument('hit',
+ help = 'nickname or ID of the HIT to show')
+ sub.set_defaults(f = show_hit, a = lambda:
+ [get_hitid(args.hit)])
+
+ sub = subs.add_parser('hits',
+ help = 'list all your HITS')
+ sub.set_defaults(f = list_hits, a = lambda: [])
+
+ sub = subs.add_parser('new',
+ help = 'create a new HIT (external questions only)',
+ epilog = example_config_file,
+ formatter_class = argparse.RawDescriptionHelpFormatter)
+ sub.add_argument('json_path',
+ help = 'path to JSON configuration file for the HIT')
+ sub.add_argument('-u', '--question-url', dest = 'question_url',
+ metavar = 'URL',
+ help = 'URL for the external question')
+ sub.add_argument('-a', '--assignments', dest = 'assignments',
+ type = int, metavar = 'N',
+ help = 'number of assignments')
+ sub.add_argument('-r', '--reward', dest = 'reward',
+ type = float, metavar = 'PRICE',
+ help = 'reward amount, in USD')
+ sub.set_defaults(f = make_hit, a = lambda: dict(
+ unjson(args.json_path).items() + [(k, getattr(args, k))
+ for k in ('question_url', 'assignments', 'reward')
+ if getattr(args, k) is not None]))
+
+ sub = subs.add_parser('extend',
+ help = 'add assignments or time to a HIT')
+ sub.add_argument('hit',
+ help = 'nickname or ID of the HIT to extend')
+ sub.add_argument('-a', '--assignments', dest = 'assignments',
+ metavar = 'N', type = int,
+ help = 'number of assignments to add')
+ sub.add_argument('-t', '--time', dest = 'time',
+ metavar = 'T',
+ help = 'amount of time to add to the expiration date')
+ sub.set_defaults(f = extend_hit, a = lambda:
+ [get_hitid(args.hit), args.assignments,
+ args.time and parse_duration(args.time)])
+
+ sub = subs.add_parser('expire',
+ help = 'force a HIT to expire without deleting it')
+ sub.add_argument('hit',
+ help = 'nickname or ID of the HIT to expire')
+ sub.set_defaults(f = expire_hit, a = lambda:
+ [get_hitid(args.hit)])
+
+ sub = subs.add_parser('rm',
+ help = 'delete a HIT')
+ sub.add_argument('hit',
+ help = 'nickname or ID of the HIT to delete')
+ sub.set_defaults(f = delete_hit, a = lambda:
+ [get_hitid(args.hit)])
+
+ sub = subs.add_parser('as',
+ help = "list a HIT's submitted assignments")
+ sub.add_argument('hit',
+ help = 'nickname or ID of the HIT to get assignments for')
+ sub.add_argument('-r', '--reviewable', dest = 'only_reviewable',
+ action = 'store_true',
+ help = 'show only unreviewed assignments')
+ sub.set_defaults(f = list_assignments, a = lambda:
+ [get_hitid(args.hit), args.only_reviewable])
+
+ for command, fun, helpmsg in [
+ ('approve', approve_assignments, 'approve assignments'),
+ ('reject', reject_assignments, 'reject assignments'),
+ ('unreject', unreject_assignments, 'approve previously rejected assignments')]:
+ sub = subs.add_parser(command, help = helpmsg)
+ sub.add_argument('assignment', nargs = '+',
+ help = 'ID of an assignment')
+ sub.add_argument('-m', '--message', dest = 'message',
+ metavar = 'TEXT',
+ help = 'feedback message shown to workers')
+ sub.set_defaults(f = fun, a = lambda:
+ [args.message, args.assignment])
+
+ sub = subs.add_parser('bonus',
+ help = 'give a worker a bonus')
+ sub.add_argument('amount', type = float,
+ help = 'bonus amount, in USD')
+ sub.add_argument('message',
+ help = 'the reason for the bonus (shown to workers in an email sent by MTurk)')
+ sub.add_argument('widaid', nargs = '+',
+ help = 'a WORKER_ID,ASSIGNMENT_ID pair')
+ sub.set_defaults(f = grant_bonus, a = lambda:
+ [args.message, args.amount,
+ [p.split(',') for p in args.widaid]])
+
+ args = parser.parse_args()
+
+ init_by_args(args)
+
+ f = args.f
+ a = args.a()
+ if isinstance(a, dict):
+ # We do some introspective gymnastics so we can produce a
+ # less incomprehensible error message if some arguments
+ # are missing.
+ spec = inspect.getargspec(f)
+ missing = set(spec.args[: len(spec.args) - len(spec.defaults)]) - set(a.keys())
+ if missing:
+ raise ValueError('Missing arguments: ' + ', '.join(missing))
+ doit = lambda: f(**a)
+ else:
+ doit = lambda: f(*a)
+
+ try:
+ x = doit()
+ except boto.mturk.connection.MTurkRequestError as e:
+ print 'MTurk error:', e.error_message
+ sys.exit(1)
+
+ if x is not None:
+ print x
+
+ save_nicknames()
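
For comparison with the example configuration file above, here is a rough sketch of the boto calls that the tool's 'new' subcommand wraps; it targets the worker sandbox, and the URL and values are illustrative placeholders only:

    # Rough sketch, sandbox only; mirrors the example configuration file.
    import boto.mturk.connection, boto.mturk.question

    con = boto.mturk.connection.MTurkConnection(
        host='mechanicalturk.sandbox.amazonaws.com')
    result = con.create_hit(
        title='Pick your favorite color',
        description='In this task, you are asked to pick your favorite color.',
        reward=con.get_price_as_price(0.50),
        question=boto.mturk.question.ExternalQuestion(
            'http://example.com/myhit', 450),  # placeholder URL
        duration=20 * 60,                      # "20 min"
        max_assignments=10,
        approval_delay=14 * 24 * 60 * 60,      # "14 d"
        lifetime=7 * 24 * 60 * 60)             # "7 d"
    print result[0].HITId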
323 bin/s3multiput
@@ -1,323 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
-#
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the
-# "Software"), to deal in the Software without restriction, including
-# without limitation the rights to use, copy, modify, merge, publish, dis-
-# tribute, sublicense, and/or sell copies of the Software, and to permit
-# persons to whom the Software is furnished to do so, subject to the fol-
-# lowing conditions:
-#
-# The above copyright notice and this permission notice shall be included
-# in all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
-# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-# IN THE SOFTWARE.
-#
-
-# multipart portions copyright Fabian Topfstedt
-# https://gist.github.com/924094
-
-
-import math
-import mimetypes
-from multiprocessing import Pool
-import getopt, sys, os
-
-import boto
-from boto.exception import S3ResponseError
-
-from boto.s3.connection import S3Connection
-from filechunkio import FileChunkIO
-
-usage_string = """
-SYNOPSIS
- s3put [-a/--access_key <access_key>] [-s/--secret_key <secret_key>]
- -b/--bucket <bucket_name> [-c/--callback <num_cb>]
- [-d/--debug <debug_level>] [-i/--ignore <ignore_dirs>]
- [-n/--no_op] [-p/--prefix <prefix>] [-k/--key_prefix <key_prefix>]
- [-q/--quiet] [-g/--grant grant] [-w/--no_overwrite] [-r/--reduced] path
-
- Where
- access_key - Your AWS Access Key ID. If not supplied, boto will
- use the value of the environment variable
- AWS_ACCESS_KEY_ID
- secret_key - Your AWS Secret Access Key. If not supplied, boto
- will use the value of the environment variable
- AWS_SECRET_ACCESS_KEY
- bucket_name - The name of the S3 bucket the file(s) should be
- copied to.
- path - A path to a directory or file that represents the items
- to be uploaded. If the path points to an individual file,
- that file will be uploaded to the specified bucket. If the
- path points to a directory, s3_it will recursively traverse
- the directory and upload all files to the specified bucket.
- debug_level - 0 means no debug output (default), 1 means normal
- debug output from boto, and 2 means boto debug output
- plus request/response output from httplib
- ignore_dirs - a comma-separated list of directory names that will
- be ignored and not uploaded to S3.
- num_cb - The number of progress callbacks to display. The default
- is zero which means no callbacks. If you supplied a value
- of "-c 10" for example, the progress callback would be
- called 10 times for each file transferred.
- prefix - A file path prefix that will be stripped from the full
- path of the file when determining the key name in S3.
- For example, if the full path of a file is:
- /home/foo/bar/fie.baz
- and the prefix is specified as "-p /home/foo/" the
- resulting key name in S3 will be:
- /bar/fie.baz
- The prefix must end in a trailing separator and if it
- does not then one will be added.
- key_prefix - A prefix to be added to the S3 key name, after any
- stripping of the file path is done based on the
- "-p/--prefix" option.
- reduced - Use Reduced Redundancy storage
- grant - A canned ACL policy that will be granted on each file
- transferred to S3. The value of provided must be one
- of the "canned" ACL policies supported by S3:
- private|public-read|public-read-write|authenticated-read
- no_overwrite - No files will be overwritten on S3, if the file/key
- exists on s3 it will be kept. This is useful for
- resuming interrupted transfers. Note this is not a
- sync, even if the file has been updated locally if
- the key exists on s3 the file on s3 will not be
- updated.
-
- If the -n option is provided, no files will be transferred to S3 but
- informational messages will be printed about what would happen.
-"""
-def usage():
- print usage_string
- sys.exit()
-
-def submit_cb(bytes_so_far, total_bytes):
- print '%d bytes transferred / %d bytes total' % (bytes_so_far, total_bytes)
-
-def get_key_name(fullpath, prefix, key_prefix):
- key_name = fullpath[len(prefix):]
- l = key_name.split(os.sep)
- return key_prefix + '/'.join(l)
-
-def _upload_part(bucketname, aws_key, aws_secret, multipart_id, part_num,
- source_path, offset, bytes, debug, cb, num_cb, amount_of_retries=10):
- if debug == 1:
- print "_upload_part(%s, %s, %s)" % (source_path, offset, bytes)
- """
- Uploads a part with retries.
- """
- def _upload(retries_left=amount_of_retries):
- try:
- if debug == 1:
- print 'Start uploading part #%d ...' % part_num
- conn = S3Connection(aws_key, aws_secret)
- conn.debug = debug
- bucket = conn.get_bucket(bucketname)
- for mp in bucket.get_all_multipart_uploads():
- if mp.id == multipart_id:
- with FileChunkIO(source_path, 'r', offset=offset,
- bytes=bytes) as fp:
- mp.upload_part_from_file(fp=fp, part_num=part_num, cb=cb, num_cb=num_cb)
- break
- except Exception, exc:
- if retries_left:
- _upload(retries_left=retries_left - 1)
- else:
- print 'Failed uploading part #%d' % part_num
- raise exc
- else:
- if debug == 1:
- print '... Uploaded part #%d' % part_num
-
- _upload()
-
-def upload(bucketname, aws_key, aws_secret, source_path, keyname,
- reduced, debug, cb, num_cb,
- acl='private', headers={}, guess_mimetype=True, parallel_processes=4):
- """
- Parallel multipart upload.
- """
- conn = S3Connection(aws_key, aws_secret)
- conn.debug = debug
- bucket = conn.get_bucket(bucketname)
-
- if guess_mimetype:
- mtype = mimetypes.guess_type(keyname)[0] or 'application/octet-stream'
- headers.update({'Content-Type': mtype})
-
- mp = bucket.initiate_multipart_upload(keyname, headers=headers, reduced_redundancy=reduced)
-
- source_size = os.stat(source_path).st_size
- bytes_per_chunk = max(int(math.sqrt(5242880) * math.sqrt(source_size)),
- 5242880)
- chunk_amount = int(math.ceil(source_size / float(bytes_per_chunk)))
-
- pool = Pool(processes=parallel_processes)
- for i in range(chunk_amount):
- offset = i * bytes_per_chunk
- remaining_bytes = source_size - offset
- bytes = min([bytes_per_chunk, remaining_bytes])
- part_num = i + 1
- pool.apply_async(_upload_part, [bucketname, aws_key, aws_secret, mp.id,
- part_num, source_path, offset, bytes, debug, cb, num_cb])
- pool.close()
- pool.join()
-
- if len(mp.get_all_parts()) == chunk_amount:
- mp.complete_upload()
- key = bucket.get_key(keyname)
- key.set_acl(acl)
- else:
- mp.cancel_upload()
-
-
-def main():
-
- # default values
- aws_access_key_id = None
- aws_secret_access_key = None
- bucket_name = ''
- ignore_dirs = []
- total = 0
- debug = 0
- cb = None
- num_cb = 0
- quiet = False
- no_op = False
- prefix = '/'
- key_prefix = ''
- grant = None
- no_overwrite = False
- reduced = False
-
- try:
- opts, args = getopt.getopt(sys.argv[1:], 'a:b:c::d:g:hi:k:np:qs:wr',
- ['access_key=', 'bucket=', 'callback=', 'debug=', 'help', 'grant=',
- 'ignore=', 'key_prefix=', 'no_op', 'prefix=', 'quiet', 'secret_key=',
- 'no_overwrite', 'reduced'])
- except:
- usage()
-
- # parse opts
- for o, a in opts:
- if o in ('-h', '--help'):
- usage()
- if o in ('-a', '--access_key'):
- aws_access_key_id = a
- if o in ('-b', '--bucket'):
- bucket_name = a
- if o in ('-c', '--callback'):
- num_cb = int(a)
- cb = submit_cb
- if o in ('-d', '--debug'):
- debug = int(a)
- if o in ('-g', '--grant'):
- grant = a
- if o in ('-i', '--ignore'):
- ignore_dirs = a.split(',')
- if o in ('-n', '--no_op'):
- no_op = True
- if o in ('w', '--no_overwrite'):
- no_overwrite = True
- if o in ('-p', '--prefix'):
- prefix = a
- if prefix[-1] != os.sep:
- prefix = prefix + os.sep
- if o in ('-k', '--key_prefix'):
- key_prefix = a
- if o in ('-q', '--quiet'):
- quiet = True
- if o in ('-s', '--secret_key'):
- aws_secret_access_key = a
- if o in ('-r', '--reduced'):
- reduced = True
-
- if len(args) != 1:
- usage()
-
-
- path = os.path.expanduser(args[0])
- path = os.path.expandvars(path)
- path = os.path.abspath(path)
-
- if not bucket_name:
- print "bucket name is required!"
- usage()
-
- c = boto.connect_s3(aws_access_key_id=aws_access_key_id,
- aws_secret_access_key=aws_secret_access_key)
- c.debug = debug
- b = c.get_bucket(bucket_name)
-
- # upload a directory of files recursively
- if os.path.isdir(path):
- if no_overwrite:
- if not quiet:
- print 'Getting list of existing keys to check against'
- keys = []
- for key in b.list(get_key_name(path, prefix, key_prefix)):
- keys.append(key.name)
- for root, dirs, files in os.walk(path):
- for ignore in ignore_dirs:
- if ignore in dirs:
- dirs.remove(ignore)
- for file in files:
- fullpath = os.path.join(root, file)
- key_name = get_key_name(fullpath, prefix, key_prefix)
- copy_file = True
- if no_overwrite:
- if key_name in keys:
- copy_file = False
- if not quiet:
- print 'Skipping %s as it exists in s3' % file
-
- if copy_file:
- if not quiet:
- print 'Copying %s to %s/%s' % (file, bucket_name, key_name)
-
- if not no_op:
- if os.stat(fullpath).st_size == 0:
- # 0-byte files don't work and also don't need multipart upload
- k = b.new_key(key_name)
- k.set_contents_from_filename(fullpath, cb=cb, num_cb=num_cb,
- policy=grant, reduced_redundancy=reduced)
- else:
- upload(bucket_name, aws_access_key_id,
- aws_secret_access_key, fullpath, key_name,
- reduced, debug, cb, num_cb, grant or 'private')
- total += 1
-
- # upload a single file
- elif os.path.isfile(path):
- key_name = get_key_name(os.path.abspath(path), prefix, key_prefix)
- copy_file = True
- if no_overwrite:
- if b.get_key(key_name):
- copy_file = False
- if not quiet:
- print 'Skipping %s as it exists in s3' % path
-
- if copy_file:
- if not quiet:
- print 'Copying %s to %s/%s' % (path, bucket_name, key_name)
-
- if not no_op:
- if os.stat(path).st_size == 0:
- # 0-byte files don't work and also don't need multipart upload
- k = b.new_key(key_name)
- k.set_contents_from_filename(path, cb=cb, num_cb=num_cb, policy=grant,
- reduced_redundancy=reduced)
- else:
- upload(bucket_name, aws_access_key_id,
- aws_secret_access_key, path, key_name,
- reduced, debug, cb, num_cb, grant or 'private')
-
-if __name__ == "__main__":
- main()
307 bin/s3put
@@ -15,22 +15,45 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
-import getopt, sys, os
+import getopt
+import sys
+import os
import boto
-from boto.exception import S3ResponseError
+
+try:
+ # multipart portions copyright Fabian Topfstedt
+ # https://gist.github.com/924094
+
+ import math
+ import mimetypes
+ from multiprocessing import Pool
+ from boto.s3.connection import S3Connection
+ from filechunkio import FileChunkIO
+ multipart_capable = True
+ usage_flag_multipart_capable = """ [--multipart]"""
+ usage_string_multipart_capable = """
+ multipart - Upload files as multiple parts. This needs filechunkio."""
+except ImportError as err:
+ multipart_capable = False
+ usage_flag_multipart_capable = ""
+ usage_string_multipart_capable = '\n\n "' + \
+ err.message[len('No module named '):] + \
+ '" is missing for multipart support '
+
usage_string = """
SYNOPSIS
s3put [-a/--access_key <access_key>] [-s/--secret_key <secret_key>]
-b/--bucket <bucket_name> [-c/--callback <num_cb>]
[-d/--debug <debug_level>] [-i/--ignore <ignore_dirs>]
- [-n/--no_op] [-p/--prefix <prefix>] [-q/--quiet]
- [-g/--grant grant] [-w/--no_overwrite] [-r/--reduced] path
+ [-n/--no_op] [-p/--prefix <prefix>] [-k/--key_prefix <key_prefix>]
+ [-q/--quiet] [-g/--grant grant] [-w/--no_overwrite] [-r/--reduced]
+ [--header] """ + usage_string_multipart_capable + """ path [path...]
Where
access_key - Your AWS Access Key ID. If not supplied, boto will
@@ -64,63 +87,172 @@ SYNOPSIS
/bar/fie.baz
The prefix must end in a trailing separator and if it
does not then one will be added.
+ key_prefix - A prefix to be added to the S3 key name, after any
+ stripping of the file path is done based on the
+ "-p/--prefix" option.
+ reduced - Use Reduced Redundancy storage
grant - A canned ACL policy that will be granted on each file
transferred to S3. The value of provided must be one
of the "canned" ACL policies supported by S3:
private|public-read|public-read-write|authenticated-read
- no_overwrite - No files will be overwritten on S3, if the file/key
- exists on s3 it will be kept. This is useful for
- resuming interrupted transfers. Note this is not a
- sync, even if the file has been updated locally if
- the key exists on s3 the file on s3 will not be
+ no_overwrite - No files will be overwritten on S3, if the file/key
+ exists on s3 it will be kept. This is useful for
+ resuming interrupted transfers. Note this is not a
+ sync, even if the file has been updated locally if
+ the key exists on s3 the file on s3 will not be
updated.
- reduced - Use Reduced Redundancy storage
+ header - key=value pairs of extra header(s) to pass along in the
+ request""" + usage_string_multipart_capable + """
If the -n option is provided, no files will be transferred to S3 but
informational messages will be printed about what would happen.
"""
+
+
def usage():
print usage_string
sys.exit()
-
+
+
def submit_cb(bytes_so_far, total_bytes):
print '%d bytes transferred / %d bytes total' % (bytes_so_far, total_bytes)
-def get_key_name(fullpath, prefix):
- key_name = fullpath[len(prefix):]
+
+def get_key_name(fullpath, prefix, key_prefix):
+ if fullpath.startswith(prefix):
+ key_name = fullpath[len(prefix):]
+ else:
+ key_name = fullpath
l = key_name.split(os.sep)
- return '/'.join(l)
+ return key_prefix + '/'.join(l)
+
+
+def _upload_part(bucketname, aws_key, aws_secret, multipart_id, part_num,
+ source_path, offset, bytes, debug, cb, num_cb,
+ amount_of_retries=10):
+ """
+ Uploads a part with retries.
+ """
+ if debug == 1:
+ print "_upload_part(%s, %s, %s)" % (source_path, offset, bytes)
+
+ def _upload(retries_left=amount_of_retries):
+ try:
+ if debug == 1:
+ print 'Start uploading part #%d ...' % part_num
+ conn = S3Connection(aws_key, aws_secret)
+ conn.debug = debug
+ bucket = conn.get_bucket(bucketname)
+ for mp in bucket.get_all_multipart_uploads():
+ if mp.id == multipart_id:
+ with FileChunkIO(source_path, 'r', offset=offset,
+ bytes=bytes) as fp:
+ mp.upload_part_from_file(fp=fp, part_num=part_num,
+ cb=cb, num_cb=num_cb)
+ break
+ except Exception, exc:
+ if retries_left:
+ _upload(retries_left=retries_left - 1)
+ else:
+ print 'Failed uploading part #%d' % part_num
+ raise exc
+ else:
+ if debug == 1:
+ print '... Uploaded part #%d' % part_num
+
+ _upload()
+
+
+def multipart_upload(bucketname, aws_key, aws_secret, source_path, keyname,
+ reduced, debug, cb, num_cb, acl='private', headers={},
+ guess_mimetype=True, parallel_processes=4):
+ """
+ Parallel multipart upload.
+ """
+ conn = S3Connection(aws_key, aws_secret)
+ conn.debug = debug
+ bucket = conn.get_bucket(bucketname)
+
+ if guess_mimetype:
+ mtype = mimetypes.guess_type(keyname)[0] or 'application/octet-stream'
+ headers.update({'Content-Type': mtype})
+
+ mp = bucket.initiate_multipart_upload(keyname, headers=headers,
+ reduced_redundancy=reduced)
+
+ source_size = os.stat(source_path).st_size
+ bytes_per_chunk = max(int(math.sqrt(5242880) * math.sqrt(source_size)),
+ 5242880)
+ chunk_amount = int(math.ceil(source_size / float(bytes_per_chunk)))
+
+ pool = Pool(processes=parallel_processes)
+ for i in range(chunk_amount):
+ offset = i * bytes_per_chunk
+ remaining_bytes = source_size - offset
+ bytes = min([bytes_per_chunk, remaining_bytes])
+ part_num = i + 1
+ pool.apply_async(_upload_part, [bucketname, aws_key, aws_secret, mp.id,
+ part_num, source_path, offset, bytes,
+ debug, cb, num_cb])
+ pool.close()
+ pool.join()
+
+ if len(mp.get_all_parts()) == chunk_amount:
+ mp.complete_upload()
+ key = bucket.get_key(keyname)
+ key.set_acl(acl)
+ else:
+ mp.cancel_upload()
+
+
+def singlepart_upload(bucket, key_name, fullpath, *kargs, **kwargs):
+ """
+ Single upload.
+ """
+ k = bucket.new_key(key_name)
+ k.set_contents_from_filename(fullpath, *kargs, **kwargs)
+
+
+def expand_path(path):
+ path = os.path.expanduser(path)
+ path = os.path.expandvars(path)
+ return os.path.abspath(path)
+
def main():
- try:
- opts, args = getopt.getopt(
- sys.argv[1:], 'a:b:c::d:g:hi:np:qs:vwr',
- ['access_key=', 'bucket=', 'callback=', 'debug=', 'help',
- 'grant=', 'ignore=', 'no_op', 'prefix=', 'quiet',
- 'secret_key=', 'no_overwrite', 'reduced', "header="]
- )
- except:
- usage()
- ignore_dirs = []
+
+ # default values
aws_access_key_id = None
aws_secret_access_key = None
bucket_name = ''
- total = 0
+ ignore_dirs = []
debug = 0
cb = None
num_cb = 0
quiet = False
no_op = False
prefix = '/'
+ key_prefix = ''
grant = None
no_overwrite = False
reduced = False
headers = {}
+ multipart_requested = False
+
+ try:
+ opts, args = getopt.getopt(
+ sys.argv[1:], 'a:b:c::d:g:hi:k:np:qs:wr',
+ ['access_key=', 'bucket=', 'callback=', 'debug=', 'help', 'grant=',
+ 'ignore=', 'key_prefix=', 'no_op', 'prefix=', 'quiet',
+ 'secret_key=', 'no_overwrite', 'reduced', 'header=', 'multipart'])
+ except:
+ usage()
+
+ # parse opts
for o, a in opts:
if o in ('-h', '--help'):
usage()
- sys.exit()
if o in ('-a', '--access_key'):
aws_access_key_id = a
if o in ('-b', '--bucket'):
@@ -138,78 +270,95 @@ def main():
no_op = True
if o in ('-w', '--no_overwrite'):
no_overwrite = True
- if o in ('-r', '--reduced'):
- reduced = True
if o in ('-p', '--prefix'):
prefix = a
if prefix[-1] != os.sep:
prefix = prefix + os.sep
+ prefix = expand_path(prefix)
+ if o in ('-k', '--key_prefix'):
+ key_prefix = a
if o in ('-q', '--quiet'):
quiet = True
if o in ('-s', '--secret_key'):
aws_secret_access_key = a
+ if o in ('-r', '--reduced'):
+ reduced = True
if o in ('--header'):
- (k,v) = a.split("=")
+ (k, v) = a.split("=")
headers[k] = v
- if len(args) != 1:
- print usage()
- path = os.path.expanduser(args[0])
- path = os.path.expandvars(path)
- path = os.path.abspath(path)
- if bucket_name:
- c = boto.connect_s3(aws_access_key_id=aws_access_key_id,
- aws_secret_access_key=aws_secret_access_key)
- c.debug = debug
- b = c.get_bucket(bucket_name)
+ if o in ('--multipart'):
+ if multipart_capable:
+ multipart_requested = True
+ else:
+ print "multipart upload requested but not capable"
+ sys.exit()
+
+ if len(args) < 1:
+ usage()
+
+ if not bucket_name:
+ print "bucket name is required!"
+ usage()
+
+ c = boto.connect_s3(aws_access_key_id=aws_access_key_id,
+ aws_secret_access_key=aws_secret_access_key)
+ c.debug = debug
+ b = c.get_bucket(bucket_name)
+ existing_keys_to_check_against = []
+ files_to_check_for_upload = []
+
+ for path in args:
+ path = expand_path(path)
+ # upload a directory of files recursively
if os.path.isdir(path):
if no_overwrite:
if not quiet:
print 'Getting list of existing keys to check against'
- keys = []
- for key in b.list(get_key_name(path, prefix)):
- keys.append(key.name)
+ for key in b.list(get_key_name(path, prefix, key_prefix)):
+ existing_keys_to_check_against.append(key.name)
for root, dirs, files in os.walk(path):
for ignore in ignore_dirs:
if ignore in dirs:
dirs.remove(ignore)
- for file in files:
- if file.startswith("."):
+ for path in files:
+ if path.startswith("."):
continue
- fullpath = os.path.join(root, file)
- key_name = get_key_name(fullpath, prefix)
- copy_file = True
- if no_overwrite:
- if key_name in keys:
- copy_file = False
- if not quiet:
- print 'Skipping %s as it exists in s3' % file
- if copy_file:
- if not quiet:
- print 'Copying %s to %s/%s' % (file, bucket_name, key_name)
- if not no_op:
- k = b.new_key(key_name)
- k.set_contents_from_filename(
- fullpath, cb=cb, num_cb=num_cb,
- policy=grant, reduced_redundancy=reduced,
- headers=headers
- )
- total += 1
+ files_to_check_for_upload.append(os.path.join(root, path))
+
+ # upload a single file
elif os.path.isfile(path):
- key_name = get_key_name(path, prefix)
- copy_file = True
- if no_overwrite:
- if b.get_key(key_name):
- copy_file = False
- if not quiet:
- print 'Skipping %s as it exists in s3' % path
- if copy_file:
- k = b.new_key(key_name)
- k.set_contents_from_filename(path, cb=cb, num_cb=num_cb,
- policy=grant,
- reduced_redundancy=reduced, headers=headers)
- else:
- print usage()
+ fullpath = os.path.abspath(path)
+ key_name = get_key_name(fullpath, prefix, key_prefix)
+ files_to_check_for_upload.append(fullpath)
+ existing_keys_to_check_against.append(key_name)
+
+ # we are trying to upload something unknown
+ else:
+ print "I don't know what %s is, so i can't upload it" % path
+
+ for fullpath in files_to_check_for_upload:
+ key_name = get_key_name(fullpath, prefix, key_prefix)
+
+ if no_overwrite and key_name in existing_keys_to_check_against:
+ if not quiet:
+ print 'Skipping %s as it exists in s3' % fullpath
+ continue
+
+ if not quiet:
+ print 'Copying %s to %s/%s' % (fullpath, bucket_name, key_name)
+
+ if not no_op:
+ # 0-byte files don't work and also don't need multipart upload
+ if os.stat(fullpath).st_size != 0 and multipart_capable and \
+ multipart_requested:
+ multipart_upload(bucket_name, aws_access_key_id,
+ aws_secret_access_key, fullpath, key_name,
+ reduced, debug, cb, num_cb,
+ grant or 'private', headers)
+ else:
+ singlepart_upload(b, key_name, fullpath, cb=cb, num_cb=num_cb,
+ policy=grant, reduced_redundancy=reduced,
+ headers=headers)
if __name__ == "__main__":
main()
-
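
The multipart chunk size above is max(sqrt(5 MB x file size), 5 MB), so parts grow with the file: the part count stays near the square root of the file size measured in 5 MB units, far below S3's 10,000-part limit, while never dropping under the 5 MB minimum part size. A worked example of the sizing:

    # Worked example of the multipart chunk sizing used above.
    import math

    for source_size in (100 * 1024 ** 2, 10 * 1024 ** 3):  # 100 MB, 10 GB
        bytes_per_chunk = max(int(math.sqrt(5242880) * math.sqrt(source_size)),
                              5242880)
        chunk_amount = int(math.ceil(source_size / float(bytes_per_chunk)))
        print source_size, bytes_per_chunk, chunk_amount
    # 100 MB -> ~22 MiB parts, 5 parts; 10 GB -> ~226 MiB parts, 46 parts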
2 boto/__init__.py
@@ -35,7 +35,7 @@
import urlparse
from boto.exception import InvalidUriError
-__version__ = '2.6.0'
+__version__ = '2.7.0'
Version = __version__ # for backward compatibility
UserAgent = 'Boto/%s (%s)' % (__version__, sys.platform)
96 boto/auth.py
@@ -264,7 +264,7 @@ def string_to_sign(self, http_request):
headers_to_sign = self.headers_to_sign(http_request)
canonical_headers = self.canonical_headers(headers_to_sign)
string_to_sign = '\n'.join([http_request.method,
- http_request.path,
+ http_request.auth_path,
'',
canonical_headers,
'',
@@ -303,9 +303,15 @@ class HmacAuthV4Handler(AuthHandler, HmacKeys):
capability = ['hmac-v4']
- def __init__(self, host, config, provider):
+ def __init__(self, host, config, provider,
+ service_name=None, region_name=None):
AuthHandler.__init__(self, host, config, provider)
HmacKeys.__init__(self, host, config, provider)
+ # You can set the service_name and region_name to override the
+ # values which would otherwise come from the endpoint, e.g.
+ # <service>.<region>.amazonaws.com.
+ self.service_name = service_name
+ self.region_name = region_name
def _sign(self, key, msg, hex=False):
if hex:
@@ -337,12 +343,15 @@ def query_string(self, http_request):
return '&'.join(pairs)
def canonical_query_string(self, http_request):
+ # POST requests pass parameters in through the
+ # http_request.body field.
+ if http_request.method == 'POST':
+ return ""
l = []
- for param in http_request.params:
+ for param in sorted(http_request.params):
value = str(http_request.params[param])
l.append('%s=%s' % (urllib.quote(param, safe='-_.~'),
urllib.quote(value, safe='-_.~')))
- l = sorted(l)
return '&'.join(l)
def canonical_headers(self, headers_to_sign):
@@ -352,9 +361,9 @@ def canonical_headers(self, headers_to_sign):
case, sorting them in alphabetical order and then joining
them into a string, separated by newlines.
"""
- l = ['%s:%s' % (n.lower().strip(),
- headers_to_sign[n].strip()) for n in headers_to_sign]
- l = sorted(l)
+ l = sorted(['%s:%s' % (n.lower().strip(),
+ ' '.join(headers_to_sign[n].strip().split()))
+ for n in headers_to_sign])
return '\n'.join(l)
def signed_headers(self, headers_to_sign):
@@ -363,7 +372,7 @@ def signed_headers(self, headers_to_sign):
return ';'.join(l)
def canonical_uri(self, http_request):
- return http_request.path
+ return http_request.auth_path
def payload(self, http_request):
body = http_request.body
@@ -396,13 +405,26 @@ def credential_scope(self, http_request):
scope = []
http_request.timestamp = http_request.headers['X-Amz-Date'][0:8]
scope.append(http_request.timestamp)
+ # The service_name and region_name either come from:
+ # * The service_name/region_name attrs or (if these values are None)
+ # * parsed from the endpoint <service>.<region>.amazonaws.com.
parts = http_request.host.split('.')
- if len(parts) == 3:
- http_request.region_name = 'us-east-1'
+ if self.region_name is not None:
+ region_name = self.region_name
else:
- http_request.region_name = parts[1]
+ if len(parts) == 3:
+ region_name = 'us-east-1'
+ else:
+ region_name = parts[1]
+ if self.service_name is not None:
+ service_name = self.service_name
+ else:
+ service_name = parts[0]
+
+ http_request.service_name = service_name
+ http_request.region_name = region_name
+
scope.append(http_request.region_name)
- http_request.service_name = parts[0]
scope.append(http_request.service_name)
scope.append('aws4_request')
return '/'.join(scope)
@@ -443,6 +465,18 @@ def add_auth(self, req, **kwargs):
req.headers['X-Amz-Date'] = now.strftime('%Y%m%dT%H%M%SZ')
if self._provider.security_token:
req.headers['X-Amz-Security-Token'] = self._provider.security_token
+ qs = self.query_string(req)
+ if qs and req.method == 'POST':
+ # Stash request parameters into post body
+ # before we generate the signature.
+ req.body = qs
+ req.headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
+ req.headers['Content-Length'] = str(len(req.body))
+ else:
+ # Safe to modify req.path here since
+ # the signature will use req.auth_path.
+ req.path = req.path.split('?')[0]
+ req.path = req.path + '?' + qs
canonical_request = self.canonical_request(req)
boto.log.debug('CanonicalRequest:\n%s' % canonical_request)
string_to_sign = self.string_to_sign(req, canonical_request)
@@ -454,10 +488,6 @@ def add_auth(self, req, **kwargs):
l.append('SignedHeaders=%s' % self.signed_headers(headers_to_sign))
l.append('Signature=%s' % signature)
req.headers['Authorization'] = ','.join(l)
- qs = self.query_string(req)
- if qs:
- req.path = req.path.split('?')[0]
- req.path = req.path + '?' + qs
class QuerySignatureHelper(HmacKeys):
@@ -519,6 +549,11 @@ class QuerySignatureV1AuthHandler(QuerySignatureHelper, AuthHandler):
SignatureVersion = 1
capability = ['sign-v1', 'mturk']
+ def __init__(self, *args, **kw):
+ QuerySignatureHelper.__init__(self, *args, **kw)
+ AuthHandler.__init__(self, *args, **kw)
+ self._hmac_256 = None
+
def _calc_signature(self, params, *args):
boto.log.debug('using _calc_signature_1')
hmac = self._get_hmac()
@@ -612,8 +647,7 @@ def get_auth_handler(host, config, provider, requested_capability=None):
An implementation of AuthHandler.
Raises:
- boto.exception.NoAuthHandlerFound:
- boto.exception.TooManyAuthHandlerReadyToAuthenticate:
+ boto.exception.NoAuthHandlerFound
"""
ready_handlers = []
auth_handlers = boto.plugin.get_plugin(AuthHandler, requested_capability)
@@ -632,18 +666,14 @@ def get_auth_handler(host, config, provider, requested_capability=None):
' %s '
'Check your credentials' % (len(names), str(names)))
- if len(ready_handlers) > 1:
- # NOTE: Even though it would be nice to accept more than one handler
- # by using one of the many ready handlers, we are never sure that each
- # of them are referring to the same storage account. Since we cannot
- # easily guarantee that, it is always safe to fail, rather than operate
- # on the wrong account.
- names = [handler.__class__.__name__ for handler in ready_handlers]
- raise boto.exception.TooManyAuthHandlerReadyToAuthenticate(
- '%d AuthHandlers %s ready to authenticate for requested_capability '
- '%s, only 1 expected. This happens if you import multiple '
- 'pluging.Plugin implementations that declare support for the '
- 'requested_capability.' % (len(names), str(names),
- requested_capability))
-
- return ready_handlers[0]
+ # We select the last ready auth handler that was loaded, to allow users to
+ # customize how auth works in environments where there are shared boto
+ # config files (e.g., /etc/boto.cfg and ~/.boto): The more general,
+ # system-wide shared configs should be loaded first, and the user's
+ # customizations loaded last. That way, for example, the system-wide
+ # config might include a plugin_directory that includes a service account
+ # auth plugin shared by all users of a Google Compute Engine instance
+ # (allowing sharing of non-user data between various services), and the
+ # user could override this with a .boto config that includes user-specific
+ # credentials (for access to user data).
+ return ready_handlers[-1]
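Two of the SigV4 changes above are easiest to see in isolation: header values now have internal runs of whitespace collapsed before signing, and the credential scope falls back to parsing the endpoint only when no explicit service_name/region_name override was given. A minimal standalone sketch (helper names here are illustrative, not boto API):

    def canonical_headers(headers_to_sign):
        # Lowercase the names, collapse whitespace runs in the values,
        # then sort and join with newlines.
        return '\n'.join(sorted(
            '%s:%s' % (n.lower().strip(),
                       ' '.join(headers_to_sign[n].strip().split()))
            for n in headers_to_sign))

    def scope_parts(host, service_name=None, region_name=None):
        # Explicit overrides win; otherwise parse
        # <service>.<region>.amazonaws.com, defaulting to us-east-1 for
        # three-part hosts such as sts.amazonaws.com.
        parts = host.split('.')
        if region_name is None:
            region_name = 'us-east-1' if len(parts) == 3 else parts[1]
        if service_name is None:
            service_name = parts[0]
        return service_name, region_name

    assert scope_parts('dynamodb.us-west-2.amazonaws.com') == ('dynamodb', 'us-west-2')
    assert scope_parts('sts.amazonaws.com') == ('sts', 'us-east-1')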
69 boto/beanstalk/__init__.py
@@ -0,0 +1,69 @@
+# Copyright (c) 2013 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from layer1 import Layer1
+from boto.regioninfo import RegionInfo
+
+RegionData = {
+ 'us-east-1': 'elasticbeanstalk.us-east-1.amazonaws.com',
+ 'us-west-1': 'elasticbeanstalk.us-west-1.amazonaws.com',
+ 'us-west-2': 'elasticbeanstalk.us-west-2.amazonaws.com',
+ 'sa-east-1': 'elasticbeanstalk.sa-east-1.amazonaws.com',
+ 'eu-west-1': 'elasticbeanstalk.eu-west-1.amazonaws.com',
+ 'ap-northeast-1': 'elasticbeanstalk.ap-northeast-1.amazonaws.com',
+ 'ap-southeast-1': 'elasticbeanstalk.ap-southeast-1.amazonaws.com',
+ 'ap-southeast-2': 'elasticbeanstalk.ap-southeast-2.amazonaws.com',
+}
+
+
+def regions():
+ """
+ Get all available regions for the Elastic Beanstalk service.
+
+ :rtype: list
+ :return: A list of :class:`boto.RegionInfo` instances
+ """
+ regions = []
+ for region_name in RegionData:
+ region = RegionInfo(name=region_name,
+ endpoint=RegionData[region_name],
+ connection_cls=Layer1)
+ regions.append(region)
+ return regions
+
+
+def connect_to_region(region_name, **kw_params):
+ """
+ Given a valid region name, return a
+ :class:`boto.beanstalk.Layer1`.
+
+ :param str region_name: The name of the region to connect to.
+
+ :rtype: :class:`boto.beanstalk.Layer1` or ``None``
+ :return: A connection to the given region, or None if an invalid region
+ name is given
+ """
+ for region in regions():
+ if region.name == region_name:
+ return region.connect(**kw_params)
+ return None
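With the new module in place, Elastic Beanstalk connects like any other boto service; a minimal sketch (credentials come from the usual boto config, and the region name must be a key in RegionData above):

    import boto.beanstalk

    conn = boto.beanstalk.connect_to_region('us-east-1')
    if conn is None:
        raise ValueError('unknown Elastic Beanstalk region')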
4 boto/cloudformation/__init__.py
@@ -30,7 +30,9 @@
'sa-east-1': 'cloudformation.sa-east-1.amazonaws.com',
'eu-west-1': 'cloudformation.eu-west-1.amazonaws.com',
'ap-northeast-1': 'cloudformation.ap-northeast-1.amazonaws.com',
- 'ap-southeast-1': 'cloudformation.ap-southeast-1.amazonaws.com'}
+ 'ap-southeast-1': 'cloudformation.ap-southeast-1.amazonaws.com',
+ 'ap-southeast-2': 'cloudformation.ap-southeast-2.amazonaws.com',
+}
def regions():
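The added ap-southeast-2 entry is picked up by the module's standard helpers; a minimal sketch:

    import boto.cloudformation

    # Resolves to cloudformation.ap-southeast-2.amazonaws.com via RegionData.
    conn = boto.cloudformation.connect_to_region('ap-southeast-2')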
18 boto/cloudformation/connection.py
@@ -19,17 +19,13 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
-try:
- import simplejson as json
-except:
- import json
-
import boto
from boto.cloudformation.stack import Stack, StackSummary, StackEvent
from boto.cloudformation.stack import StackResource, StackResourceSummary
from boto.cloudformation.template import Template
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
+from boto.compat import json
class CloudFormationConnection(AWSQueryConnection):
@@ -42,9 +38,15 @@ class CloudFormationConnection(AWSQueryConnection):
DefaultRegionEndpoint = boto.config.get('Boto', 'cfn_region_endpoint',
'cloudformation.us-east-1.amazonaws.com')
- valid_states = ("CREATE_IN_PROGRESS", "CREATE_FAILED", "CREATE_COMPLETE",
- "ROLLBACK_IN_PROGRESS", "ROLLBACK_FAILED", "ROLLBACK_COMPLETE",
- "DELETE_IN_PROGRESS", "DELETE_FAILED", "DELETE_COMPLETE")
+ valid_states = (
+ 'CREATE_IN_PROGRESS', 'CREATE_FAILED', 'CREATE_COMPLETE',
+ 'ROLLBACK_IN_PROGRESS', 'ROLLBACK_FAILED', 'ROLLBACK_COMPLETE',
+ 'DELETE_IN_PROGRESS', 'DELETE_FAILED', 'DELETE_COMPLETE',
+ 'UPDATE_IN_PROGRESS', 'UPDATE_COMPLETE_CLEANUP_IN_PROGRESS',
+ 'UPDATE_COMPLETE', 'UPDATE_ROLLBACK_IN_PROGRESS',
+ 'UPDATE_ROLLBACK_FAILED',
+ 'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS',
+ 'UPDATE_ROLLBACK_COMPLETE')
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
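The expanded valid_states tuple means the new UPDATE_* lifecycle states are accepted when filtering stack listings; a minimal sketch, assuming the boto 2.x list_stacks(stack_status_filters=...) signature:

    import boto.cloudformation

    conn = boto.cloudformation.connect_to_region('us-east-1')
    # Only stacks whose most recent update rolled back cleanly:
    for summary in conn.list_stacks(
            stack_status_filters=['UPDATE_ROLLBACK_COMPLETE']):
        print summary.stack_name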
8 boto/cloudformation/stack.py
@@ -200,6 +200,7 @@ def __init__(self, connection=None):
dict.__init__(self)
self.connection = connection
self._current_key = None
+ self._current_value = None
def startElement(self, name, attrs, connection):
return None
@@ -208,9 +209,14 @@ def endElement(self, name, value, connection):
if name == "Key":
self._current_key = value
elif name == "Value":
- self[self._current_key] = value
+ self._current_value = value
else:
setattr(self, name, value)
+