
Merge branch 'release-2.5.0'

2 parents fe2064f + 66863ea · commit e20d3ea669c5efe832b22201c07e112f673b04fc · @garnaat committed Jun 13, 2012
Showing with 2,924 additions and 1,208 deletions.
  1. +2 −2 README.rst
  2. +18 −3 bin/elbadmin
  3. +17 −11 bin/s3multiput
  4. +71 −63 boto/__init__.py
  5. +34 −28 boto/auth.py
  6. +69 −29 boto/cloudformation/connection.py
  7. +1 −1 boto/cloudformation/stack.py
  8. +43 −36 boto/cloudfront/__init__.py
  9. +53 −5 boto/connection.py
  10. +8 −3 boto/dynamodb/layer2.py
  11. +25 −14 boto/ec2/autoscale/__init__.py
  12. +25 −6 boto/ec2/autoscale/launchconfig.py
  13. +90 −92 boto/ec2/cloudwatch/__init__.py
  14. +4 −4 boto/ec2/cloudwatch/alarm.py
  15. +68 −27 boto/ec2/connection.py
  16. +84 −72 boto/ec2/elb/__init__.py
  17. +5 −0 boto/ec2/elb/listener.py
  18. +30 −17 boto/ec2/elb/loadbalancer.py
  19. +2 −0 boto/ec2/elb/policies.py
  20. +3 −3 boto/ec2/image.py
  21. +15 −8 boto/ec2/instance.py
  22. +1 −1 boto/ec2/securitygroup.py
  23. +2 −2 boto/ec2/snapshot.py
  24. +40 −41 boto/emr/connection.py
  25. +1 −2 boto/fps/connection.py
  26. +2 −2 boto/gs/acl.py
  27. +479 −198 boto/iam/connection.py
  28. +2 −2 boto/manage/cmdshell.py
  29. +3 −3 boto/manage/server.py
  30. +1 −1 boto/manage/task.py
  31. +6 −6 boto/manage/volume.py
  32. +18 −18 boto/mturk/connection.py
  33. +1 −1 boto/mturk/qualification.py
  34. +3 −3 boto/mturk/question.py
  35. +6 −5 boto/mws/connection.py
  36. +85 −58 boto/provider.py
  37. +31 −29 boto/rds/__init__.py
  38. +7 −7 boto/rds/parametergroup.py
  39. +2 −2 boto/roboto/awsqueryrequest.py
  40. +37 −20 boto/route53/connection.py
  41. +2 −3 boto/s3/bucket.py
  42. +59 −38 boto/s3/connection.py
  43. +4 −4 boto/s3/key.py
  44. +1 −2 boto/sdb/connection.py
  45. +4 −4 boto/sdb/db/key.py
  46. +1 −1 boto/sdb/db/manager/__init__.py
  47. +4 −4 boto/sdb/db/manager/pgmanager.py
  48. +68 −62 boto/sdb/db/manager/sdbmanager.py
  49. +2 −2 boto/sdb/db/manager/xmlmanager.py
  50. +3 −3 boto/sdb/db/model.py
  51. +82 −65 boto/sdb/db/property.py
  52. +5 −5 boto/sdb/db/sequence.py
  53. +1 −1 boto/sdb/db/test_db.py
  54. +1 −1 boto/sdb/domain.py
  55. +1 −1 boto/sdb/item.py
  56. +3 −4 boto/services/result.py
  57. +2 −2 boto/services/service.py
  58. +27 −12 boto/ses/connection.py
  59. +11 −1 boto/sns/connection.py
  60. +13 −0 boto/sqs/connection.py
  61. +2 −2 boto/sqs/message.py
  62. +15 −1 boto/swf/layer1.py
  63. +88 −45 boto/utils.py
  64. +1 −1 boto/vpc/dhcpoptions.py
  65. BIN docs/BotoCheatSheet.pdf
  66. +74 −23 docs/source/autoscale_tut.rst
  67. +40 −40 docs/source/cloudsearch_tut.rst
  68. +3 −3 docs/source/simpledb_tut.rst
  69. +2 −0 requirements.txt
  70. 0 tests/cloudformation/__init__.py
  71. +668 −0 tests/cloudformation/test_connection.py
  72. +4 −5 tests/db/test_lists.py
  73. +2 −2 tests/db/test_password.py
  74. +7 −7 tests/db/test_sequence.py
  75. +3 −3 tests/dynamodb/test_layer2.py
  76. +27 −1 tests/ec2/elb/test_connection.py
  77. +2 −2 tests/ec2/test_connection.py
  78. +98 −0 tests/ec2/test_instance.py
  79. +168 −0 tests/misc/test_expire.py
  80. +1 −1 tests/mturk/create_hit_external.py
  81. +1 −1 tests/mturk/create_hit_with_qualifications.py
  82. +7 −8 tests/mturk/support.py
  83. +1 −1 tests/s3/test_gsconnection.py
  84. +1 −1 tests/s3/test_key.py
  85. +2 −2 tests/s3/test_mfa.py
  86. +1 −1 tests/s3/test_multidelete.py
  87. +1 −1 tests/s3/test_versioning.py
  88. +6 −6 tests/sqs/test_connection.py
  89. +2 −2 tests/swf/test_layer1_workflow_execution.py
  90. +9 −9 tests/utils/test_password.py
4 README.rst
@@ -1,8 +1,8 @@
####
boto
####
-boto 2.4.1
-16-May-2012
+boto 2.5.0
+13-Jun-2012
************
Introduction
21 bin/elbadmin
@@ -46,7 +46,14 @@ def list(elb):
def get(elb, name):
"""Get details about ELB <name>"""
- elbs = elb.get_all_load_balancers(name)
+ try:
+ elbs = elb.get_all_load_balancers(name)
+ except boto.exception.BotoServerError as se:
+ if se.code == 'LoadBalancerNotFound':
+ elbs = []
+ else:
+ raise
+
if len(elbs) < 1:
print "No load balancer by the name of %s found" % name
return
@@ -55,7 +62,13 @@ def get(elb, name):
print "="*80
print "Name: %s" % b.name
print "DNS Name: %s" % b.dns_name
+ if b.canonical_hosted_zone_name:
+ print "Canonical hosted zone name: %s" % b.canonical_hosted_zone_name
+ if b.canonical_hosted_zone_name_id:
+ print "Canonical hosted zone name id: %s" % b.canonical_hosted_zone_name_id
+ print
+ print "Health Check: %s" % b.health_check
print
print "Listeners"
@@ -75,8 +88,10 @@ def get(elb, name):
print "Instances"
print "---------"
- for i in b.instances:
- print i.id
+ print "%-12s %-15s %s" % ("ID", "STATE", "DESCRIPTION")
+ for state in b.get_instance_health():
+ print "%-12s %-15s %s" % (state.instance_id, state.state,
+ state.description)
print
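
The pattern added here can be exercised on its own; a minimal sketch that mirrors the calls the script now makes, with the load balancer name as a placeholder:

import boto
import boto.exception

elb = boto.connect_elb()
name = 'my-load-balancer'   # placeholder name
try:
    elbs = elb.get_all_load_balancers(name)
except boto.exception.BotoServerError as se:
    if se.code == 'LoadBalancerNotFound':
        elbs = []   # a missing balancer becomes an empty result, not a traceback
    else:
        raise
for b in elbs:
    # the new per-instance health listing printed by `elbadmin get`
    for state in b.get_instance_health():
        print "%-12s %-15s %s" % (state.instance_id, state.state,
                                  state.description)
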
28 bin/s3multiput
@@ -41,8 +41,8 @@ SYNOPSIS
s3put [-a/--access_key <access_key>] [-s/--secret_key <secret_key>]
-b/--bucket <bucket_name> [-c/--callback <num_cb>]
[-d/--debug <debug_level>] [-i/--ignore <ignore_dirs>]
- [-n/--no_op] [-p/--prefix <prefix>] [-q/--quiet]
- [-g/--grant grant] [-w/--no_overwrite] [-r/--reduced] path
+ [-n/--no_op] [-p/--prefix <prefix>] [-k/--key_prefix <key_prefix>]
+ [-q/--quiet] [-g/--grant grant] [-w/--no_overwrite] [-r/--reduced] path
Where
access_key - Your AWS Access Key ID. If not supplied, boto will
@@ -76,6 +76,9 @@ SYNOPSIS
/bar/fie.baz
The prefix must end in a trailing separator and if it
does not then one will be added.
+ key_prefix - A prefix to be added to the S3 key name, after any
+ stripping of the file path is done based on the
+ "-p/--prefix" option.
reduced - Use Reduced Redundancy storage
grant - A canned ACL policy that will be granted on each file
transferred to S3. The value of provided must be one
@@ -98,10 +101,10 @@ def usage():
def submit_cb(bytes_so_far, total_bytes):
print '%d bytes transferred / %d bytes total' % (bytes_so_far, total_bytes)
-def get_key_name(fullpath, prefix):
+def get_key_name(fullpath, prefix, key_prefix):
key_name = fullpath[len(prefix):]
l = key_name.split(os.sep)
- return '/'.join(l)
+ return key_prefix + '/'.join(l)
def _upload_part(bucketname, aws_key, aws_secret, multipart_id, part_num,
source_path, offset, bytes, debug, cb, num_cb, amount_of_retries=10):
@@ -189,15 +192,16 @@ def main():
quiet = False
no_op = False
prefix = '/'
+ key_prefix = ''
grant = None
no_overwrite = False
reduced = False
try:
- opts, args = getopt.getopt(sys.argv[1:], 'a:b:c::d:g:hi:np:qs:wr',
+ opts, args = getopt.getopt(sys.argv[1:], 'a:b:c::d:g:hi:k:np:qs:wr',
['access_key=', 'bucket=', 'callback=', 'debug=', 'help', 'grant=',
- 'ignore=', 'no_op', 'prefix=', 'quiet', 'secret_key=', 'no_overwrite',
- 'reduced'])
+ 'ignore=', 'key_prefix=', 'no_op', 'prefix=', 'quiet', 'secret_key=',
+ 'no_overwrite', 'reduced'])
except:
usage()
@@ -226,6 +230,8 @@ def main():
prefix = a
if prefix[-1] != os.sep:
prefix = prefix + os.sep
+ if o in ('-k', '--key_prefix'):
+ key_prefix = a
if o in ('-q', '--quiet'):
quiet = True
if o in ('-s', '--secret_key'):
@@ -256,15 +262,15 @@ def main():
if not quiet:
print 'Getting list of existing keys to check against'
keys = []
- for key in b.list(get_key_name(path, prefix)):
+ for key in b.list(get_key_name(path, prefix, key_prefix)):
keys.append(key.name)
for root, dirs, files in os.walk(path):
for ignore in ignore_dirs:
if ignore in dirs:
dirs.remove(ignore)
for file in files:
fullpath = os.path.join(root, file)
- key_name = get_key_name(fullpath, prefix)
+ key_name = get_key_name(fullpath, prefix, key_prefix)
copy_file = True
if no_overwrite:
if key_name in keys:
@@ -290,7 +296,7 @@ def main():
# upload a single file
elif os.path.isfile(path):
- key_name = get_key_name(os.path.abspath(path), prefix)
+ key_name = get_key_name(os.path.abspath(path), prefix, key_prefix)
copy_file = True
if no_overwrite:
if b.get_key(key_name):
@@ -314,4 +320,4 @@ def main():
reduced, debug, cb, num_cb, grant or 'private')
if __name__ == "__main__":
- main()
+ main()
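
The effect of the new -k/--key_prefix option is easiest to see in get_key_name itself; a short illustration with hypothetical local paths (POSIX separators assumed):

import os

def get_key_name(fullpath, prefix, key_prefix):
    # strip the local path prefix, then prepend the S3 key prefix
    key_name = fullpath[len(prefix):]
    return key_prefix + '/'.join(key_name.split(os.sep))

# uploading /home/me/site/img/logo.png with -p /home/me/site/ -k assets/
print get_key_name('/home/me/site/img/logo.png', '/home/me/site/', 'assets/')
# -> assets/img/logo.png
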
134 boto/__init__.py
@@ -26,25 +26,29 @@
from boto.pyami.config import Config, BotoConfigLocations
from boto.storage_uri import BucketStorageUri, FileStorageUri
import boto.plugin
-import os, re, sys
+import os
+import re
+import sys
import logging
import logging.config
import urlparse
from boto.exception import InvalidUriError
-__version__ = '2.4.1'
-Version = __version__ # for backware compatibility
+__version__ = '2.5.0'
+Version = __version__ # for backware compatibility
UserAgent = 'Boto/%s (%s)' % (__version__, sys.platform)
config = Config()
+
def init_logging():
for file in BotoConfigLocations:
try:
logging.config.fileConfig(os.path.expanduser(file))
except:
pass
+
class NullHandler(logging.Handler):
def emit(self, record):
pass
@@ -54,6 +58,8 @@ def emit(self, record):
init_logging()
# convenience function to set logging to a particular file
+
+
def set_file_logger(name, filepath, level=logging.INFO, format_string=None):
global log
if not format_string:
@@ -67,6 +73,7 @@ def set_file_logger(name, filepath, level=logging.INFO, format_string=None):
logger.addHandler(fh)
log = logger
+
def set_stream_logger(name, level=logging.DEBUG, format_string=None):
global log
if not format_string:
@@ -80,6 +87,7 @@ def set_stream_logger(name, level=logging.DEBUG, format_string=None):
logger.addHandler(fh)
log = logger
+
def connect_sqs(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
@@ -94,6 +102,7 @@ def connect_sqs(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
from boto.sqs.connection import SQSConnection
return SQSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
def connect_s3(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
@@ -108,6 +117,7 @@ def connect_s3(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
from boto.s3.connection import S3Connection
return S3Connection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
def connect_gs(gs_access_key_id=None, gs_secret_access_key=None, **kwargs):
"""
@type gs_access_key_id: string
@@ -122,6 +132,7 @@ def connect_gs(gs_access_key_id=None, gs_secret_access_key=None, **kwargs):
from boto.gs.connection import GSConnection
return GSConnection(gs_access_key_id, gs_secret_access_key, **kwargs)
+
def connect_ec2(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
@@ -136,6 +147,7 @@ def connect_ec2(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
from boto.ec2.connection import EC2Connection
return EC2Connection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
def connect_elb(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
@@ -150,7 +162,9 @@ def connect_elb(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
from boto.ec2.elb import ELBConnection
return ELBConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
-def connect_autoscale(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+
+def connect_autoscale(aws_access_key_id=None, aws_secret_access_key=None,
+ **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
@@ -162,9 +176,12 @@ def connect_autoscale(aws_access_key_id=None, aws_secret_access_key=None, **kwar
:return: A connection to Amazon's Auto Scaling Service
"""
from boto.ec2.autoscale import AutoScaleConnection
- return AutoScaleConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+ return AutoScaleConnection(aws_access_key_id, aws_secret_access_key,
+ **kwargs)
+
-def connect_cloudwatch(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+def connect_cloudwatch(aws_access_key_id=None, aws_secret_access_key=None,
+ **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
@@ -176,7 +193,9 @@ def connect_cloudwatch(aws_access_key_id=None, aws_secret_access_key=None, **kwa
:return: A connection to Amazon's EC2 Monitoring service
"""
from boto.ec2.cloudwatch import CloudWatchConnection
- return CloudWatchConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+ return CloudWatchConnection(aws_access_key_id, aws_secret_access_key,
+ **kwargs)
+
def connect_sdb(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
@@ -192,6 +211,7 @@ def connect_sdb(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
from boto.sdb.connection import SDBConnection
return SDBConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
def connect_fps(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
@@ -206,7 +226,9 @@ def connect_fps(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
from boto.fps.connection import FPSConnection
return FPSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
-def connect_mturk(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+
+def connect_mturk(aws_access_key_id=None, aws_secret_access_key=None,
+ **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
@@ -220,7 +242,9 @@ def connect_mturk(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
from boto.mturk.connection import MTurkConnection
return MTurkConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
-def connect_cloudfront(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+
+def connect_cloudfront(aws_access_key_id=None, aws_secret_access_key=None,
+ **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
@@ -232,7 +256,9 @@ def connect_cloudfront(aws_access_key_id=None, aws_secret_access_key=None, **kwa
:return: A connection to FPS
"""
from boto.cloudfront import CloudFrontConnection
- return CloudFrontConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+ return CloudFrontConnection(aws_access_key_id, aws_secret_access_key,
+ **kwargs)
+
def connect_vpc(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
@@ -248,6 +274,7 @@ def connect_vpc(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
from boto.vpc import VPCConnection
return VPCConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
def connect_rds(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
@@ -262,6 +289,7 @@ def connect_rds(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
from boto.rds import RDSConnection
return RDSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
def connect_emr(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
@@ -276,6 +304,7 @@ def connect_emr(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
from boto.emr import EmrConnection
return EmrConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
def connect_sns(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
@@ -305,7 +334,9 @@ def connect_iam(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
from boto.iam import IAMConnection
return IAMConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
-def connect_route53(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+
+def connect_route53(aws_access_key_id=None, aws_secret_access_key=None,
+ **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
@@ -317,9 +348,12 @@ def connect_route53(aws_access_key_id=None, aws_secret_access_key=None, **kwargs
:return: A connection to Amazon's Route53 DNS Service
"""
from boto.route53 import Route53Connection
- return Route53Connection(aws_access_key_id, aws_secret_access_key, **kwargs)
+ return Route53Connection(aws_access_key_id, aws_secret_access_key,
+ **kwargs)
+
-def connect_cloudformation(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+def connect_cloudformation(aws_access_key_id=None, aws_secret_access_key=None,
+ **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
@@ -331,7 +365,9 @@ def connect_cloudformation(aws_access_key_id=None, aws_secret_access_key=None, *
:return: A connection to Amazon's CloudFormation Service
"""
from boto.cloudformation import CloudFormationConnection
- return CloudFormationConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+ return CloudFormationConnection(aws_access_key_id, aws_secret_access_key,
+ **kwargs)
+
def connect_euca(host=None, aws_access_key_id=None, aws_secret_access_key=None,
port=8773, path='/services/Eucalyptus', is_secure=False,
@@ -371,7 +407,9 @@ def connect_euca(host=None, aws_access_key_id=None, aws_secret_access_key=None,
region=reg, port=port, path=path,
is_secure=is_secure, **kwargs)
-def connect_ec2_endpoint(url, aws_access_key_id=None, aws_secret_access_key=None,
+
+def connect_ec2_endpoint(url, aws_access_key_id=None,
+ aws_secret_access_key=None,
**kwargs):
"""
Connect to an EC2 Api endpoint. Additional arguments are passed
@@ -398,14 +436,16 @@ def connect_ec2_endpoint(url, aws_access_key_id=None, aws_secret_access_key=None
if not 'is_secure' in kwargs:
kwargs['is_secure'] = (purl.scheme == "https")
- kwargs['region'] = RegionInfo(name = purl.hostname,
- endpoint = purl.hostname)
- kwargs['aws_access_key_id']=aws_access_key_id
- kwargs['aws_secret_access_key']=aws_secret_access_key
+ kwargs['region'] = RegionInfo(name=purl.hostname,
+ endpoint=purl.hostname)
+ kwargs['aws_access_key_id'] = aws_access_key_id
+ kwargs['aws_secret_access_key'] = aws_secret_access_key
return(connect_ec2(**kwargs))
-def connect_walrus(host=None, aws_access_key_id=None, aws_secret_access_key=None,
+
+def connect_walrus(host=None, aws_access_key_id=None,
+ aws_secret_access_key=None,
port=8773, path='/services/Walrus', is_secure=False,
**kwargs):
"""
@@ -443,6 +483,7 @@ def connect_walrus(host=None, aws_access_key_id=None, aws_secret_access_key=None
calling_format=OrdinaryCallingFormat(),
is_secure=is_secure, **kwargs)
+
def connect_ses(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
@@ -457,6 +498,7 @@ def connect_ses(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
from boto.ses import SESConnection
return SESConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
def connect_sts(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
@@ -471,21 +513,21 @@ def connect_sts(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
from boto.sts import STSConnection
return STSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
def connect_ia(ia_access_key_id=None, ia_secret_access_key=None,
is_secure=False, **kwargs):
"""
Connect to the Internet Archive via their S3-like API.
:type ia_access_key_id: string
- :param ia_access_key_id: Your IA Access Key ID. This will also look in your
- boto config file for an entry in the Credentials
- section called "ia_access_key_id"
+ :param ia_access_key_id: Your IA Access Key ID. This will also look
+ in your boto config file for an entry in the Credentials
+ section called "ia_access_key_id"
:type ia_secret_access_key: string
:param ia_secret_access_key: Your IA Secret Access Key. This will also
- look in your boto config file for an entry
- in the Credentials section called
- "ia_secret_access_key"
+ look in your boto config file for an entry in the Credentials
+ section called "ia_secret_access_key"
:rtype: :class:`boto.s3.connection.S3Connection`
:return: A connection to the Internet Archive
@@ -503,6 +545,7 @@ def connect_ia(ia_access_key_id=None, ia_secret_access_key=None,
calling_format=OrdinaryCallingFormat(),
is_secure=is_secure, **kwargs)
+
def connect_dynamodb(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
@@ -519,6 +562,7 @@ def connect_dynamodb(aws_access_key_id=None,
from boto.dynamodb.layer2 import Layer2
return Layer2(aws_access_key_id, aws_secret_access_key, **kwargs)
+
def connect_swf(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
@@ -554,43 +598,6 @@ def connect_cloudsearch(aws_access_key_id=None,
**kwargs)
-def check_extensions(module_name, module_path):
- """
- This function checks for extensions to boto modules. It should be called in the
- __init__.py file of all boto modules. See:
- http://code.google.com/p/boto/wiki/ExtendModules
-
- for details.
- """
- option_name = '%s_extend' % module_name
- version = config.get('Boto', option_name, None)
- if version:
- dirname = module_path[0]
- path = os.path.join(dirname, version)
- if os.path.isdir(path):
- log.info('extending module %s with: %s' % (module_name, path))
- module_path.insert(0, path)
-
-_aws_cache = {}
-
-def _get_aws_conn(service):
- global _aws_cache
- conn = _aws_cache.get(service)
- if not conn:
- meth = getattr(sys.modules[__name__], 'connect_' + service)
- conn = meth()
- _aws_cache[service] = conn
- return conn
-
-def lookup(service, name):
- global _aws_cache
- conn = _get_aws_conn(service)
- obj = _aws_cache.get('.'.join((service, name)), None)
- if not obj:
- obj = conn.lookup(name)
- _aws_cache['.'.join((service, name))] = obj
- return obj
-
def storage_uri(uri_str, default_scheme='file', debug=0, validate=True,
bucket_storage_uri_class=BucketStorageUri,
suppress_consec_slashes=True):
@@ -674,6 +681,7 @@ def storage_uri(uri_str, default_scheme='file', debug=0, validate=True,
scheme, bucket_name, object_name, debug,
suppress_consec_slashes=suppress_consec_slashes)
+
def storage_uri_for_key(key):
"""Returns a StorageUri for the given key.
62 boto/auth.py
@@ -104,30 +104,34 @@ class AnonAuthHandler(AuthHandler, HmacKeys):
"""
Implements Anonymous requests.
"""
-
+
capability = ['anon']
-
+
def __init__(self, host, config, provider):
AuthHandler.__init__(self, host, config, provider)
-
+
def add_auth(self, http_request, **kwargs):
pass
class HmacAuthV1Handler(AuthHandler, HmacKeys):
""" Implements the HMAC request signing used by S3 and GS."""
-
+
capability = ['hmac-v1', 's3']
-
+
def __init__(self, host, config, provider):
AuthHandler.__init__(self, host, config, provider)
HmacKeys.__init__(self, host, config, provider)
self._hmac_256 = None
-
+
+ def update_provider(self, provider):
+ super(HmacAuthV1Handler, self).update_provider(provider)
+ self._hmac_256 = None
+
def add_auth(self, http_request, **kwargs):
headers = http_request.headers
method = http_request.method
auth_path = http_request.auth_path
- if not headers.has_key('Date'):
+ if 'Date' not in headers:
headers['Date'] = formatdate(usegmt=True)
if self._provider.security_token:
@@ -148,35 +152,39 @@ class HmacAuthV2Handler(AuthHandler, HmacKeys):
Implements the simplified HMAC authorization used by CloudFront.
"""
capability = ['hmac-v2', 'cloudfront']
-
+
def __init__(self, host, config, provider):
AuthHandler.__init__(self, host, config, provider)
HmacKeys.__init__(self, host, config, provider)
self._hmac_256 = None
-
+
+ def update_provider(self, provider):
+ super(HmacAuthV2Handler, self).update_provider(provider)
+ self._hmac_256 = None
+
def add_auth(self, http_request, **kwargs):
headers = http_request.headers
- if not headers.has_key('Date'):
+ if 'Date' not in headers:
headers['Date'] = formatdate(usegmt=True)
b64_hmac = self.sign_string(headers['Date'])
auth_hdr = self._provider.auth_header
headers['Authorization'] = ("%s %s:%s" %
(auth_hdr,
self._provider.access_key, b64_hmac))
-
+
class HmacAuthV3Handler(AuthHandler, HmacKeys):
"""Implements the new Version 3 HMAC authorization used by Route53."""
-
+
capability = ['hmac-v3', 'route53', 'ses']
-
+
def __init__(self, host, config, provider):
AuthHandler.__init__(self, host, config, provider)
HmacKeys.__init__(self, host, config, provider)
-
+
def add_auth(self, http_request, **kwargs):
headers = http_request.headers
- if not headers.has_key('Date'):
+ if 'Date' not in headers:
headers['Date'] = formatdate(usegmt=True)
b64_hmac = self.sign_string(headers['Date'])
@@ -188,9 +196,9 @@ class HmacAuthV3HTTPHandler(AuthHandler, HmacKeys):
"""
Implements the new Version 3 HMAC authorization used by DynamoDB.
"""
-
+
capability = ['hmac-v3-http']
-
+
def __init__(self, host, config, provider):
AuthHandler.__init__(self, host, config, provider)
HmacKeys.__init__(self, host, config, provider)
@@ -215,11 +223,10 @@ def canonical_headers(self, headers_to_sign):
case, sorting them in alphabetical order and then joining
them into a string, separated by newlines.
"""
- l = ['%s:%s'%(n.lower().strip(),
- headers_to_sign[n].strip()) for n in headers_to_sign]
- l.sort()
+ l = sorted(['%s:%s'%(n.lower().strip(),
+ headers_to_sign[n].strip()) for n in headers_to_sign])
return '\n'.join(l)
-
+
def string_to_sign(self, http_request):
"""
Return the canonical StringToSign as well as a dict
@@ -235,7 +242,7 @@ def string_to_sign(self, http_request):
'',
http_request.body])
return string_to_sign, headers_to_sign
-
+
def add_auth(self, req, **kwargs):
"""
Add AWS3 authentication to a request.
@@ -349,8 +356,7 @@ def _calc_signature(self, params, verb, path, server_name):
params['SignatureMethod'] = 'HmacSHA1'
if self._provider.security_token:
params['SecurityToken'] = self._provider.security_token
- keys = params.keys()
- keys.sort()
+ keys = sorted(params.keys())
pairs = []
for key in keys:
val = boto.utils.get_utf8_value(params[key])
@@ -369,7 +375,7 @@ def _calc_signature(self, params, verb, path, server_name):
class POSTPathQSV2AuthHandler(QuerySignatureV2AuthHandler, AuthHandler):
"""
Query Signature V2 Authentication relocating signed query
- into the path and allowing POST requests with Content-Types.
+ into the path and allowing POST requests with Content-Types.
"""
capability = ['mws']
@@ -403,7 +409,7 @@ def get_auth_handler(host, config, provider, requested_capability=None):
:type host: string
:param host: The name of the host
- :type config:
+ :type config:
:param config:
:type provider:
@@ -424,13 +430,13 @@ def get_auth_handler(host, config, provider, requested_capability=None):
ready_handlers.append(handler(host, config, provider))
except boto.auth_handler.NotReadyToAuthenticate:
pass
-
+
if not ready_handlers:
checked_handlers = auth_handlers
names = [handler.__name__ for handler in checked_handlers]
raise boto.exception.NoAuthHandlerFound(
'No handler was ready to authenticate. %d handlers were checked.'
- ' %s '
+ ' %s '
'Check your credentials' % (len(names), str(names)))
if len(ready_handlers) > 1:
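
The update_provider hooks added above exist because the V1 and V2 handlers cache an HMAC object keyed on the current secret; when credentials are refreshed, that cache must be discarded or requests keep being signed with the stale key. A rough sketch of the idea, using a hypothetical TinySigner class rather than the real handlers:

import hmac
from hashlib import sha256

class TinySigner(object):
    """Illustrative only: caches an HMAC keyed on the current secret."""

    def __init__(self, provider):
        self.update_provider(provider)

    def update_provider(self, provider):
        self._provider = provider
        self._hmac_256 = None          # drop the HMAC built from the old key

    def sign_string(self, s):
        if self._hmac_256 is None:
            self._hmac_256 = hmac.new(self._provider.secret_key,
                                      digestmod=sha256)
        h = self._hmac_256.copy()
        h.update(s)
        return h.hexdigest()
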
98 boto/cloudformation/connection.py
@@ -14,7 +14,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@@ -31,6 +31,7 @@
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
+
class CloudFormationConnection(AWSQueryConnection):
"""
@@ -49,7 +50,7 @@ def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
https_connection_factory=None, region=None, path='/',
- converter=None):
+ converter=None, security_token=None):
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint, CloudFormationConnection)
@@ -59,7 +60,8 @@ def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass,
self.region.endpoint, debug,
- https_connection_factory, path)
+ https_connection_factory, path,
+ security_token)
def _required_auth_capability(self):
return ['cloudformation']
@@ -68,11 +70,13 @@ def encode_bool(self, v):
v = bool(v)
return {True: "true", False: "false"}[v]
- def create_stack(self, stack_name, template_body=None, template_url=None,
- parameters=[], notification_arns=[], disable_rollback=False,
- timeout_in_minutes=None, capabilities=None):
+ def _build_create_or_update_params(self, stack_name, template_body,
+ template_url, parameters,
+ notification_arns, disable_rollback,
+ timeout_in_minutes, capabilities):
"""
- Creates a CloudFormation Stack as specified by the template.
+ Helper that creates JSON parameters needed by a Stack Create or
+ Stack Update call.
:type stack_name: string
:param stack_name: The name of the Stack, must be unique amoung running
@@ -108,8 +112,8 @@ def create_stack(self, stack_name, template_body=None, template_url=None,
the stack. Currently, the only valid capability is
'CAPABILITY_IAM'.
- :rtype: string
- :return: The unique Stack ID.
+ :rtype: dict
+ :return: JSON parameters represented as a Python dict.
"""
params = {'ContentType': "JSON", 'StackName': stack_name,
'DisableRollback': self.encode_bool(disable_rollback)}
@@ -132,7 +136,54 @@ def create_stack(self, stack_name, template_body=None, template_url=None,
"NotificationARNs.member")
if timeout_in_minutes:
params['TimeoutInMinutes'] = int(timeout_in_minutes)
+ return params
+
+ def create_stack(self, stack_name, template_body=None, template_url=None,
+ parameters=[], notification_arns=[], disable_rollback=False,
+ timeout_in_minutes=None, capabilities=None):
+ """
+ Creates a CloudFormation Stack as specified by the template.
+
+ :type stack_name: string
+ :param stack_name: The name of the Stack, must be unique amoung running
+ Stacks
+
+ :type template_body: string
+ :param template_body: The template body (JSON string)
+
+ :type template_url: string
+ :param template_url: An S3 URL of a stored template JSON document. If
+ both the template_body and template_url are
+ specified, the template_body takes precedence
+
+ :type parameters: list of tuples
+ :param parameters: A list of (key, value) pairs for template input
+ parameters.
+ :type notification_arns: list of strings
+ :param notification_arns: A list of SNS topics to send Stack event
+ notifications to.
+
+ :type disable_rollback: bool
+ :param disable_rollback: Indicates whether or not to rollback on
+ failure.
+
+ :type timeout_in_minutes: int
+ :param timeout_in_minutes: Maximum amount of time to let the Stack
+ spend creating itself. If this timeout is exceeded,
+ the Stack will enter the CREATE_FAILED state.
+
+ :type capabilities: list
+ :param capabilities: The list of capabilities you want to allow in
+ the stack. Currently, the only valid capability is
+ 'CAPABILITY_IAM'.
+
+ :rtype: string
+ :return: The unique Stack ID.
+ """
+ params = self._build_create_or_update_params(stack_name,
+ template_body, template_url, parameters, notification_arns,
+ disable_rollback, timeout_in_minutes, capabilities)
response = self.make_request('CreateStack', params, '/', 'POST')
body = response.read()
if response.status == 200:
@@ -145,7 +196,7 @@ def create_stack(self, stack_name, template_body=None, template_url=None,
def update_stack(self, stack_name, template_body=None, template_url=None,
parameters=[], notification_arns=[], disable_rollback=False,
- timeout_in_minutes=None):
+ timeout_in_minutes=None, capabilities=None):
"""
Updates a CloudFormation Stack as specified by the template.
@@ -178,28 +229,17 @@ def update_stack(self, stack_name, template_body=None, template_url=None,
spend creating itself. If this timeout is exceeded,
the Stack will enter the CREATE_FAILED state
+ :type capabilities: list
+ :param capabilities: The list of capabilities you want to allow in
+ the stack. Currently, the only valid capability is
+ 'CAPABILITY_IAM'.
+
:rtype: string
:return: The unique Stack ID.
"""
- params = {'ContentType': "JSON", 'StackName': stack_name,
- 'DisableRollback': self.encode_bool(disable_rollback)}
- if template_body:
- params['TemplateBody'] = template_body
- if template_url:
- params['TemplateURL'] = template_url
- if template_body and template_url:
- boto.log.warning("If both TemplateBody and TemplateURL are"
- " specified, only TemplateBody will be honored by the API")
- if len(parameters) > 0:
- for i, (key, value) in enumerate(parameters):
- params['Parameters.member.%d.ParameterKey' % (i+1)] = key
- params['Parameters.member.%d.ParameterValue' % (i+1)] = value
- if len(notification_arns) > 0:
- self.build_list_params(params, notification_arns,
- "NotificationARNs.member")
- if timeout_in_minutes:
- params['TimeoutInMinutes'] = int(timeout_in_minutes)
-
+ params = self._build_create_or_update_params(stack_name,
+ template_body, template_url, parameters, notification_arns,
+ disable_rollback, timeout_in_minutes, capabilities)
response = self.make_request('UpdateStack', params, '/', 'POST')
body = response.read()
if response.status == 200:
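
From the caller's side, the shared _build_create_or_update_params helper means update_stack now accepts the same capabilities argument as create_stack; a hedged usage sketch with placeholder stack and template names:

import boto

cfn = boto.connect_cloudformation()
template = open('template.json').read()   # placeholder template file
stack_id = cfn.create_stack('my-stack',
                            template_body=template,
                            parameters=[('KeyName', 'my-key')],
                            capabilities=['CAPABILITY_IAM'])
# capabilities can now be passed on update as well; previously only
# create_stack exposed the argument.
cfn.update_stack('my-stack',
                 template_body=template,
                 capabilities=['CAPABILITY_IAM'])
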
2 boto/cloudformation/stack.py
@@ -240,7 +240,7 @@ def startElement(self, name, attrs, connection):
def endElement(self, name, value, connection):
if name == "LastUpdatedTimestamp":
- self.last_updated_timestampe = datetime.strptime(value,
+ self.last_updated_timestamp = datetime.strptime(value,
'%Y-%m-%dT%H:%M:%SZ')
elif name == "LogicalResourceId":
self.logical_resource_id = value
79 boto/cloudfront/__init__.py
@@ -14,7 +14,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@@ -34,17 +34,19 @@
from boto.resultset import ResultSet
from boto.cloudfront.exception import CloudFrontServerError
+
class CloudFrontConnection(AWSAuthConnection):
DefaultHost = 'cloudfront.amazonaws.com'
Version = '2010-11-01'
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
port=None, proxy=None, proxy_port=None,
- host=DefaultHost, debug=0):
+ host=DefaultHost, debug=0, security_token=None):
AWSAuthConnection.__init__(self, host,
- aws_access_key_id, aws_secret_access_key,
- True, port, proxy, proxy_port, debug=debug)
+ aws_access_key_id, aws_secret_access_key,
+ True, port, proxy, proxy_port, debug=debug,
+ security_token=security_token)
def get_etag(self, response):
response_headers = response.msg
@@ -57,11 +59,12 @@ def _required_auth_capability(self):
return ['cloudfront']
# Generics
-
+
def _get_all_objects(self, resource, tags):
if not tags:
- tags=[('DistributionSummary', DistributionSummary)]
- response = self.make_request('GET', '/%s/%s' % (self.Version, resource))
+ tags = [('DistributionSummary', DistributionSummary)]
+ response = self.make_request('GET', '/%s/%s' % (self.Version,
+ resource))
body = response.read()
boto.log.debug(body)
if response.status >= 300:
@@ -99,24 +102,26 @@ def _get_config(self, id, resource, config_class):
h = handler.XmlHandler(d, self)
xml.sax.parseString(body, h)
return d
-
+
def _set_config(self, distribution_id, etag, config):
if isinstance(config, StreamingDistributionConfig):
resource = 'streaming-distribution'
else:
resource = 'distribution'
uri = '/%s/%s/%s/config' % (self.Version, resource, distribution_id)
- headers = {'If-Match' : etag, 'Content-Type' : 'text/xml'}
+ headers = {'If-Match': etag, 'Content-Type': 'text/xml'}
response = self.make_request('PUT', uri, headers, config.to_xml())
body = response.read()
boto.log.debug(body)
if response.status != 200:
raise CloudFrontServerError(response.status, response.reason, body)
return self.get_etag(response)
-
+
def _create_object(self, config, resource, dist_class):
- response = self.make_request('POST', '/%s/%s' % (self.Version, resource),
- {'Content-Type' : 'text/xml'}, data=config.to_xml())
+ response = self.make_request('POST', '/%s/%s' % (self.Version,
+ resource),
+ {'Content-Type': 'text/xml'},
+ data=config.to_xml())
body = response.read()
boto.log.debug(body)
if response.status == 201:
@@ -127,19 +132,19 @@ def _create_object(self, config, resource, dist_class):
return d
else:
raise CloudFrontServerError(response.status, response.reason, body)
-
+
def _delete_object(self, id, etag, resource):
uri = '/%s/%s/%s' % (self.Version, resource, id)
- response = self.make_request('DELETE', uri, {'If-Match' : etag})
+ response = self.make_request('DELETE', uri, {'If-Match': etag})
body = response.read()
boto.log.debug(body)
if response.status != 204:
raise CloudFrontServerError(response.status, response.reason, body)
# Distributions
-
+
def get_all_distributions(self):
- tags=[('DistributionSummary', DistributionSummary)]
+ tags = [('DistributionSummary', DistributionSummary)]
return self._get_all_objects('distribution', tags)
def get_distribution_info(self, distribution_id):
@@ -148,25 +153,25 @@ def get_distribution_info(self, distribution_id):
def get_distribution_config(self, distribution_id):
return self._get_config(distribution_id, 'distribution',
DistributionConfig)
-
+
def set_distribution_config(self, distribution_id, etag, config):
return self._set_config(distribution_id, etag, config)
-
+
def create_distribution(self, origin, enabled, caller_reference='',
cnames=None, comment='', trusted_signers=None):
config = DistributionConfig(origin=origin, enabled=enabled,
caller_reference=caller_reference,
cnames=cnames, comment=comment,
trusted_signers=trusted_signers)
return self._create_object(config, 'distribution', Distribution)
-
+
def delete_distribution(self, distribution_id, etag):
return self._delete_object(distribution_id, etag, 'distribution')
# Streaming Distributions
-
+
def get_all_streaming_distributions(self):
- tags=[('StreamingDistributionSummary', StreamingDistributionSummary)]
+ tags = [('StreamingDistributionSummary', StreamingDistributionSummary)]
return self._get_all_objects('streaming-distribution', tags)
def get_streaming_distribution_info(self, distribution_id):
@@ -176,10 +181,10 @@ def get_streaming_distribution_info(self, distribution_id):
def get_streaming_distribution_config(self, distribution_id):
return self._get_config(distribution_id, 'streaming-distribution',
StreamingDistributionConfig)
-
+
def set_streaming_distribution_config(self, distribution_id, etag, config):
return self._set_config(distribution_id, etag, config)
-
+
def create_streaming_distribution(self, origin, enabled,
caller_reference='',
cnames=None, comment='',
@@ -190,14 +195,15 @@ def create_streaming_distribution(self, origin, enabled,
trusted_signers=trusted_signers)
return self._create_object(config, 'streaming-distribution',
StreamingDistribution)
-
+
def delete_streaming_distribution(self, distribution_id, etag):
- return self._delete_object(distribution_id, etag, 'streaming-distribution')
+ return self._delete_object(distribution_id, etag,
+ 'streaming-distribution')
# Origin Access Identity
def get_all_origin_access_identity(self):
- tags=[('CloudFrontOriginAccessIdentitySummary',
+ tags = [('CloudFrontOriginAccessIdentitySummary',
OriginAccessIdentitySummary)]
return self._get_all_objects('origin-access-identity/cloudfront', tags)
@@ -209,23 +215,23 @@ def get_origin_access_identity_config(self, access_id):
return self._get_config(access_id,
'origin-access-identity/cloudfront',
OriginAccessIdentityConfig)
-
+
def set_origin_access_identity_config(self, access_id,
etag, config):
return self._set_config(access_id, etag, config)
-
+
def create_origin_access_identity(self, caller_reference='', comment=''):
config = OriginAccessIdentityConfig(caller_reference=caller_reference,
comment=comment)
return self._create_object(config, 'origin-access-identity/cloudfront',
OriginAccessIdentity)
-
+
def delete_origin_access_identity(self, access_id, etag):
return self._delete_object(access_id, etag,
'origin-access-identity/cloudfront')
# Object Invalidation
-
+
def create_invalidation_request(self, distribution_id, paths,
caller_reference=None):
"""Creates a new invalidation request
@@ -239,7 +245,7 @@ def create_invalidation_request(self, distribution_id, paths,
uri = '/%s/distribution/%s/invalidation' % (self.Version,
distribution_id)
response = self.make_request('POST', uri,
- {'Content-Type' : 'text/xml'},
+ {'Content-Type': 'text/xml'},
data=paths.to_xml())
body = response.read()
if response.status == 201:
@@ -249,9 +255,12 @@ def create_invalidation_request(self, distribution_id, paths,
else:
raise CloudFrontServerError(response.status, response.reason, body)
- def invalidation_request_status (self, distribution_id, request_id, caller_reference=None):
- uri = '/%s/distribution/%s/invalidation/%s' % (self.Version, distribution_id, request_id )
- response = self.make_request('GET', uri, {'Content-Type' : 'text/xml'})
+ def invalidation_request_status(self, distribution_id,
+ request_id, caller_reference=None):
+ uri = '/%s/distribution/%s/invalidation/%s' % (self.Version,
+ distribution_id,
+ request_id)
+ response = self.make_request('GET', uri, {'Content-Type': 'text/xml'})
body = response.read()
if response.status == 200:
paths = InvalidationBatch([])
@@ -260,5 +269,3 @@ def invalidation_request_status (self, distribution_id, request_id, caller_refer
return paths
else:
raise CloudFrontServerError(response.status, response.reason, body)
-
-
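
Besides the PEP8 cleanup, the constructor now forwards a security_token to AWSAuthConnection, and invalidation_request_status keeps its call shape; a small sketch with placeholder ids (the .id attribute on the returned invalidation batch is an assumption, not shown in the hunks above):

import boto

# the session token is optional; omit it for regular long-lived credentials
cf = boto.connect_cloudfront(security_token='optional-session-token')
req = cf.create_invalidation_request('EDFDVBD6EXAMPLE', ['/index.html'])
status = cf.invalidation_request_status('EDFDVBD6EXAMPLE', req.id)
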
58 boto/connection.py
@@ -55,6 +55,7 @@
import time
import urllib, urlparse
import xml.sax
+from xml.etree import ElementTree
import auth
import auth_handler
@@ -232,6 +233,9 @@ def __init__(self):
# The last time the pool was cleaned.
self.last_clean_time = 0.0
self.mutex = threading.Lock()
+ ConnectionPool.STALE_DURATION = \
+ config.getfloat('Boto', 'connection_stale_duration',
+ ConnectionPool.STALE_DURATION)
def size(self):
"""
@@ -358,8 +362,8 @@ def authorize(self, connection, **kwargs):
self.headers['User-Agent'] = UserAgent
# I'm not sure if this is still needed, now that add_auth is
# setting the content-length for POST requests.
- if not self.headers.has_key('Content-Length'):
- if not self.headers.has_key('Transfer-Encoding') or \
+ if 'Content-Length' not in self.headers:
+ if 'Transfer-Encoding' not in self.headers or \
self.headers['Transfer-Encoding'] != 'chunked':
self.headers['Content-Length'] = str(len(self.body))
@@ -474,7 +478,8 @@ def __init__(self, host, aws_access_key_id=None, aws_secret_access_key=None,
# Allow overriding Provider
self.provider = provider
else:
- self.provider = Provider(provider,
+ self._provider_type = provider
+ self.provider = Provider(self._provider_type,
aws_access_key_id,
aws_secret_access_key,
security_token)
@@ -563,7 +568,7 @@ def handle_proxy(self, proxy, proxy_port, proxy_user, proxy_pass):
self.proxy_port = proxy_port
self.proxy_user = proxy_user
self.proxy_pass = proxy_pass
- if os.environ.has_key('http_proxy') and not self.proxy:
+ if 'http_proxy' in os.environ and not self.proxy:
pattern = re.compile(
'(?:http://)?' \
'(?:(?P<user>\w+):(?P<pass>.*)@)?' \
@@ -622,7 +627,13 @@ def new_http_connection(self, host, is_secure):
else:
boto.log.debug('establishing HTTP connection: kwargs=%s' %
self.http_connection_kwargs)
- connection = httplib.HTTPConnection(host,
+ if self.https_connection_factory:
+ # even though the factory says https, this is too handy
+ # to not be able to allow overriding for http also.
+ connection = self.https_connection_factory(host,
+ **self.http_connection_kwargs)
+ else:
+ connection = httplib.HTTPConnection(host,
**self.http_connection_kwargs)
if self.debug > 1:
connection.set_debuglevel(self.debug)
@@ -730,6 +741,10 @@ def _mexe(self, request, sender=None, override_num_retries=None,
num_retries = override_num_retries
i = 0
connection = self.get_http_connection(request.host, self.is_secure)
+ # The original headers/params are stored so that we can restore them
+ # if credentials are refreshed.
+ original_headers = request.headers.copy()
+ original_params = request.params.copy()
while i <= num_retries:
# Use binary exponential backoff to desynchronize client requests
next_sleep = random.random() * (2 ** i)
@@ -764,6 +779,13 @@ def _mexe(self, request, sender=None, override_num_retries=None,
msg += 'Retrying in %3.1f seconds' % next_sleep
boto.log.debug(msg)
body = response.read()
+ elif self._credentials_expired(response):
+ # The same request object is used so the security token and
+ # access key params are cleared because they are no longer
+ # valid.
+ request.params = original_params.copy()
+ request.headers = original_headers.copy()
+ self._renew_credentials()
elif response.status < 300 or response.status >= 400 or \
not location:
self.put_http_connection(request.host, self.is_secure,
@@ -779,6 +801,7 @@ def _mexe(self, request, sender=None, override_num_retries=None,
boto.log.debug(msg)
connection = self.get_http_connection(request.host,
scheme == 'https')
+ response = None
continue
except self.http_exceptions, e:
for unretryable in self.http_unretryable_exceptions:
@@ -805,6 +828,31 @@ def _mexe(self, request, sender=None, override_num_retries=None,
msg = 'Please report this exception as a Boto Issue!'
raise BotoClientError(msg)
+ def _credentials_expired(self, response):
+ # It is possible that we could be using temporary credentials that are
+ # now expired. We want to detect when this happens so that we can
+ # refresh the credentials. Subclasses can override this method and
+ # determine whether or not the response indicates that the credentials
+ # are invalid. If this method returns True, the credentials will be
+ # renewed.
+ if response.status != 403:
+ return False
+ try:
+ for event, node in ElementTree.iterparse(response, events=['start']):
+ if node.tag.endswith('Code'):
+ if node.text == 'ExpiredToken':
+ return True
+ except ElementTree.ParseError:
+ return False
+ return False
+
+ def _renew_credentials(self):
+ # By resetting the provider with a new provider, this will trigger the
+ # lookup process for finding the new set of credentials.
+ boto.log.debug("Refreshing credentials.")
+ self.provider = Provider(self._provider_type)
+ self._auth_handler.update_provider(self.provider)
+
def build_base_http_request(self, method, path, auth_path,
params=None, headers=None, data='', host=None):
path = self.get_path(path)
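
The expired-credential handling added to _mexe hinges on _credentials_expired, which scans the body of a 403 response for an ExpiredToken error code before _renew_credentials rebuilds the Provider. A standalone sketch of that check (the parsing mirrors the hunk above; the helper name is hypothetical):

from StringIO import StringIO
from xml.etree import ElementTree

def looks_expired(status, body):
    # only a 403 can signal an expired session token
    if status != 403:
        return False
    try:
        for event, node in ElementTree.iterparse(StringIO(body),
                                                 events=['start']):
            if node.tag.endswith('Code') and node.text == 'ExpiredToken':
                return True
    except ElementTree.ParseError:
        return False
    return False

print looks_expired(403, '<Error><Code>ExpiredToken</Code></Error>')   # True
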
11 boto/dynamodb/layer2.py
@@ -105,7 +105,7 @@ class Layer2(object):
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
- host=None, debug=0, session_token=None, region=None):
+ debug=0, session_token=None, region=None):
self.layer1 = Layer1(aws_access_key_id, aws_secret_access_key,
is_secure, port, proxy, proxy_port,
debug, session_token, region)
@@ -243,8 +243,13 @@ def list_tables(self, limit=None):
:param limit: The maximum number of tables to return.
"""
tables = []
- while True:
- result = self.layer1.list_tables(limit)
+ start_table = None
+ while not limit or len(tables) < limit:
+ this_round_limit = None
+ if limit:
+ this_round_limit = limit - len(tables)
+ this_round_limit = min(this_round_limit, 100)
+ result = self.layer1.list_tables(limit=this_round_limit, start_table=start_table)
tables.extend(result.get('TableNames', []))
start_table = result.get('LastEvaluatedTableName', None)
if not start_table:
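
The reworked list_tables pages through LastEvaluatedTableName and never requests more than the 100-table page cap per call; a self-contained sketch of the same loop, with a hypothetical fetch_page callable standing in for layer1.list_tables:

def list_tables(fetch_page, limit=None):
    tables = []
    start_table = None
    while not limit or len(tables) < limit:
        this_round_limit = None
        if limit:
            # ask for no more than the remaining count, capped at 100 per page
            this_round_limit = min(limit - len(tables), 100)
        result = fetch_page(limit=this_round_limit, start_table=start_table)
        tables.extend(result.get('TableNames', []))
        start_table = result.get('LastEvaluatedTableName', None)
        if not start_table:
            break
    return tables
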
39 boto/ec2/autoscale/__init__.py
@@ -48,6 +48,7 @@
'ap-northeast-1': 'autoscaling.ap-northeast-1.amazonaws.com',
'ap-southeast-1': 'autoscaling.ap-southeast-1.amazonaws.com'}
+
def regions():
"""
Get all available regions for the Auto Scaling service.
@@ -63,6 +64,7 @@ def regions():
regions.append(region)
return regions
+
def connect_to_region(region_name, **kw_params):
"""
Given a valid region name, return a
@@ -79,17 +81,19 @@ def connect_to_region(region_name, **kw_params):
return region.connect(**kw_params)
return None
+
class AutoScaleConnection(AWSQueryConnection):
APIVersion = boto.config.get('Boto', 'autoscale_version', '2011-01-01')
DefaultRegionEndpoint = boto.config.get('Boto', 'autoscale_endpoint',
'autoscaling.amazonaws.com')
- DefaultRegionName = boto.config.get('Boto', 'autoscale_region_name',
- 'us-east-1')
+ DefaultRegionName = boto.config.get('Boto', 'autoscale_region_name',
+ 'us-east-1')
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
- https_connection_factory=None, region=None, path='/'):
+ https_connection_factory=None, region=None, path='/',
+ security_token=None):
"""
Init method to create a new connection to the AutoScaling service.
@@ -106,7 +110,8 @@ def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass,
self.region.endpoint, debug,
- https_connection_factory, path=path)
+ https_connection_factory, path=path,
+ security_token=security_token)
def _required_auth_capability(self):
return ['ec2']
@@ -129,7 +134,7 @@ def build_list_params(self, params, items, label):
['us-east-1b',...]
"""
# different from EC2 list params
- for i in xrange(1, len(items)+1):
+ for i in xrange(1, len(items) + 1):
if isinstance(items[i-1], dict):
for k, v in items[i-1].iteritems():
if isinstance(v, dict):
@@ -213,6 +218,10 @@ def create_launch_configuration(self, launch_config):
params['InstanceMonitoring.Enabled'] = 'true'
else:
params['InstanceMonitoring.Enabled'] = 'false'
+ if launch_config.spot_price is not None:
+ params['SpotPrice'] = str(launch_config.spot_price)
+ if launch_config.instance_profile_name is not None:
+ params['IamInstanceProfile'] = launch_config.instance_profile_name
return self.get_object('CreateLaunchConfiguration', params,
Request, verb='POST')
@@ -333,7 +342,7 @@ def get_all_activities(self, autoscale_group, activity_ids=None,
name = autoscale_group
if isinstance(autoscale_group, AutoScalingGroup):
name = autoscale_group.name
- params = {'AutoScalingGroupName' : name}
+ params = {'AutoScalingGroupName': name}
if max_records:
params['MaxRecords'] = max_records
if next_token:
@@ -488,12 +497,13 @@ def suspend_processes(self, as_group, scaling_processes=None):
:param as_group: The auto scaling group to suspend processes on.
:type scaling_processes: list
- :param scaling_processes: Processes you want to suspend. If omitted, all
- processes will be suspended.
+ :param scaling_processes: Processes you want to suspend. If omitted,
+ all processes will be suspended.
"""
params = {'AutoScalingGroupName': as_group}
if scaling_processes:
- self.build_list_params(params, scaling_processes, 'ScalingProcesses')
+ self.build_list_params(params, scaling_processes,
+ 'ScalingProcesses')
return self.get_status('SuspendProcesses', params)
def resume_processes(self, as_group, scaling_processes=None):
@@ -510,7 +520,8 @@ def resume_processes(self, as_group, scaling_processes=None):
params = {'AutoScalingGroupName': as_group}
if scaling_processes:
- self.build_list_params(params, scaling_processes, 'ScalingProcesses')
+ self.build_list_params(params, scaling_processes,
+ 'ScalingProcesses')
return self.get_status('ResumeProcesses', params)
def create_scheduled_group_action(self, as_group, name, time,
@@ -646,8 +657,10 @@ def get_all_tags(self, filters=None, max_records=None, next_token=None):
"""
Lists the Auto Scaling group tags.
- This action supports pagination by returning a token if there are more
- pages to retrieve. To get the next page, call this action again with the returned token as the NextToken parameter.
+ This action supports pagination by returning a token if there
+ are more pages to retrieve. To get the next page, call this
+ action again with the returned token as the NextToken
+ parameter.
:type filters: dict
:param filters: The value of the filter type used to identify
@@ -691,5 +704,3 @@ def delete_tags(self, tags):
for i, tag in enumerate(tags):
tag.build_params(params, i+1)
return self.get_status('DeleteTags', params, verb='POST')
-
-
31 boto/ec2/autoscale/launchconfig.py
@@ -1,4 +1,5 @@
# Copyright (c) 2009 Reza Lotun http://reza.lotun.name/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
@@ -26,6 +27,8 @@
import base64
# this should use the corresponding object from boto.ec2
+
+
class Ebs(object):
def __init__(self, connection=None, snapshot_id=None, volume_size=None):
self.connection = connection
@@ -84,12 +87,14 @@ def endElement(self, name, value, connection):
elif name == 'VirtualName':
self.virtual_name = value
+
class LaunchConfiguration(object):
def __init__(self, connection=None, name=None, image_id=None,
key_name=None, security_groups=None, user_data=None,
instance_type='m1.small', kernel_id=None,
ramdisk_id=None, block_device_mappings=None,
- instance_monitoring=False):
+ instance_monitoring=False, spot_price=None,
+ instance_profile_name=None):
"""
A launch configuration.
@@ -98,14 +103,14 @@ def __init__(self, connection=None, name=None, image_id=None,
:type image_id: str
:param image_id: Unique ID of the Amazon Machine Image (AMI) which was
- assigned during registration.
+ assigned during registration.
:type key_name: str
:param key_name: The name of the EC2 key pair.
:type security_groups: list
:param security_groups: Names of the security groups with which to
- associate the EC2 instances.
+ associate the EC2 instances.
:type user_data: str
:param user_data: The user data available to launched EC2 instances.
@@ -121,11 +126,20 @@ def __init__(self, connection=None, name=None, image_id=None,
:type block_device_mappings: list
:param block_device_mappings: Specifies how block devices are exposed
- for instances
+ for instances
:type instance_monitoring: bool
:param instance_monitoring: Whether instances in group are launched
- with detailed monitoring.
+ with detailed monitoring.
+
+ :type spot_price: float
+ :param spot_price: The spot price you are bidding. Only applies
+ if you are building an autoscaling group with spot instances.
+
+ :type instance_profile_name: string
+ :param instance_profile_name: The name or the Amazon Resource
+ Name (ARN) of the instance profile associated with the IAM
+ role for the instance.
"""
self.connection = connection
self.name = name
@@ -141,6 +155,8 @@ def __init__(self, connection=None, name=None, image_id=None,
self.user_data = user_data
self.created_time = None
self.instance_monitoring = instance_monitoring
+ self.spot_price = spot_price
+ self.instance_profile_name = instance_profile_name
self.launch_configuration_arn = None
def __repr__(self):
@@ -181,10 +197,13 @@ def endElement(self, name, value, connection):
self.launch_configuration_arn = value
elif name == 'InstanceMonitoring':
self.instance_monitoring = value
+ elif name == 'SpotPrice':
+ self.spot_price = float(value)
+ elif name == 'IamInstanceProfile':
+ self.instance_profile_name = value
else:
setattr(self, name, value)
def delete(self):
""" Delete this launch configuration. """
return self.connection.delete_launch_configuration(self.name)
-
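
Putting the new fields together, a launch configuration can now carry a spot bid and an IAM instance profile; a hedged example with placeholder AMI, key pair, and profile names:

import boto
from boto.ec2.autoscale import LaunchConfiguration

conn = boto.connect_autoscale()
lc = LaunchConfiguration(name='spot-web-lc',
                         image_id='ami-12345678',        # placeholder AMI
                         key_name='my-key',
                         instance_type='m1.small',
                         spot_price=0.05,
                         instance_profile_name='my-instance-role')
conn.create_launch_configuration(lc)   # sends SpotPrice and IamInstanceProfile
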
182 boto/ec2/cloudwatch/__init__.py
@@ -36,13 +36,14 @@
import boto
RegionData = {
- 'us-east-1' : 'monitoring.us-east-1.amazonaws.com',
- 'us-west-1' : 'monitoring.us-west-1.amazonaws.com',
- 'us-west-2' : 'monitoring.us-west-2.amazonaws.com',
- 'sa-east-1' : 'monitoring.sa-east-1.amazonaws.com',
- 'eu-west-1' : 'monitoring.eu-west-1.amazonaws.com',
- 'ap-northeast-1' : 'monitoring.ap-northeast-1.amazonaws.com',
- 'ap-southeast-1' : 'monitoring.ap-southeast-1.amazonaws.com'}
+ 'us-east-1': 'monitoring.us-east-1.amazonaws.com',
+ 'us-west-1': 'monitoring.us-west-1.amazonaws.com',
+ 'us-west-2': 'monitoring.us-west-2.amazonaws.com',
+ 'sa-east-1': 'monitoring.sa-east-1.amazonaws.com',
+ 'eu-west-1': 'monitoring.eu-west-1.amazonaws.com',
+ 'ap-northeast-1': 'monitoring.ap-northeast-1.amazonaws.com',
+ 'ap-southeast-1': 'monitoring.ap-southeast-1.amazonaws.com'}
+
def regions():
"""
@@ -59,6 +60,7 @@ def regions():
regions.append(region)
return regions
+
def connect_to_region(region_name, **kw_params):
"""
Given a valid region name, return a
@@ -85,11 +87,11 @@ class CloudWatchConnection(AWSQueryConnection):
'cloudwatch_region_endpoint',
'monitoring.us-east-1.amazonaws.com')
-
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
- https_connection_factory=None, region=None, path='/'):
+ https_connection_factory=None, region=None, path='/',
+ security_token=None):
"""
Init method to create a new connection to EC2 Monitoring Service.
@@ -102,18 +104,19 @@ def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
self.region = region
AWSQueryConnection.__init__(self, aws_access_key_id,
- aws_secret_access_key,
+ aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass,
self.region.endpoint, debug,
- https_connection_factory, path)
+ https_connection_factory, path,
+ security_token)
def _required_auth_capability(self):
return ['ec2']
def build_dimension_param(self, dimension, params):
prefix = 'Dimensions.member'
- i=0
+ i = 0
for dim_name in dimension:
dim_value = dimension[dim_name]
if dim_value:
@@ -133,14 +136,14 @@ def build_list_params(self, params, items, label):
for index, item in enumerate(items):
i = index + 1
if isinstance(item, dict):
- for k,v in item.iteritems():
+ for k, v in item.iteritems():
params[label % (i, 'Name')] = k
if v is not None:
params[label % (i, 'Value')] = v
else:
params[label % i] = item
- def build_put_params(self, params, name, value=None, timestamp=None,
+ def build_put_params(self, params, name, value=None, timestamp=None,
unit=None, dimensions=None, statistics=None):
args = (name, value, unit, dimensions, statistics, timestamp)
length = max(map(lambda a: len(a) if isinstance(a, list) else 1, args))
@@ -157,13 +160,13 @@ def aslist(a):
if timestamp:
metric_data['Timestamp'] = t.isoformat()
-
+
if unit:
metric_data['Unit'] = u
-
+
if dimensions:
self.build_dimension_param(d, metric_data)
-
+
if statistics:
metric_data['StatisticValues.Maximum'] = s['maximum']
metric_data['StatisticValues.Minimum'] = s['minimum']
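For orientation, a hedged sketch of how the put_metric_data path that relies on build_put_params above might be exercised with a single datapoint; the namespace, metric name, and dimension are illustrative:

import boto.ec2.cloudwatch

cw = boto.ec2.cloudwatch.connect_to_region('us-east-1')

# Publish one datapoint; the timestamp defaults to the time the data is received.
cw.put_metric_data(
    namespace='MyApp/Queue',
    name='PendingJobs',
    value=42,
    unit='Count',
    dimensions={'Environment': 'staging'})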
@@ -189,20 +192,20 @@ def get_metric_statistics(self, period, start_time, end_time, metric_name,
:type period: integer
:param period: The granularity, in seconds, of the returned datapoints.
- Period must be at least 60 seconds and must be a multiple
- of 60. The default value is 60.
+ Period must be at least 60 seconds and must be a multiple
+ of 60. The default value is 60.
:type start_time: datetime
- :param start_time: The time stamp to use for determining the first
- datapoint to return. The value specified is
- inclusive; results include datapoints with the
- time stamp specified.
+ :param start_time: The time stamp to use for determining the
+ first datapoint to return. The value specified is
+ inclusive; results include datapoints with the time stamp
+ specified.
:type end_time: datetime
- :param end_time: The time stamp to use for determining the last
- datapoint to return. The value specified is
- exclusive; results will include datapoints up to
- the time stamp specified.
+ :param end_time: The time stamp to use for determining the
+ last datapoint to return. The value specified is
+ exclusive; results will include datapoints up to the time
+ stamp specified.
:type metric_name: string
:param metric_name: The metric name.
@@ -212,7 +215,7 @@ def get_metric_statistics(self, period, start_time, end_time, metric_name,
:type statistics: list
:param statistics: A list of statistics names Valid values:
- Average | Sum | SampleCount | Maximum | Minimum
+ Average | Sum | SampleCount | Maximum | Minimum
:type dimensions: dict
:param dimensions: A dictionary of dimension key/values where
@@ -222,11 +225,11 @@ def get_metric_statistics(self, period, start_time, end_time, metric_name,
dimension.
:rtype: list
"""
- params = {'Period' : period,
- 'MetricName' : metric_name,
- 'Namespace' : namespace,
- 'StartTime' : start_time.isoformat(),
- 'EndTime' : end_time.isoformat()}
+ params = {'Period': period,
+ 'MetricName': metric_name,
+ 'Namespace': namespace,
+ 'StartTime': start_time.isoformat(),
+ 'EndTime': end_time.isoformat()}
self.build_list_params(params, statistics, 'Statistics.member.%d')
if dimensions:
self.build_dimension_param(dimensions, params)
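A usage sketch of get_metric_statistics as documented above (the instance ID is a placeholder; assumes default credentials):

import datetime
import boto.ec2.cloudwatch

cw = boto.ec2.cloudwatch.connect_to_region('us-east-1')
end = datetime.datetime.utcnow()
start = end - datetime.timedelta(hours=1)

# Average CPU for one instance over the last hour, in 5-minute periods.
datapoints = cw.get_metric_statistics(
    period=300,
    start_time=start,
    end_time=end,
    metric_name='CPUUtilization',
    namespace='AWS/EC2',
    statistics=['Average'],
    dimensions={'InstanceId': 'i-12345678'})
for dp in datapoints:
    print dp['Timestamp'], dp['Average']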
@@ -240,30 +243,28 @@ def list_metrics(self, next_token=None, dimensions=None,
data available.
:type next_token: str
- :param next_token: A maximum of 500 metrics will be returned at one
- time. If more results are available, the
- ResultSet returned will contain a non-Null
- next_token attribute. Passing that token as a
- parameter to list_metrics will retrieve the
- next page of metrics.
+ :param next_token: A maximum of 500 metrics will be returned
+ at one time. If more results are available, the ResultSet
+ returned will contain a non-Null next_token attribute.
+ Passing that token as a parameter to list_metrics will
+ retrieve the next page of metrics.
:type dimension: dict
- :param dimension_filters: A dictionary containing name/value pairs
- that will be used to filter the results.
- The key in the dictionary is the name of
- a Dimension. The value in the dictionary
- is either a scalar value of that Dimension
- name that you want to filter on, a list
- of values to filter on or None if
- you want all metrics with that Dimension name.
+ :param dimension_filters: A dictionary containing name/value
+ pairs that will be used to filter the results. The key in
+ the dictionary is the name of a Dimension. The value in
+ the dictionary is either a scalar value of that Dimension
+ name that you want to filter on, a list of values to
+ filter on or None if you want all metrics with that
+ Dimension name.
:type metric_name: str
:param metric_name: The name of the Metric to filter against. If None,
- all Metric names will be returned.
+ all Metric names will be returned.
:type namespace: str
:param namespace: A Metric namespace to filter against (e.g. AWS/EC2).
- If None, Metrics from all namespaces will be returned.
+ If None, Metrics from all namespaces will be returned.
"""
params = {}
if next_token:
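And a short sketch of paging with the next_token attribute described above (assumes default credentials; the namespace is illustrative):

import boto.ec2.cloudwatch

cw = boto.ec2.cloudwatch.connect_to_region('us-east-1')

# Follow next_token until all metrics in the namespace have been listed.
metrics = []
token = None
while True:
    page = cw.list_metrics(next_token=token, namespace='AWS/EC2')
    metrics.extend(page)
    token = page.next_token
    if not token:
        break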
@@ -274,16 +275,16 @@ def list_metrics(self, next_token=None, dimensions=None,
params['MetricName'] = metric_name
if namespace:
params['Namespace'] = namespace
-
+
return self.get_list('ListMetrics', params, [('member', Metric)])
-
- def put_metric_data(self, namespace, name, value=None, timestamp=None,
+
+ def put_metric_data(self, namespace, name, value=None, timestamp=None,
unit=None, dimensions=None, statistics=None):
"""
- Publishes metric data points to Amazon CloudWatch. Amazon Cloudwatch
- associates the data points with the specified metric. If the specified
- metric does not exist, Amazon CloudWatch creates the metric. If a list
- is specified for some, but not all, of the arguments, the remaining
+ Publishes metric data points to Amazon CloudWatch. Amazon CloudWatch
+ associates the data points with the specified metric. If the specified
+ metric does not exist, Amazon CloudWatch creates the metric. If a list
+ is specified for some, but not all, of the arguments, the remaining
arguments are repeated a corresponding number of times.
:type namespace: str
@@ -296,24 +297,24 @@ def put_metric_data(self, namespace, name, value=None, timestamp=None,
:param value: The value for the metric.
:type timestamp: datetime or list
- :param timestamp: The time stamp used for the metric. If not specified,
+ :param timestamp: The time stamp used for the metric. If not specified,
the default value is set to the time the metric data was received.
-
+
:type unit: string or list
- :param unit: The unit of the metric. Valid Values: Seconds |
+ :param unit: The unit of the metric. Valid Values: Seconds |
Microseconds | Milliseconds | Bytes | Kilobytes |
Megabytes | Gigabytes | Terabytes | Bits | Kilobits |
Megabits | Gigabits | Terabits | Percent | Count |
Bytes/Second | Kilobytes/Second | Megabytes/Second |
Gigabytes/Second | Terabytes/Second | Bits/Second |
Kilobits/Second | Megabits/Second | Gigabits/Second |
Terabits/Second | Count/Second | None
-