diff --git a/bin/mturk b/bin/mturk index 8470740979..e0b4bab49e 100755 --- a/bin/mturk +++ b/bin/mturk @@ -25,8 +25,8 @@ import os.path import string import inspect import datetime, calendar -import json import boto.mturk.connection, boto.mturk.price, boto.mturk.question, boto.mturk.qualification +from boto.compat import json # -------------------------------------------------- # Globals diff --git a/bin/sdbadmin b/bin/sdbadmin index 7e87c7b355..3fbd3f4429 100755 --- a/bin/sdbadmin +++ b/bin/sdbadmin @@ -26,15 +26,7 @@ VERSION = "%prog version 1.0" import boto import time from boto import sdb - -# Allow support for JSON -try: - import simplejson as json -except: - try: - import json - except: - json = False +from boto.compat import json def choice_input(options, default=None, title=None): """ diff --git a/boto/beanstalk/exception.py b/boto/beanstalk/exception.py index c209cefca8..f6f9ffad55 100644 --- a/boto/beanstalk/exception.py +++ b/boto/beanstalk/exception.py @@ -1,5 +1,5 @@ import sys -import json +from boto.compat import json from boto.exception import BotoServerError diff --git a/boto/beanstalk/layer1.py b/boto/beanstalk/layer1.py index 7ae180f381..e63f70e714 100644 --- a/boto/beanstalk/layer1.py +++ b/boto/beanstalk/layer1.py @@ -21,10 +21,10 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
# -import json import boto import boto.jsonresponse +from boto.compat import json from boto.regioninfo import RegionInfo from boto.connection import AWSQueryConnection @@ -54,7 +54,7 @@ def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, security_token) def _required_auth_capability(self): - return ['sign-v2'] + return ['hmac-v4'] def _encode_bool(self, v): v = bool(v) diff --git a/boto/cloudsearch/__init__.py b/boto/cloudsearch/__init__.py index 9c8157a199..5ba1060e23 100644 --- a/boto/cloudsearch/__init__.py +++ b/boto/cloudsearch/__init__.py @@ -35,6 +35,9 @@ def regions(): return [RegionInfo(name='us-east-1', endpoint='cloudsearch.us-east-1.amazonaws.com', connection_cls=boto.cloudsearch.layer1.Layer1), + RegionInfo(name='eu-west-1', + endpoint='cloudsearch.eu-west-1.amazonaws.com', + connection_cls=boto.cloudsearch.layer1.Layer1), ] diff --git a/boto/cloudsearch/search.py b/boto/cloudsearch/search.py index 6e53254cb2..48d6698209 100644 --- a/boto/cloudsearch/search.py +++ b/boto/cloudsearch/search.py @@ -23,8 +23,8 @@ # from math import ceil import time -import json import boto +from boto.compat import json import requests diff --git a/boto/connection.py b/boto/connection.py index 8040b75915..9dce1f3f5c 100644 --- a/boto/connection.py +++ b/boto/connection.py @@ -730,6 +730,8 @@ def proxy_ssl(self, host=None, port=None): sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: sock.connect((self.proxy, int(self.proxy_port))) + if "timeout" in self.http_connection_kwargs: + sock.settimeout(self.http_connection_kwargs["timeout"]) except: raise boto.log.debug("Proxy connection: CONNECT %s HTTP/1.0\r\n", host) diff --git a/boto/core/credentials.py b/boto/core/credentials.py index 1f315a3206..b4b35b5305 100644 --- a/boto/core/credentials.py +++ b/boto/core/credentials.py @@ -23,8 +23,8 @@ # import os from six.moves import configparser +from boto.compat import json import requests -import json class Credentials(object): diff --git 
a/boto/datapipeline/layer1.py b/boto/datapipeline/layer1.py index 9dc87ecb9f..1c9a789c71 100644 --- a/boto/datapipeline/layer1.py +++ b/boto/datapipeline/layer1.py @@ -20,8 +20,8 @@ # IN THE SOFTWARE. # -import json import boto +from boto.compat import json from boto.connection import AWSQueryConnection from boto.regioninfo import RegionInfo from boto.exception import JSONResponseError diff --git a/boto/ec2/blockdevicemapping.py b/boto/ec2/blockdevicemapping.py index ca0e9373c5..df774ae938 100644 --- a/boto/ec2/blockdevicemapping.py +++ b/boto/ec2/blockdevicemapping.py @@ -125,17 +125,18 @@ def build_list_params(self, params, prefix=''): params['%s.VirtualName' % pre] = block_dev.ephemeral_name else: if block_dev.no_device: - params['%s.Ebs.NoDevice' % pre] = 'true' - if block_dev.snapshot_id: - params['%s.Ebs.SnapshotId' % pre] = block_dev.snapshot_id - if block_dev.size: - params['%s.Ebs.VolumeSize' % pre] = block_dev.size - if block_dev.delete_on_termination: - params['%s.Ebs.DeleteOnTermination' % pre] = 'true' + params['%s.NoDevice' % pre] = '' else: - params['%s.Ebs.DeleteOnTermination' % pre] = 'false' - if block_dev.volume_type: - params['%s.Ebs.VolumeType' % pre] = block_dev.volume_type - if block_dev.iops is not None: - params['%s.Ebs.Iops' % pre] = block_dev.iops + if block_dev.snapshot_id: + params['%s.Ebs.SnapshotId' % pre] = block_dev.snapshot_id + if block_dev.size: + params['%s.Ebs.VolumeSize' % pre] = block_dev.size + if block_dev.delete_on_termination: + params['%s.Ebs.DeleteOnTermination' % pre] = 'true' + else: + params['%s.Ebs.DeleteOnTermination' % pre] = 'false' + if block_dev.volume_type: + params['%s.Ebs.VolumeType' % pre] = block_dev.volume_type + if block_dev.iops is not None: + params['%s.Ebs.Iops' % pre] = block_dev.iops i += 1 diff --git a/boto/ec2/instance.py b/boto/ec2/instance.py index 30311e0503..5be701f0f5 100644 --- a/boto/ec2/instance.py +++ b/boto/ec2/instance.py @@ -188,6 +188,8 @@ class Instance(TaggedEC2Object): :ivar 
product_codes: A list of product codes associated with this instance. :ivar ami_launch_index: This instances position within it's launch group. :ivar monitored: A boolean indicating whether monitoring is enabled or not. + :ivar monitoring_state: A string value that contains the actual value + of the monitoring element returned by EC2. :ivar spot_instance_request_id: The ID of the spot instance request if this is a spot instance. :ivar subnet_id: The VPC Subnet ID, if running in VPC. @@ -223,6 +225,7 @@ def __init__(self, connection=None): self.product_codes = ProductCodes() self.ami_launch_index = None self.monitored = False + self.monitoring_state = None self.spot_instance_request_id = None self.subnet_id = None self.vpc_id = None @@ -361,6 +364,7 @@ def endElement(self, name, value, connection): self.ramdisk = value elif name == 'state': if self._in_monitoring_element: + self.monitoring_state = value if value == 'enabled': self.monitored = True self._in_monitoring_element = False diff --git a/boto/elasticache/__init__.py b/boto/elasticache/__init__.py index b8b88d6b72..fe35d7076f 100644 --- a/boto/elasticache/__init__.py +++ b/boto/elasticache/__init__.py @@ -25,7 +25,7 @@ def regions(): """ - Get all available regions for the AWS Elastic Beanstalk service. + Get all available regions for the AWS ElastiCache service. :rtype: list :return: A list of :class:`boto.regioninfo.RegionInfo` diff --git a/boto/elasticache/layer1.py b/boto/elasticache/layer1.py index 6bf87d765f..6c50438ab0 100644 --- a/boto/elasticache/layer1.py +++ b/boto/elasticache/layer1.py @@ -20,8 +20,9 @@ # IN THE SOFTWARE. 
# -import json + import boto +from boto.compat import json from boto.connection import AWSQueryConnection from boto.regioninfo import RegionInfo diff --git a/boto/elastictranscoder/layer1.py b/boto/elastictranscoder/layer1.py index 3368dc05e9..293c4f04ef 100644 --- a/boto/elastictranscoder/layer1.py +++ b/boto/elastictranscoder/layer1.py @@ -144,6 +144,7 @@ def create_pipeline(self, name, input_bucket, output_bucket, role, :type notifications: dict :param notifications: The () topic that you want to notify to report job status. + To receive notifications, you must also subscribe to the new topic in the console. @@ -229,7 +230,7 @@ def create_preset(self, name, container, video, audio, thumbnails, def delete_pipeline(self, id): """ To delete a pipeline, send a DELETE request to the - `//pipelines/ [pipelineId] ` resource. + `//pipelines/ [pipelineId]` resource. You can only delete a pipeline that has never been used or that is not currently in use (doesn't contain any active @@ -246,7 +247,7 @@ def delete_pipeline(self, id): def delete_preset(self, id): """ To delete a preset, send a DELETE request to the `//presets/ - [presetId] ` resource. + [presetId]` resource. If the preset has been used, you cannot delete it. @@ -262,7 +263,7 @@ def list_jobs_by_pipeline(self, pipeline_id, ascending=None, page_token=None): """ To get a list of the jobs currently in a pipeline, send a GET - request to the `//jobsByPipeline/ [pipelineId] ` resource. + request to the `//jobsByPipeline/ [pipelineId]` resource. Elastic Transcoder returns all of the jobs currently in the specified pipeline. The response body contains one element for @@ -296,7 +297,7 @@ def list_jobs_by_pipeline(self, pipeline_id, ascending=None, def list_jobs_by_status(self, status, ascending=None, page_token=None): """ To get a list of the jobs that have a specified status, send a - GET request to the `//jobsByStatus/ [status] ` resource. + GET request to the `//jobsByStatus/ [status]` resource. 
Elastic Transcoder returns all of the jobs that have the specified status. The response body contains one element for @@ -352,7 +353,7 @@ def list_presets(self): def read_job(self, id): """ To get detailed information about a job, send a GET request to - the `//jobs/ [jobId] ` resource. + the `//jobs/ [jobId]` resource. :type id: string :param id: The identifier of the job for which you want to get detailed @@ -365,7 +366,7 @@ def read_job(self, id): def read_pipeline(self, id): """ To get detailed information about a pipeline, send a GET - request to the `//pipelines/ [pipelineId] ` resource. + request to the `//pipelines/ [pipelineId]` resource. :type id: string :param id: The identifier of the pipeline to read. @@ -377,7 +378,7 @@ def read_pipeline(self, id): def read_preset(self, id): """ To get detailed information about a preset, send a GET request - to the `//presets/ [presetId] ` resource. + to the `//presets/ [presetId]` resource. :type id: string :param id: The identifier of the preset for which you want to get diff --git a/boto/fps/connection.py b/boto/fps/connection.py index 3b9057e4bb..8f2aaee810 100644 --- a/boto/fps/connection.py +++ b/boto/fps/connection.py @@ -120,58 +120,65 @@ def _required_auth_capability(self): 'SenderTokenId', 'SettlementAmount.CurrencyCode']) @api_action() def settle_debt(self, action, response, **kw): - """Allows a caller to initiate a transaction that atomically - transfers money from a sender's payment instrument to the - recipient, while decreasing corresponding debt balance. + """ + Allows a caller to initiate a transaction that atomically transfers + money from a sender's payment instrument to the recipient, while + decreasing corresponding debt balance. """ return self.get_object(action, kw, response) @requires(['TransactionId']) @api_action() def get_transaction_status(self, action, response, **kw): - """Gets the latest status of a transaction. + """ + Gets the latest status of a transaction. 
""" return self.get_object(action, kw, response) @requires(['StartDate']) @api_action() def get_account_activity(self, action, response, **kw): - """Returns transactions for a given date range. + """ + Returns transactions for a given date range. """ return self.get_object(action, kw, response) @requires(['TransactionId']) @api_action() def get_transaction(self, action, response, **kw): - """Returns all details of a transaction. + """ + Returns all details of a transaction. """ return self.get_object(action, kw, response) @api_action() def get_outstanding_debt_balance(self, action, response): - """Returns the total outstanding balance for all the credit - instruments for the given creditor account. + """ + Returns the total outstanding balance for all the credit instruments + for the given creditor account. """ return self.get_object(action, {}, response) @requires(['PrepaidInstrumentId']) @api_action() def get_prepaid_balance(self, action, response, **kw): - """Returns the balance available on the given prepaid instrument. + """ + Returns the balance available on the given prepaid instrument. """ return self.get_object(action, kw, response) @api_action() def get_total_prepaid_liability(self, action, response): - """Returns the total liability held by the given account - corresponding to all the prepaid instruments owned by the - account. + """ + Returns the total liability held by the given account corresponding to + all the prepaid instruments owned by the account. """ return self.get_object(action, {}, response) @api_action() def get_account_balance(self, action, response): - """Returns the account balance for an account in real time. + """ + Returns the account balance for an account in real time. 
""" return self.get_object(action, {}, response) @@ -179,15 +186,17 @@ def get_account_balance(self, action, response): @requires(['PaymentInstruction', 'TokenType']) @api_action() def install_payment_instruction(self, action, response, **kw): - """Installs a payment instruction for caller. + """ + Installs a payment instruction for caller. """ return self.get_object(action, kw, response) @needs_caller_reference @requires(['returnURL', 'pipelineName']) def cbui_url(self, **kw): - """Generate a signed URL for the Co-Branded service API given - arguments as payload. + """ + Generate a signed URL for the Co-Branded service API given arguments as + payload. """ sandbox = 'sandbox' in self.host and 'payments-sandbox' or 'payments' endpoint = 'authorize.{0}.amazon.com'.format(sandbox) @@ -220,9 +229,10 @@ def cbui_url(self, **kw): 'TransactionAmount.CurrencyCode']) @api_action() def reserve(self, action, response, **kw): - """Reserve API is part of the Reserve and Settle API conjunction - that serve the purpose of a pay where the authorization and - settlement have a timing difference. + """ + Reserve API is part of the Reserve and Settle API conjunction that + serve the purpose of a pay where the authorization and settlement have + a timing difference. """ return self.get_object(action, kw, response) @@ -232,15 +242,16 @@ def reserve(self, action, response, **kw): 'TransactionAmount.CurrencyCode']) @api_action() def pay(self, action, response, **kw): - """Allows calling applications to move money from a sender to - a recipient. + """ + Allows calling applications to move money from a sender to a recipient. """ return self.get_object(action, kw, response) @requires(['TransactionId']) @api_action() def cancel(self, action, response, **kw): - """Cancels an ongoing transaction and puts it in cancelled state. + """ + Cancels an ongoing transaction and puts it in cancelled state. 
""" return self.get_object(action, kw, response) @@ -249,8 +260,9 @@ def cancel(self, action, response, **kw): 'TransactionAmount.CurrencyCode']) @api_action() def settle(self, action, response, **kw): - """The Settle API is used in conjunction with the Reserve API and - is used to settle previously reserved transaction. + """ + The Settle API is used in conjunction with the Reserve API and is used + to settle previously reserved transaction. """ return self.get_object(action, kw, response) @@ -259,50 +271,57 @@ def settle(self, action, response, **kw): 'CallerReference', 'RefundAmount.CurrencyCode']) @api_action() def refund(self, action, response, **kw): - """Refunds a previously completed transaction. + """ + Refunds a previously completed transaction. """ return self.get_object(action, kw, response) @requires(['RecipientTokenId']) @api_action() def get_recipient_verification_status(self, action, response, **kw): - """Returns the recipient status. + """ + Returns the recipient status. """ return self.get_object(action, kw, response) @requires(['CallerReference'], ['TokenId']) @api_action() def get_token_by_caller(self, action, response, **kw): - """Returns the details of a particular token installed by this - calling application using the subway co-branded UI. + """ + Returns the details of a particular token installed by this calling + application using the subway co-branded UI. """ return self.get_object(action, kw, response) @requires(['UrlEndPoint', 'HttpParameters']) @api_action() def verify_signature(self, action, response, **kw): - """Verify the signature that FPS sent in IPN or callback urls. + """ + Verify the signature that FPS sent in IPN or callback urls. """ return self.get_object(action, kw, response) @api_action() def get_tokens(self, action, response, **kw): - """Returns a list of tokens installed on the given account. + """ + Returns a list of tokens installed on the given account. 
""" return self.get_object(action, kw, response) @requires(['TokenId']) @api_action() def get_token_usage(self, action, response, **kw): - """Returns the usage of a token. + """ + Returns the usage of a token. """ return self.get_object(action, kw, response) @requires(['TokenId']) @api_action() def cancel_token(self, action, response, **kw): - """Cancels any token installed by the calling application on - its own account. + """ + Cancels any token installed by the calling application on its own + account. """ return self.get_object(action, kw, response) @@ -312,14 +331,16 @@ def cancel_token(self, action, response, **kw): 'SenderTokenId', 'FundingAmount.CurrencyCode']) @api_action() def fund_prepaid(self, action, response, **kw): - """Funds the prepaid balance on the given prepaid instrument. + """ + Funds the prepaid balance on the given prepaid instrument. """ return self.get_object(action, kw, response) @requires(['CreditInstrumentId']) @api_action() def get_debt_balance(self, action, response, **kw): - """Returns the balance corresponding to the given credit instrument. + """ + Returns the balance corresponding to the given credit instrument. """ return self.get_object(action, kw, response) @@ -329,22 +350,25 @@ def get_debt_balance(self, action, response, **kw): 'AdjustmentAmount.CurrencyCode']) @api_action() def write_off_debt(self, action, response, **kw): - """Allows a creditor to write off the debt balance accumulated - partially or fully at any time. + """ + Allows a creditor to write off the debt balance accumulated partially + or fully at any time. """ return self.get_object(action, kw, response) @requires(['SubscriptionId']) @api_action() def get_transactions_for_subscription(self, action, response, **kw): - """Returns the transactions for a given subscriptionID. + """ + Returns the transactions for a given subscriptionID. 
""" return self.get_object(action, kw, response) @requires(['SubscriptionId']) @api_action() def get_subscription_details(self, action, response, **kw): - """Returns the details of Subscription for a given subscriptionID. + """ + Returns the details of Subscription for a given subscriptionID. """ return self.get_object(action, kw, response) @@ -353,7 +377,8 @@ def get_subscription_details(self, action, response, **kw): @requires(['SubscriptionId']) @api_action() def cancel_subscription_and_refund(self, action, response, **kw): - """Cancels a subscription. + """ + Cancels a subscription. """ message = "If you specify a RefundAmount, " \ "you must specify CallerReference." @@ -364,6 +389,7 @@ def cancel_subscription_and_refund(self, action, response, **kw): @requires(['TokenId']) @api_action() def get_payment_instruction(self, action, response, **kw): - """Gets the payment instruction of a token. + """ + Gets the payment instruction of a token. """ return self.get_object(action, kw, response) diff --git a/boto/glacier/concurrent.py b/boto/glacier/concurrent.py index a956f0660a..da66c44b59 100644 --- a/boto/glacier/concurrent.py +++ b/boto/glacier/concurrent.py @@ -194,13 +194,18 @@ def run(self): except Empty: continue if work is _END_SENTINEL: + self._cleanup() return result = self._process_chunk(work) self._result_queue.put(result) + self._cleanup() def _process_chunk(self, work): pass + def _cleanup(self): + pass + class UploadWorkerThread(TransferThread): def __init__(self, api, vault_name, filename, upload_id, @@ -248,6 +253,9 @@ def _upload_chunk(self, work): response.read() return (part_number, tree_hash_bytes) + def _cleanup(self): + self._fileobj.close() + class ConcurrentDownloader(ConcurrentTransferer): """ diff --git a/boto/https_connection.py b/boto/https_connection.py index 760e6081b6..4cbf5182db 100644 --- a/boto/https_connection.py +++ b/boto/https_connection.py @@ -106,6 +106,8 @@ def __init__(self, host, port=default_port, key_file=None, 
cert_file=None, def connect(self): "Connect to a host on a given (SSL) port." sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + if hasattr(self, "timeout") and self.timeout is not socket._GLOBAL_DEFAULT_TIMEOUT: + sock.settimeout(self.timeout) sock.connect((self.host, self.port)) boto.log.debug("wrapping ssl socket; CA certificate file=%s", self.ca_certs) diff --git a/boto/rds/dbsecuritygroup.py b/boto/rds/dbsecuritygroup.py index 6a69ddb0dc..378360667d 100644 --- a/boto/rds/dbsecuritygroup.py +++ b/boto/rds/dbsecuritygroup.py @@ -28,13 +28,18 @@ class DBSecurityGroup(object): """ Represents an RDS database security group - Properties reference available from the AWS documentation at http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/API_DeleteDBSecurityGroup.html + Properties reference available from the AWS documentation at + http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/API_DeleteDBSecurityGroup.html - :ivar Status: The current status of the security group. Possibile values are [ active, ? ]. Reference documentation lacks specifics of possibilities - :ivar connection: boto.rds.RDSConnection associated with the current object + :ivar Status: The current status of the security group. Possible values are + [ active, ? ]. 
Reference documentation lacks specifics of possibilities + :ivar connection: :py:class:`boto.rds.RDSConnection` associated with the current object :ivar description: The description of the security group - :ivar ec2_groups: List of EC2SecurityGroup objects that this security group PERMITS - :ivar ip_ranges: List of IPRange objects (containing CIDR addresses) that this security group PERMITS + :ivar ec2_groups: List of :py:class:`EC2 Security Group + ` objects that this security + group PERMITS + :ivar ip_ranges: List of :py:class:`boto.rds.dbsecuritygroup.IPRange` + objects (containing CIDR addresses) that this security group PERMITS :ivar name: Name of the security group :ivar owner_id: ID of the owner of the security group. Can be 'None' """ @@ -83,13 +88,14 @@ def authorize(self, cidr_ip=None, ec2_group=None): You need to pass in either a CIDR block to authorize or and EC2 SecurityGroup. - @type cidr_ip: string - @param cidr_ip: A valid CIDR IP range to authorize + :type cidr_ip: string + :param cidr_ip: A valid CIDR IP range to authorize - @type ec2_group: :class:`boto.ec2.securitygroup.SecurityGroup>` + :type ec2_group: :class:`boto.ec2.securitygroup.SecurityGroup` + :param ec2_group: An EC2 security group to authorize - @rtype: bool - @return: True if successful. + :rtype: bool + :return: True if successful. """ if isinstance(ec2_group, SecurityGroup): group_name = ec2_group.name @@ -108,13 +114,14 @@ def revoke(self, cidr_ip=None, ec2_group=None): You need to pass in either a CIDR block or an EC2 SecurityGroup from which to revoke access. - @type cidr_ip: string - @param cidr_ip: A valid CIDR IP range to revoke + :type cidr_ip: string + :param cidr_ip: A valid CIDR IP range to revoke - @type ec2_group: :class:`boto.ec2.securitygroup.SecurityGroup>` + :type ec2_group: :class:`boto.ec2.securitygroup.SecurityGroup` + :param ec2_group: An EC2 security group to revoke - @rtype: bool - @return: True if successful. + :rtype: bool + :return: True if successful. 
""" if isinstance(ec2_group, SecurityGroup): group_name = ec2_group.name @@ -131,6 +138,8 @@ def revoke(self, cidr_ip=None, ec2_group=None): class IPRange(object): """ Describes a CIDR address range for use in a DBSecurityGroup + + :ivar cidr_ip: IP Address range """ def __init__(self, parent=None): @@ -174,4 +183,4 @@ def endElement(self, name, value, connection): elif name == 'EC2SecurityGroupOwnerId': self.owner_id = value else: - setattr(self, name, value) \ No newline at end of file + setattr(self, name, value) diff --git a/boto/s3/bucket.py b/boto/s3/bucket.py index c36a4079a1..dbc0f67dea 100644 --- a/boto/s3/bucket.py +++ b/boto/s3/bucket.py @@ -592,6 +592,8 @@ def delete_key(self, key_name, headers=None, version_id=None, created or removed and what version_id the delete created or removed. """ + if not key_name: + raise ValueError('Empty key names are not allowed') return self._delete_key_internal(key_name, headers=headers, version_id=version_id, mfa_token=mfa_token, diff --git a/boto/s3/key.py b/boto/s3/key.py index fc755601a4..22dd3d48b1 100644 --- a/boto/s3/key.py +++ b/boto/s3/key.py @@ -1197,12 +1197,16 @@ class of the new Key to be REDUCED_REDUNDANCY. The Reduced :param encrypt_key: If True, the new copy of the object will be encrypted on the server-side by S3 and will be stored in an encrypted form while at rest in S3. + + :rtype: int + :return: The number of bytes written to the key. 
""" fp = open(filename, 'rb') try: - self.set_contents_from_file(fp, headers, replace, cb, num_cb, - policy, md5, reduced_redundancy, - encrypt_key=encrypt_key) + return self.set_contents_from_file(fp, headers, replace, + cb, num_cb, policy, + md5, reduced_redundancy, + encrypt_key=encrypt_key) finally: fp.close() diff --git a/boto/swf/layer2.py b/boto/swf/layer2.py index 10c34d5586..cb3298e165 100644 --- a/boto/swf/layer2.py +++ b/boto/swf/layer2.py @@ -41,9 +41,9 @@ def __init__(self, **kwargs): def __repr__(self): """Generate string representation.""" - rep_str = self.name + rep_str = str(self.name) if hasattr(self, 'version'): - rep_str += '-' + getattr(self, 'version') + rep_str += '-' + str(getattr(self, 'version')) return '<%s %r at 0x%x>' % (self.__class__.__name__, rep_str, id(self)) class Domain(SWFBase): diff --git a/boto/vpc/vpc.py b/boto/vpc/vpc.py index 0539acd852..8d4c40a7a3 100644 --- a/boto/vpc/vpc.py +++ b/boto/vpc/vpc.py @@ -52,3 +52,14 @@ def endElement(self, name, value, connection): def delete(self): return self.connection.delete_vpc(self.id) + def _update(self, updated): + self.__dict__.update(updated.__dict__) + + def update(self, validate=False): + vpc_list = self.connection.get_all_vpcs([self.id]) + if len(vpc_list): + updated_vpc = vpc_list[0] + self._update(updated_vpc) + elif validate: + raise ValueError('%s is not a valid VPC ID' % (self.id,)) + return self.state diff --git a/docs/source/cloudsearch_tut.rst b/docs/source/cloudsearch_tut.rst index 7172a47dd7..806bf4b366 100644 --- a/docs/source/cloudsearch_tut.rst +++ b/docs/source/cloudsearch_tut.rst @@ -16,7 +16,9 @@ The first step in accessing CloudSearch is to create a connection to the service The recommended method of doing this is as follows:: >>> import boto.cloudsearch - >>> conn = boto.cloudsearch.connect_to_region("us-east-1", aws_access_key_id= ', aws_secret_access_key='') + >>> conn = boto.cloudsearch.connect_to_region("us-east-1", + ... aws_access_key_id=', + ... 
aws_secret_access_key='') At this point, the variable conn will point to a CloudSearch connection object in the us-east-1 region. Currently, this is the only region which has the @@ -40,7 +42,7 @@ Creating a Domain Once you have a connection established with the CloudSearch service, you will want to create a domain. A domain encapsulates the data that you wish to index, -as well as indexes and metadata relating to it. +as well as indexes and metadata relating to it:: >>> from boto.cloudsearch.domain import Domain >>> domain = Domain(conn, conn.create_domain('demo')) @@ -51,8 +53,9 @@ document service, which you will use to index and search. Setting access policies ----------------------- -Before you can connect to a document service, you need to set the correct access properties. -For example, if you were connecting from 192.168.1.0, you could give yourself access as follows: +Before you can connect to a document service, you need to set the correct +access properties. For example, if you were connecting from 192.168.1.0, you +could give yourself access as follows:: >>> our_ip = '192.168.1.0' @@ -61,50 +64,57 @@ For example, if you were connecting from 192.168.1.0, you could give yourself ac >>> policy.allow_search_ip(our_ip) >>> policy.allow_doc_ip(our_ip) -You can use the allow_search_ip() and allow_doc_ip() methods to give different -CIDR blocks access to searching and the document service respectively. +You can use the :py:meth:`allow_search_ip +` and +:py:meth:`allow_doc_ip ` +methods to give different CIDR blocks access to searching and the document +service respectively. Creating index fields --------------------- Each domain can have up to twenty index fields which are indexed by the CloudSearch service. For each index field, you will need to specify whether -it's a text or integer field, as well as optionaly a default value. 
+it's a text or integer field, as well as optionally a default value:: >>> # Create an 'text' index field called 'username' >>> uname_field = domain.create_index_field('username', 'text') >>> # Epoch time of when the user last did something - >>> time_field = domain.create_index_field('last_activity', 'uint', default=0) + >>> time_field = domain.create_index_field('last_activity', + ... 'uint', + ... default=0) It is also possible to mark an index field as a facet. Doing so allows a search query to return categories into which results can be grouped, or to create -drill-down categories +drill-down categories:: >>> # But it would be neat to drill down into different countries >>> loc_field = domain.create_index_field('location', 'text', facet=True) Finally, you can also mark a snippet of text as being able to be returned -directly in your search query by using the results option. +directly in your search query by using the results option:: >>> # Directly insert user snippets in our results >>> snippet_field = domain.create_index_field('snippet', 'text', result=True) -You can add up to 20 index fields in this manner: +You can add up to 20 index fields in this manner:: - >>> follower_field = domain.create_index_field('follower_count', 'uint', default=0) + >>> follower_field = domain.create_index_field('follower_count', + ... 'uint', + ... default=0) Adding Documents to the Index ----------------------------- Now, we can add some documents to our new search domain. First, you will need a -document service object through which queries are sent: +document service object through which queries are sent:: >>> doc_service = domain.get_document_service() For this example, we will use a pre-populated list of sample content for our import. You would normally pull such data from your database or another -document store. +document store:: >>> users = [ {
You -can schedule a document to be added by using the add() method. Whenever you are -adding a document, you must provide a unique ID, a version ID, and the actual -document to be indexed. In this case, we are using the user ID as our unique -ID. The version ID is used to determine which is the latest version of an -object to be indexed. If you wish to update a document, you must use a higher -version ID. In this case, we are using the time of the user's last activity as -a version number. +can schedule a document to be added by using the :py:meth:`add +` method. Whenever you are adding a +document, you must provide a unique ID, a version ID, and the actual document +to be indexed. In this case, we are using the user ID as our unique ID. The +version ID is used to determine which is the latest version of an object to be +indexed. If you wish to update a document, you must use a higher version ID. In +this case, we are using the time of the user's last activity as a version +number:: >>> for user in users: >>> doc_service.add(user['id'], user['last_activity'], user) When you are ready to send the batched request to the document service, you can -do with the commit() method. Note that cloudsearch will charge per 1000 batch -uploads. Each batch upload must be under 5MB. +do so with the :py:meth:`commit +` method. Note that +cloudsearch will charge per 1000 batch uploads. Each batch upload must be under +5MB:: >>> result = doc_service.commit() -The result is an instance of `cloudsearch.CommitResponse` which will -make the plain dictionary response a nice object (ie result.adds, -result.deletes) and raise an exception for us if all of our documents -weren't actually committed.
After you have successfully committed some documents to cloudsearch, you must use :py:meth:`clear_sdf @@ -173,12 +186,13 @@ cleared. Searching Documents ------------------- -Now, let's try performing a search. First, we will need a SearchServiceConnection: +Now, let's try performing a search. First, we will need a +SearchServiceConnection:: >>> search_service = domain.get_search_service() A standard search will return documents which contain the exact words being -searched for. +searched for:: >>> results = search_service.search(q="dan") >>> results.hits @@ -186,7 +200,7 @@ searched for. >>> map(lambda x: x['id'], results) [u'1', u'4'] -The standard search does not look at word order: +The standard search does not look at word order:: >>> results = search_service.search(q="dinosaur dress") >>> results.hits @@ -196,7 +210,7 @@ The standard search does not look at word order: It's also possible to do more complex queries using the bq argument (Boolean Query). When you are using bq, your search terms must be enclosed in single -quotes. +quotes:: >>> results = search_service.search(bq="'dan'") >>> results.hits @@ -205,7 +219,7 @@ quotes. [u'1', u'4'] When you are using boolean queries, it's also possible to use wildcards to -extend your search to all words which start with your search terms: +extend your search to all words which start with your search terms:: >>> results = search_service.search(bq="'dan*'") >>> results.hits @@ -215,7 +229,7 @@ extend your search to all words which start with your search terms: The boolean query also allows you to create more complex queries. You can OR term together using "|", AND terms together using "+" or a space, and you can -remove words from the query using the "-" operator. +remove words from the query using the "-" operator:: >>> results = search_service.search(bq="'watched|moved'") >>> results.hits @@ -224,7 +238,7 @@ remove words from the query using the "-" operator. 
[u'3', u'4'] By default, the search will return 10 terms but it is possible to adjust this -by using the size argument as follows: +by using the size argument as follows:: >>> results = search_service.search(bq="'dan*'", size=2) >>> results.hits @@ -232,7 +246,8 @@ by using the size argument as follows: >>> map(lambda x: x['id'], results) [u'1', u'2'] -It is also possible to offset the start of the search by using the start argument as follows: +It is also possible to offset the start of the search by using the start +argument as follows:: >>> results = search_service.search(bq="'dan*'", start=2) >>> results.hits @@ -244,18 +259,20 @@ It is also possible to offset the start of the search by using the start argumen Ordering search results and rank expressions -------------------------------------------- -If your search query is going to return many results, it is good to be able to sort them -You can order your search results by using the rank argument. You are able to -sort on any fields which have the results option turned on. +If your search query is going to return many results, it is good to be able to +sort them. You can order your search results by using the rank argument. 
You are +able to sort on any fields which have the results option turned on:: >>> results = search_service.search(bq=query, rank=['-follower_count']) You can also create your own rank expressions to sort your results according to -other criteria: +other criteria, such as showing most recently active user, or combining the +recency score with the text_relevance:: - >>> domain.create_rank_expression('recently_active', 'last_activity') # We'll want to be able to just show the most recently active users + >>> domain.create_rank_expression('recently_active', 'last_activity') - >>> domain.create_rank_expression('activish', 'text_relevance + ((follower_count/(time() - last_activity))*1000)') # Let's get trickier and combine text relevance with a really dynamic expression + >>> domain.create_rank_expression('activish', + ... 'text_relevance + ((follower_count/(time() - last_activity))*1000)') >>> results = search_service.search(bq=query, rank=['-recently_active']) @@ -273,7 +290,7 @@ you map the term running to the stem run and then search for running, the request matches documents that contain run as well as running. To get the current stemming dictionary defined for a domain, use the -``get_stemming`` method of the Domain object. +:py:meth:`get_stemming ` method:: >>> stems = domain.get_stemming() >>> stems @@ -282,7 +299,7 @@ To get the current stemming dictionary defined for a domain, use the This returns a dictionary object that can be manipulated directly to add additional stems for your search domain by adding pairs of term:stem -to the stems dictionary. +to the stems dictionary:: >>> stems['stems']['running'] = 'run' >>> stems['stems']['ran'] = 'run' @@ -291,12 +308,12 @@ to the stems dictionary. >>> This has changed the value locally. To update the information in -Amazon CloudSearch, you need to save the data. 
+Amazon CloudSearch, you need to save the data:: >>> stems.save() You can also access certain CloudSearch-specific attributes related to -the stemming dictionary defined for your domain. +the stemming dictionary defined for your domain:: >>> stems.status u'RequiresIndexDocuments' @@ -321,7 +338,7 @@ so common that including them would result in a massive number of matches. To view the stopwords currently defined for your domain, use the -``get_stopwords`` method of the Domain object. +:py:meth:`get_stopwords ` method:: >>> stopwords = domain.get_stopwords() >>> stopwords @@ -344,17 +361,18 @@ To view the stopwords currently defined for your domain, use the u'the', u'to', u'was']} - >>> + >>> You can add additional stopwords by simply appending the values to the -list. +list:: >>> stopwords['stopwords'].append('foo') >>> stopwords['stopwords'].append('bar') >>> stopwords Similarly, you could remove currently defined stopwords from the list. -To save the changes, use the ``save`` method. +To save the changes, use the :py:meth:`save +` method:: >>> stopwords.save() @@ -371,13 +389,13 @@ the indexed term, the results will include documents that contain the indexed term. If you want two terms to match the same documents, you must define -them as synonyms of each other. For example: +them as synonyms of each other. For example:: cat, feline feline, cat To view the synonyms currently defined for your domain, use the -``get_synonyms`` method of the Domain object. +:py:meth:`get_synonyms ` method:: >>> synonyms = domain.get_synonyms() >>> synonyms @@ -385,12 +403,13 @@ To view the synonyms currently defined for your domain, use the >>> You can define new synonyms by adding new term:synonyms entries to the -synonyms dictionary object. +synonyms dictionary object:: >>> synonyms['synonyms']['cat'] = ['feline', 'kitten'] >>> synonyms['synonyms']['dog'] = ['canine', 'puppy'] -To save the changes, use the ``save`` method. 
+To save the changes, use the :py:meth:`save +` method:: >>> synonyms.save() @@ -400,6 +419,8 @@ that provide additional information about the stopwords in your domain. Deleting Documents ------------------ +It is also possible to delete documents:: + >>> import time >>> from datetime import datetime diff --git a/docs/source/ec2_tut.rst b/docs/source/ec2_tut.rst index 48f5e79fec..5aac3d7fa7 100644 --- a/docs/source/ec2_tut.rst +++ b/docs/source/ec2_tut.rst @@ -14,7 +14,7 @@ Creating a Connection The first step in accessing EC2 is to create a connection to the service. The recommended way of doing this in boto is:: - >>> from boto.ec2 + >>> import boto.ec2 >>> conn = boto.ec2.connect_to_region("us-east-1", ... aws_access_key_id='', ... aws_secret_access_key='') @@ -84,3 +84,95 @@ you can request instance termination. To do so you can use the call bellow:: Please use with care since once you request termination for an instance there is no turning back. +Checking What Instances Are Running +----------------------------------- +You can also get information on your currently running instances:: + + >>> reservations = conn.get_all_instances() + >>> reservations + [Reservation:r-00000000] + +A reservation corresponds to a command to start instances. You can see what +instances are associated with a reservation:: + + >>> instances = reservations[0].instances + >>> instances + [Instance:i-00000000] + +An instance object allows you get more meta-data available about the instance:: + + >>> inst = instances[0] + >>> inst.instance_type + u'c1.xlarge' + >>> inst.placement + u'us-east-1a' + +In this case, we can see that our instance is a c1.xlarge instance in the +`us-east-1a` availability zone. + +================================= +Using Elastic Block Storage (EBS) +================================= + + +EBS Basics +---------- + +EBS can be used by EC2 instances for permanent storage. 
Note that EBS volumes +must be in the same availability zone as the EC2 instance you wish to attach it +to. + +To actually create a volume you will need to specify a few details. The +following example will create a 50GB EBS in one of the `us-east-1a` availability +zones:: + + >>> vol = conn.create_volume(50, "us-east-1a") + >>> vol + Volume:vol-00000000 + +You can check that the volume is now ready and available:: + + >>> curr_vol = conn.get_all_volumes([vol.id])[0] + >>> curr_vol.status + u'available' + >>> curr_vol.zone + u'us-east-1a' + +We can now attach this volume to the EC2 instance we created earlier, making it +available as a new device:: + + >>> conn.attach_volume (vol.id, inst.id, "/dev/sdx") + u'attaching' + +You will now have a new volume attached to your instance. Note that with some +Linux kernels, `/dev/sdx` may get translated to `/dev/xvdx`. This device can +now be used as a normal block device within Linux. + +Working With Snapshots +---------------------- + +Snapshots allow you to make point-in-time snapshots of an EBS volume for future +recovery. Snapshots allow you to create incremental backups, and can also be +used to instantiate multiple new volumes. Snapshots can also be used to move +EBS volumes across availability zones or making backups to S3. + +Creating a snapshot is easy:: + + >>> snapshot = conn.create_snapshot(vol.id, 'My snapshot') + >>> snapshot + Snapshot:snap-00000000 + +Once you have a snapshot, you can create a new volume from it. 
Volumes are +created lazily from snapshots, which means you can start using such a volume +straight away:: + + >>> new_vol = snapshot.create_volume('us-east-1a') + >>> conn.attach_volume (new_vol.id, inst.id, "/dev/sdy") + u'attaching' + +If you no longer need a snapshot, you can also easily delete it:: + + >>> conn.delete_snapshot(snapshot.id) + True + + diff --git a/docs/source/index.rst b/docs/source/index.rst index c4297c2150..7d4ad3a9cd 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -111,6 +111,7 @@ Additional Resources ref/sdb_db dynamodb_tut ref/dynamodb + rds_tut ref/rds ref/cloudformation ref/iam diff --git a/docs/source/rds_tut.rst b/docs/source/rds_tut.rst index 97168d2f97..468b922792 100644 --- a/docs/source/rds_tut.rst +++ b/docs/source/rds_tut.rst @@ -1,4 +1,4 @@ -.. _sqs_tut: +.. _rds_tut: ======================================= An Introduction to boto's RDS interface diff --git a/docs/source/ref/cloudsearch.rst b/docs/source/ref/cloudsearch.rst index 14671ee5cc..1610200a54 100644 --- a/docs/source/ref/cloudsearch.rst +++ b/docs/source/ref/cloudsearch.rst @@ -7,7 +7,7 @@ Cloudsearch boto.cloudsearch ---------------- -.. automodule:: boto.swf +.. 
automodule:: boto.cloudsearch :members: :undoc-members: diff --git a/tests/integration/s3/test_connection.py b/tests/integration/s3/test_connection.py index b6733036bb..5d7473ee50 100644 --- a/tests/integration/s3/test_connection.py +++ b/tests/integration/s3/test_connection.py @@ -99,7 +99,8 @@ def test_1_basic(self): k.name = 'foo/bar' k.set_contents_from_string(s1, headers) k.name = 'foo/bas' - k.set_contents_from_filename('foobar') + size = k.set_contents_from_filename('foobar') + assert size == 42 k.name = 'foo/bat' k.set_contents_from_string(s1) k.name = 'fie/bar' diff --git a/tests/unit/beanstalk/test_layer1.py b/tests/unit/beanstalk/test_layer1.py index 6df75374e2..2ecec0d22a 100644 --- a/tests/unit/beanstalk/test_layer1.py +++ b/tests/unit/beanstalk/test_layer1.py @@ -44,11 +44,8 @@ def test_list_available_solution_stacks(self): self.assert_request_parameters({ 'Action': 'ListAvailableSolutionStacks', 'ContentType': 'JSON', - 'SignatureMethod': 'HmacSHA256', - 'SignatureVersion': 2, 'Version': '2010-12-01', - 'AWSAccessKeyId': 'aws_access_key_id', - }, ignore_params_values=['Timestamp']) + }) class TestCreateApplicationVersion(AWSMockServiceTestCase): @@ -78,16 +75,13 @@ def test_create_application_version(self): self.assert_request_parameters({ 'Action': 'CreateApplicationVersion', 'ContentType': 'JSON', - 'SignatureMethod': 'HmacSHA256', - 'SignatureVersion': 2, 'Version': '2010-12-01', 'ApplicationName': 'application1', 'AutoCreateApplication': 'true', 'SourceBundle.S3Bucket': 'mybucket', 'SourceBundle.S3Key': 'mykey', 'VersionLabel': 'version1', - 'AWSAccessKeyId': 'aws_access_key_id', - }, ignore_params_values=['Timestamp']) + }) self.assertEqual(app_version['ApplicationName'], 'application1') self.assertEqual(app_version['VersionLabel'], 'version1') @@ -114,15 +108,12 @@ def test_create_environment(self): 'EnvironmentName': 'environment1', 'TemplateName': '32bit Amazon Linux running Tomcat 7', 'ContentType': 'JSON', - 'SignatureMethod': 'HmacSHA256', 
- 'SignatureVersion': 2, 'Version': '2010-12-01', 'VersionLabel': 'version1', - 'AWSAccessKeyId': 'aws_access_key_id', 'OptionSettings.member.1.Namespace': 'aws:autoscaling:launchconfiguration', 'OptionSettings.member.1.OptionName': 'Ec2KeyName', 'OptionSettings.member.1.Value': 'mykeypair', 'OptionSettings.member.2.Namespace': 'aws:elasticbeanstalk:application:environment', 'OptionSettings.member.2.OptionName': 'ENVVAR', 'OptionSettings.member.2.Value': 'VALUE1', - }, ignore_params_values=['Timestamp']) + }) diff --git a/tests/unit/glacier/test_concurrent.py b/tests/unit/glacier/test_concurrent.py index 98d228e699..2951a6c588 100644 --- a/tests/unit/glacier/test_concurrent.py +++ b/tests/unit/glacier/test_concurrent.py @@ -20,6 +20,7 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # +import tempfile from Queue import Queue try: @@ -30,6 +31,7 @@ from tests.unit import AWSMockServiceTestCase from boto.glacier.concurrent import ConcurrentUploader, ConcurrentDownloader +from boto.glacier.concurrent import UploadWorkerThread class FakeThreadedConcurrentUploader(ConcurrentUploader): @@ -119,5 +121,23 @@ def test_downloader_work_queue_is_correctly_populated(self): self.assertEqual(len(items), 12) +class TestResourceCleanup(unittest.TestCase): + def setUp(self): + self.fileobj = tempfile.NamedTemporaryFile() + self.filename = self.fileobj.name + + def test_fileobj_closed_when_thread_shuts_down(self): + thread = UploadWorkerThread(mock.Mock(), 'vault_name', + self.filename, 'upload_id', + Queue(), Queue()) + fileobj = thread._fileobj + self.assertFalse(fileobj.closed) + # By settings should_continue to False, it should immediately + # exit, and we can still verify cleanup behavior. 
+ thread.should_continue = False + thread.run() + self.assertTrue(fileobj.closed) + + if __name__ == '__main__': unittest.main() diff --git a/tests/unit/s3/test_bucket.py b/tests/unit/s3/test_bucket.py new file mode 100644 index 0000000000..de7e27cc70 --- /dev/null +++ b/tests/unit/s3/test_bucket.py @@ -0,0 +1,48 @@ +from tests.unit import unittest +from tests.unit import AWSMockServiceTestCase + +from boto.s3.connection import S3Connection +from boto.s3.bucket import Bucket + +class TestS3Bucket(AWSMockServiceTestCase): + connection_class = S3Connection + + def setUp(self): + super(TestS3Bucket, self).setUp() + + def test_bucket_create_bucket(self): + self.set_http_response(status_code=200) + bucket = self.service_connection.create_bucket('mybucket_create') + self.assertEqual(bucket.name, 'mybucket_create') + + def test_bucket_constructor(self): + self.set_http_response(status_code=200) + bucket = Bucket(self.service_connection, 'mybucket_constructor') + self.assertEqual(bucket.name, 'mybucket_constructor') + + def test_bucket_basics(self): + self.set_http_response(status_code=200) + bucket = self.service_connection.create_bucket('mybucket') + self.assertEqual(bucket.__repr__(), '') + + def test_bucket_new_key(self): + self.set_http_response(status_code=200) + bucket = self.service_connection.create_bucket('mybucket') + key = bucket.new_key('mykey') + + self.assertEqual(key.bucket, bucket) + self.assertEqual(key.key, 'mykey') + + def test_bucket_new_key_missing_name(self): + self.set_http_response(status_code=200) + bucket = self.service_connection.create_bucket('mybucket') + + with self.assertRaises(ValueError): + key = bucket.new_key('') + + def test_bucket_delete_key_missing_name(self): + self.set_http_response(status_code=200) + bucket = self.service_connection.create_bucket('mybucket') + + with self.assertRaises(ValueError): + key = bucket.delete_key('') diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py index 213f5db936..d2c3e2aa97 
100644 --- a/tests/unit/test_connection.py +++ b/tests/unit/test_connection.py @@ -19,8 +19,14 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # +import urlparse from tests.unit import unittest +from httpretty import HTTPretty + from boto.connection import AWSQueryConnection +from boto.exception import BotoServerError +from boto.regioninfo import RegionInfo +from boto.compat import json class TestListParamsSerialization(unittest.TestCase): @@ -56,5 +62,205 @@ def test_simple_list_serialization(self): }, params) +class MockAWSService(AWSQueryConnection): + """ + Fake AWS Service + + This is used to test the AWSQueryConnection object is behaving properly. + """ + + APIVersion = '2012-01-01' + def _required_auth_capability(self): + return ['sign-v2'] + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, host=None, port=None, + proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, debug=0, + https_connection_factory=None, region=None, path='/', + api_version=None, security_token=None, + validate_certs=True): + self.region = region + AWSQueryConnection.__init__(self, aws_access_key_id, + aws_secret_access_key, + is_secure, port, proxy, proxy_port, + proxy_user, proxy_pass, + self.region.endpoint, debug, + https_connection_factory, path, + security_token, + validate_certs=validate_certs) + +class TestAWSQueryConnection(unittest.TestCase): + def setUp(self): + self.region = RegionInfo(name='cc-zone-1', + endpoint='mockservice.cc-zone-1.amazonaws.com', + connection_cls=MockAWSService) + + HTTPretty.enable() + + def tearDown(self): + HTTPretty.disable() + +class TestAWSQueryConnectionSimple(TestAWSQueryConnection): + def test_query_connection_basis(self): + HTTPretty.register_uri(HTTPretty.POST, + 'https://%s/' % self.region.endpoint, + json.dumps({'test': 'secure'}), + content_type='application/json') + + conn = self.region.connect(aws_access_key_id='access_key', + 
aws_secret_access_key='secret') + + self.assertEqual(conn.host, 'mockservice.cc-zone-1.amazonaws.com') + + def test_single_command(self): + HTTPretty.register_uri(HTTPretty.POST, + 'https://%s/' % self.region.endpoint, + json.dumps({'test': 'secure'}), + content_type='application/json') + + conn = self.region.connect(aws_access_key_id='access_key', + aws_secret_access_key='secret') + resp = conn.make_request('myCmd', + {'par1': 'foo', 'par2': 'baz'}, + "/", + "POST") + + args = urlparse.parse_qs(HTTPretty.last_request.body) + self.assertEqual(args['AWSAccessKeyId'], ['access_key']) + self.assertEqual(args['SignatureMethod'], ['HmacSHA256']) + self.assertEqual(args['Version'], [conn.APIVersion]) + self.assertEqual(args['par1'], ['foo']) + self.assertEqual(args['par2'], ['baz']) + + self.assertEqual(resp.read(), '{"test": "secure"}') + + def test_multi_commands(self): + """Check connection re-use""" + HTTPretty.register_uri(HTTPretty.POST, + 'https://%s/' % self.region.endpoint, + json.dumps({'test': 'secure'}), + content_type='application/json') + + conn = self.region.connect(aws_access_key_id='access_key', + aws_secret_access_key='secret') + + resp1 = conn.make_request('myCmd1', + {'par1': 'foo', 'par2': 'baz'}, + "/", + "POST") + body1 = urlparse.parse_qs(HTTPretty.last_request.body) + + resp2 = conn.make_request('myCmd2', + {'par3': 'bar', 'par4': 'narf'}, + "/", + "POST") + body2 = urlparse.parse_qs(HTTPretty.last_request.body) + + self.assertEqual(body1['par1'], ['foo']) + self.assertEqual(body1['par2'], ['baz']) + with self.assertRaises(KeyError): + body1['par3'] + + self.assertEqual(body2['par3'], ['bar']) + self.assertEqual(body2['par4'], ['narf']) + with self.assertRaises(KeyError): + body2['par1'] + + self.assertEqual(resp1.read(), '{"test": "secure"}') + self.assertEqual(resp2.read(), '{"test": "secure"}') + + def test_non_secure(self): + HTTPretty.register_uri(HTTPretty.POST, + 'http://%s/' % self.region.endpoint, + json.dumps({'test': 'normal'}), + 
content_type='application/json') + + conn = self.region.connect(aws_access_key_id='access_key', + aws_secret_access_key='secret', + is_secure=False) + resp = conn.make_request('myCmd1', + {'par1': 'foo', 'par2': 'baz'}, + "/", + "POST") + + self.assertEqual(resp.read(), '{"test": "normal"}') + + def test_alternate_port(self): + HTTPretty.register_uri(HTTPretty.POST, + 'http://%s:8080/' % self.region.endpoint, + json.dumps({'test': 'alternate'}), + content_type='application/json') + + conn = self.region.connect(aws_access_key_id='access_key', + aws_secret_access_key='secret', + port=8080, + is_secure=False) + resp = conn.make_request('myCmd1', + {'par1': 'foo', 'par2': 'baz'}, + "/", + "POST") + + self.assertEqual(resp.read(), '{"test": "alternate"}') + + def test_temp_failure(self): + responses = [HTTPretty.Response(body="{'test': 'fail'}", status=500), + HTTPretty.Response(body="{'test': 'success'}", status=200)] + + HTTPretty.register_uri(HTTPretty.POST, + 'https://%s/temp_fail/' % self.region.endpoint, + responses=responses) + + conn = self.region.connect(aws_access_key_id='access_key', + aws_secret_access_key='secret') + resp = conn.make_request('myCmd1', + {'par1': 'foo', 'par2': 'baz'}, + '/temp_fail/', + 'POST') + self.assertEqual(resp.read(), "{'test': 'success'}") + +class TestAWSQueryStatus(TestAWSQueryConnection): + + def test_get_status(self): + HTTPretty.register_uri(HTTPretty.GET, + 'https://%s/status' % self.region.endpoint, + 'ok', + content_type='text/xml') + + conn = self.region.connect(aws_access_key_id='access_key', + aws_secret_access_key='secret') + resp = conn.get_status('getStatus', + {'par1': 'foo', 'par2': 'baz'}, + 'status') + + self.assertEqual(resp, "ok") + + def test_get_status_blank_error(self): + HTTPretty.register_uri(HTTPretty.GET, + 'https://%s/status' % self.region.endpoint, + '', + content_type='text/xml') + + conn = self.region.connect(aws_access_key_id='access_key', + aws_secret_access_key='secret') + with 
self.assertRaises(BotoServerError): + resp = conn.get_status('getStatus', + {'par1': 'foo', 'par2': 'baz'}, + 'status') + + def test_get_status_error(self): + HTTPretty.register_uri(HTTPretty.GET, + 'https://%s/status' % self.region.endpoint, + 'error', + content_type='text/xml', + status=400) + + conn = self.region.connect(aws_access_key_id='access_key', + aws_secret_access_key='secret') + with self.assertRaises(BotoServerError): + resp = conn.get_status('getStatus', + {'par1': 'foo', 'par2': 'baz'}, + 'status') + if __name__ == '__main__': unittest.main()