From 97c4f5f6c751605dfef1d9eb67464c102a31648b Mon Sep 17 00:00:00 2001 From: Mitch Garnaat Date: Mon, 13 Jan 2014 02:08:55 -0800 Subject: [PATCH 01/60] Removing obselete core directory. --- boto/core/README | 58 ------------- boto/core/__init__.py | 23 ----- boto/core/auth.py | 78 ----------------- boto/core/credentials.py | 154 --------------------------------- boto/core/dictresponse.py | 178 -------------------------------------- boto/core/service.py | 67 -------------- 6 files changed, 558 deletions(-) delete mode 100644 boto/core/README delete mode 100644 boto/core/__init__.py delete mode 100644 boto/core/auth.py delete mode 100644 boto/core/credentials.py delete mode 100644 boto/core/dictresponse.py delete mode 100644 boto/core/service.py diff --git a/boto/core/README b/boto/core/README deleted file mode 100644 index 9c3f217fa2..0000000000 --- a/boto/core/README +++ /dev/null @@ -1,58 +0,0 @@ -What's This All About? -====================== - -This directory contains the beginnings of what is hoped will be the -new core of boto. We want to move from using httplib to using -requests. We also want to offer full support for Python 2.6, 2.7, and -3.x. This is a pretty big change and will require some time to roll -out but this module provides a starting point. - -What you will find in this module: - -* auth.py provides a SigV2 authentication packages as a args hook for requests. -* credentials.py provides a way of finding AWS credentials (see below). -* dictresponse.py provides a generic response handler that parses XML responses - and returns them as nested Python data structures. -* service.py provides a simple example of a service that actually makes an EC2 - request and returns a response. - -Credentials -=========== - -Credentials are being handled a bit differently here. The following -describes the order of search for credentials: - -1. If your local environment for has ACCESS_KEY and SECRET_KEY variables - defined, these will be used. - -2. 
If your local environment has AWS_CREDENTIAL_FILE defined, it is assumed - that it will be a config file with entries like this: - - [default] - access_key = xxxxxxxxxxxxxxxx - sercret_key = xxxxxxxxxxxxxxxxxx - - [test] - access_key = yyyyyyyyyyyyyy - secret_key = yyyyyyyyyyyyyyyyyy - - Each section in the config file is called a persona and you can reference - a particular persona by name when instantiating a Service class. - -3. If a standard boto config file is found that contains credentials, those - will be used. - -4. If temporary credentials for an IAM Role are found in the instance - metadata of an EC2 instance, these credentials will be used. - -Trying Things Out -================= -To try this code out, cd to the directory containing the core module. - - >>> import core.service - >>> s = core.service.Service() - >>> s.describe_instances() - -This code should return a Python data structure containing information -about your currently running EC2 instances. This example should run in -Python 2.6.x, 2.7.x and Python 3.x. \ No newline at end of file diff --git a/boto/core/__init__.py b/boto/core/__init__.py deleted file mode 100644 index e27666ddb1..0000000000 --- a/boto/core/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ -# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. -# All Rights Reserved -# -# Permission is hereby granted, free of charge, to any person obtaining a -# copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, dis- -# tribute, sublicense, and/or sell copies of the Software, and to permit -# persons to whom the Software is furnished to do so, subject to the fol- -# lowing conditions: -# -# The above copyright notice and this permission notice shall be included -# in all copies or substantial portions of the Software. 
-# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- -# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -# IN THE SOFTWARE. -# diff --git a/boto/core/auth.py b/boto/core/auth.py deleted file mode 100644 index 890faa5b1d..0000000000 --- a/boto/core/auth.py +++ /dev/null @@ -1,78 +0,0 @@ -# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ -# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. -# All Rights Reserved -# -# Permission is hereby granted, free of charge, to any person obtaining a -# copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, dis- -# tribute, sublicense, and/or sell copies of the Software, and to permit -# persons to whom the Software is furnished to do so, subject to the fol- -# lowing conditions: -# -# The above copyright notice and this permission notice shall be included -# in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- -# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -# IN THE SOFTWARE. 
-# -import requests.packages.urllib3 -import hmac -import base64 -from hashlib import sha256 -import sys -import datetime - -try: - from urllib.parse import quote -except ImportError: - from urllib import quote - - -class SigV2Auth(object): - """ - Sign an Query Signature V2 request. - """ - def __init__(self, credentials, api_version=''): - self.credentials = credentials - self.api_version = api_version - self.hmac = hmac.new(self.credentials.secret_key.encode('utf-8'), - digestmod=sha256) - - def calc_signature(self, args): - scheme, host, port = requests.packages.urllib3.get_host(args['url']) - string_to_sign = '%s\n%s\n%s\n' % (args['method'], host, '/') - hmac = self.hmac.copy() - args['params']['SignatureMethod'] = 'HmacSHA256' - if self.credentials.token: - args['params']['SecurityToken'] = self.credentials.token - sorted_params = sorted(args['params']) - pairs = [] - for key in sorted_params: - value = args['params'][key] - pairs.append(quote(key, safe='') + '=' + - quote(value, safe='-_~')) - qs = '&'.join(pairs) - string_to_sign += qs - print('string_to_sign') - print(string_to_sign) - hmac.update(string_to_sign.encode('utf-8')) - b64 = base64.b64encode(hmac.digest()).strip().decode('utf-8') - return (qs, b64) - - def add_auth(self, args): - args['params']['Action'] = 'DescribeInstances' - args['params']['AWSAccessKeyId'] = self.credentials.access_key - args['params']['SignatureVersion'] = '2' - args['params']['Timestamp'] = datetime.datetime.utcnow().isoformat() - args['params']['Version'] = self.api_version - qs, signature = self.calc_signature(args) - args['params']['Signature'] = signature - if args['method'] == 'POST': - args['data'] = args['params'] - args['params'] = {} diff --git a/boto/core/credentials.py b/boto/core/credentials.py deleted file mode 100644 index b4b35b5305..0000000000 --- a/boto/core/credentials.py +++ /dev/null @@ -1,154 +0,0 @@ -# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ -# Copyright (c) 2012 Amazon.com, Inc. 
or its affiliates. -# All Rights Reserved -# -# Permission is hereby granted, free of charge, to any person obtaining a -# copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, dis- -# tribute, sublicense, and/or sell copies of the Software, and to permit -# persons to whom the Software is furnished to do so, subject to the fol- -# lowing conditions: -# -# The above copyright notice and this permission notice shall be included -# in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- -# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -# IN THE SOFTWARE. -# -import os -from six.moves import configparser -from boto.compat import json -import requests - - -class Credentials(object): - """ - Holds the credentials needed to authenticate requests. In addition - the Credential object knows how to search for credentials and how - to choose the right credentials when multiple credentials are found. 
- """ - - def __init__(self, access_key=None, secret_key=None, token=None): - self.access_key = access_key - self.secret_key = secret_key - self.token = token - - -def _search_md(url='http://169.254.169.254/latest/meta-data/iam/'): - d = {} - try: - r = requests.get(url, timeout=.1) - if r.content: - fields = r.content.split('\n') - for field in fields: - if field.endswith('/'): - d[field[0:-1]] = get_iam_role(url + field) - else: - val = requests.get(url + field).content - if val[0] == '{': - val = json.loads(val) - else: - p = val.find('\n') - if p > 0: - val = r.content.split('\n') - d[field] = val - except (requests.Timeout, requests.ConnectionError): - pass - return d - - -def search_metadata(**kwargs): - credentials = None - metadata = _search_md() - # Assuming there's only one role on the instance profile. - if metadata: - metadata = metadata['iam']['security-credentials'].values()[0] - credentials = Credentials(metadata['AccessKeyId'], - metadata['SecretAccessKey'], - metadata['Token']) - return credentials - - -def search_environment(**kwargs): - """ - Search for credentials in explicit environment variables. - """ - credentials = None - access_key = os.environ.get(kwargs['access_key_name'].upper(), None) - secret_key = os.environ.get(kwargs['secret_key_name'].upper(), None) - if access_key and secret_key: - credentials = Credentials(access_key, secret_key) - return credentials - - -def search_file(**kwargs): - """ - If the 'AWS_CREDENTIAL_FILE' environment variable exists, parse that - file for credentials. 
- """ - credentials = None - if 'AWS_CREDENTIAL_FILE' in os.environ: - persona = kwargs.get('persona', 'default') - access_key_name = kwargs['access_key_name'] - secret_key_name = kwargs['secret_key_name'] - access_key = secret_key = None - path = os.getenv('AWS_CREDENTIAL_FILE') - path = os.path.expandvars(path) - path = os.path.expanduser(path) - cp = configparser.RawConfigParser() - cp.read(path) - if not cp.has_section(persona): - raise ValueError('Persona: %s not found' % persona) - if cp.has_option(persona, access_key_name): - access_key = cp.get(persona, access_key_name) - else: - access_key = None - if cp.has_option(persona, secret_key_name): - secret_key = cp.get(persona, secret_key_name) - else: - secret_key = None - if access_key and secret_key: - credentials = Credentials(access_key, secret_key) - return credentials - - -def search_boto_config(**kwargs): - """ - Look for credentials in boto config file. - """ - credentials = access_key = secret_key = None - if 'BOTO_CONFIG' in os.environ: - paths = [os.environ['BOTO_CONFIG']] - else: - paths = ['/etc/boto.cfg', '~/.boto'] - paths = [os.path.expandvars(p) for p in paths] - paths = [os.path.expanduser(p) for p in paths] - cp = configparser.RawConfigParser() - cp.read(paths) - if cp.has_section('Credentials'): - access_key = cp.get('Credentials', 'aws_access_key_id') - secret_key = cp.get('Credentials', 'aws_secret_access_key') - if access_key and secret_key: - credentials = Credentials(access_key, secret_key) - return credentials - -AllCredentialFunctions = [search_environment, - search_file, - search_boto_config, - search_metadata] - - -def get_credentials(persona='default'): - for cred_fn in AllCredentialFunctions: - credentials = cred_fn(persona=persona, - access_key_name='access_key', - secret_key_name='secret_key') - if credentials: - break - return credentials diff --git a/boto/core/dictresponse.py b/boto/core/dictresponse.py deleted file mode 100644 index 3730cf0edc..0000000000 --- 
a/boto/core/dictresponse.py +++ /dev/null @@ -1,178 +0,0 @@ -# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ -# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. -# All Rights Reserved -# -# Permission is hereby granted, free of charge, to any person obtaining a -# copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, dis- -# tribute, sublicense, and/or sell copies of the Software, and to permit -# persons to whom the Software is furnished to do so, subject to the fol- -# lowing conditions: -# -# The above copyright notice and this permission notice shall be included -# in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- -# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -# IN THE SOFTWARE. 
-# - -import xml.sax - - -def pythonize_name(name, sep='_'): - s = '' - if name[0].isupper: - s = name[0].lower() - for c in name[1:]: - if c.isupper(): - s += sep + c.lower() - else: - s += c - return s - - -class XmlHandler(xml.sax.ContentHandler): - - def __init__(self, root_node, connection): - self.connection = connection - self.nodes = [('root', root_node)] - self.current_text = '' - - def startElement(self, name, attrs): - self.current_text = '' - t = self.nodes[-1][1].startElement(name, attrs, self.connection) - if t is not None: - if isinstance(t, tuple): - self.nodes.append(t) - else: - self.nodes.append((name, t)) - - def endElement(self, name): - self.nodes[-1][1].endElement(name, self.current_text, self.connection) - if self.nodes[-1][0] == name: - self.nodes.pop() - self.current_text = '' - - def characters(self, content): - self.current_text += content - - def parse(self, s): - xml.sax.parseString(s, self) - - -class Element(dict): - - def __init__(self, connection=None, element_name=None, - stack=None, parent=None, list_marker=None, - item_marker=None, pythonize_name=False): - dict.__init__(self) - self.connection = connection - self.element_name = element_name - self.list_marker = list_marker or ['Set'] - self.item_marker = item_marker or ['member', 'item'] - if stack is None: - self.stack = [] - else: - self.stack = stack - self.pythonize_name = pythonize_name - self.parent = parent - - def __getattr__(self, key): - if key in self: - return self[key] - for k in self: - e = self[k] - if isinstance(e, Element): - try: - return getattr(e, key) - except AttributeError: - pass - raise AttributeError - - def get_name(self, name): - if self.pythonize_name: - name = pythonize_name(name) - return name - - def startElement(self, name, attrs, connection): - self.stack.append(name) - for lm in self.list_marker: - if name.endswith(lm): - l = ListElement(self.connection, name, self.list_marker, - self.item_marker, self.pythonize_name) - 
self[self.get_name(name)] = l - return l - if len(self.stack) > 0: - element_name = self.stack[-1] - e = Element(self.connection, element_name, self.stack, self, - self.list_marker, self.item_marker, - self.pythonize_name) - self[self.get_name(element_name)] = e - return (element_name, e) - else: - return None - - def endElement(self, name, value, connection): - if len(self.stack) > 0: - self.stack.pop() - value = value.strip() - if value: - if isinstance(self.parent, Element): - self.parent[self.get_name(name)] = value - elif isinstance(self.parent, ListElement): - self.parent.append(value) - - -class ListElement(list): - - def __init__(self, connection=None, element_name=None, - list_marker=['Set'], item_marker=('member', 'item'), - pythonize_name=False): - list.__init__(self) - self.connection = connection - self.element_name = element_name - self.list_marker = list_marker - self.item_marker = item_marker - self.pythonize_name = pythonize_name - - def get_name(self, name): - if self.pythonize_name: - name = utils.pythonize_name(name) - return name - - def startElement(self, name, attrs, connection): - for lm in self.list_marker: - if name.endswith(lm): - l = ListElement(self.connection, name, - self.list_marker, self.item_marker, - self.pythonize_name) - setattr(self, self.get_name(name), l) - return l - if name in self.item_marker: - e = Element(self.connection, name, parent=self, - list_marker=self.list_marker, - item_marker=self.item_marker, - pythonize_name=self.pythonize_name) - self.append(e) - return e - else: - return None - - def endElement(self, name, value, connection): - if name == self.element_name: - if len(self) > 0: - empty = [] - for e in self: - if isinstance(e, Element): - if len(e) == 0: - empty.append(e) - for e in empty: - self.remove(e) - else: - setattr(self, self.get_name(name), value) diff --git a/boto/core/service.py b/boto/core/service.py deleted file mode 100644 index e30adcbb52..0000000000 --- a/boto/core/service.py +++ /dev/null @@ 
-1,67 +0,0 @@ -# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ -# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. -# All Rights Reserved -# -# Permission is hereby granted, free of charge, to any person obtaining a -# copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, dis- -# tribute, sublicense, and/or sell copies of the Software, and to permit -# persons to whom the Software is furnished to do so, subject to the fol- -# lowing conditions: -# -# The above copyright notice and this permission notice shall be included -# in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- -# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -# IN THE SOFTWARE. -# - -import requests -from .auth import SigV2Auth -from .credentials import get_credentials -from .dictresponse import Element, XmlHandler - - -class Service(object): - """ - This is a simple example service that connects to the EC2 endpoint - and supports a single request (DescribeInstances) to show how to - use the requests-based code rather than the standard boto code which - is based on httplib. At the moment, the only auth mechanism - supported is SigV2. 
- """ - - def __init__(self, host='https://ec2.us-east-1.amazonaws.com', - path='/', api_version='2012-03-01', persona=None): - self.credentials = get_credentials(persona) - self.auth = SigV2Auth(self.credentials, api_version=api_version) - self.host = host - self.path = path - - def get_response(self, params, list_marker=None): - r = requests.post(self.host, params=params, - hooks={'args': self.auth.add_auth}) - r.encoding = 'utf-8' - body = r.text.encode('utf-8') - e = Element(list_marker=list_marker, pythonize_name=True) - h = XmlHandler(e, self) - h.parse(body) - return e - - def build_list_params(self, params, items, label): - if isinstance(items, basestring): - items = [items] - for i in range(1, len(items) + 1): - params['%s.%d' % (label, i)] = items[i - 1] - - def describe_instances(self, instance_ids=None): - params = {} - if instance_ids: - self.build_list_params(params, instance_ids, 'InstanceId') - return self.get_response(params) From 517e450e1d3775d66dc4e2d19252992de3f339df Mon Sep 17 00:00:00 2001 From: Demp Date: Tue, 18 Feb 2014 13:38:49 +0200 Subject: [PATCH 02/60] Classes inherit from old-style classes, super breaks them --- boto/ecs/item.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/boto/ecs/item.py b/boto/ecs/item.py index 4349e41e1d..a5b8fd1cc6 100644 --- a/boto/ecs/item.py +++ b/boto/ecs/item.py @@ -90,14 +90,14 @@ class Item(ResponseGroup): def __init__(self, connection=None): """Initialize this Item""" - super(Item, self).__init__(connection, "Item") + ResponseGroup.__init__(self, connection, "Item") class ItemSet(ResponseGroup): """A special ResponseGroup that has built-in paging, and only creates new Items on the "Item" tag""" def __init__(self, connection, action, params, page=0): - super(ItemSet, self).__init__(connection, "Items") + ResponseGroup.__init__(self, connection, "Items") self.objs = [] self.iter = None self.page = page @@ -150,4 +150,4 @@ def to_xml(self): """Override to first fetch everything""" 
for item in self: pass - return super(ItemSet, self).to_xml() + return ResponseGroup.to_xml(self) From 5a655503e2ebb59cb3f33721fcdbaefceee91ed3 Mon Sep 17 00:00:00 2001 From: Demp Date: Tue, 18 Feb 2014 13:40:30 +0200 Subject: [PATCH 03/60] Added item_lookup function and better error handling --- boto/ecs/__init__.py | 14 +++++++++++++- boto/ecs/item.py | 11 ++++++++++- 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/boto/ecs/__init__.py b/boto/ecs/__init__.py index d643afc76a..97c0d8dd89 100644 --- a/boto/ecs/__init__.py +++ b/boto/ecs/__init__.py @@ -21,6 +21,7 @@ import boto from boto.connection import AWSQueryConnection, AWSAuthConnection +from boto.exception import BotoServerError import time import urllib import xml.sax @@ -67,7 +68,7 @@ def get_response(self, action, params, page=0, itemSet=None): if response.status != 200: boto.log.error('%s %s' % (response.status, response.reason)) boto.log.error('%s' % body) - raise self.ResponseError(response.status, response.reason, body) + raise BotoServerError(response.status, response.reason, body) if itemSet is None: rs = ItemSet(self, action, params, page) @@ -75,6 +76,8 @@ def get_response(self, action, params, page=0, itemSet=None): rs = itemSet h = handler.XmlHandler(rs, self) xml.sax.parseString(body, h) + if not rs.is_valid: + raise BotoServerError(response.status, '{Code}: {Message}'.format(**rs.errors[0])) return rs # @@ -91,3 +94,12 @@ def item_search(self, search_index, **params): """ params['SearchIndex'] = search_index return self.get_response('ItemSearch', params) + + def item_lookup(self, **params): + """ + Returns items that satisfy the lookup query. 
+ + For a full list of parameters, see: + http://s3.amazonaws.com/awsdocs/Associates/2011-08-01/prod-adv-api-dg-2011-08-01.pdf + """ + return self.get_response('ItemLookup', params) \ No newline at end of file diff --git a/boto/ecs/item.py b/boto/ecs/item.py index a5b8fd1cc6..624088dcf0 100644 --- a/boto/ecs/item.py +++ b/boto/ecs/item.py @@ -106,6 +106,8 @@ def __init__(self, connection, action, params, page=0): self.curItem = None self.total_results = 0 self.total_pages = 0 + self.is_valid = False + self.errors = [] def startElement(self, name, attrs, connection): if name == "Item": @@ -119,7 +121,14 @@ def endElement(self, name, value, connection): self.total_results = value elif name == 'TotalPages': self.total_pages = value - elif name == "Item": + elif name == 'IsValid': + if value == 'True': + self.is_valid = True + elif name == 'Code': + self.errors.append({'Code': value, 'Message': None}) + elif name == 'Message': + self.errors[-1]['Message'] = value + elif name == 'Item': self.objs.append(self.curItem) self._xml.write(self.curItem.to_xml()) self.curItem = None From b8f0b473732009851521a240cac9c091e416424d Mon Sep 17 00:00:00 2001 From: Nathan Humphreys Date: Thu, 27 Feb 2014 15:39:48 +1300 Subject: [PATCH 04/60] Add support for SES API 2010-12-01 methods: SetIdentityFeedbackForwardingEnabled and SetIdentityNotificationTopic #2128 --- boto/ses/connection.py | 43 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/boto/ses/connection.py b/boto/ses/connection.py index df115232d8..a788574f7b 100644 --- a/boto/ses/connection.py +++ b/boto/ses/connection.py @@ -521,3 +521,46 @@ def delete_identity(self, identity): return self._make_request('DeleteIdentity', { 'Identity': identity, }) + + def set_identity_notification_topic(self, identity, notification_type, sns_topic=None): + """Sets an SNS topic to publish bounce or complaint notifications for + emails sent with the given identity as the Source. 
Publishing to topics + may only be disabled when feedback forwarding is enabled. + + :type identity: string + :param identity: An email address or domain name. + + :type notification_type: string + :param notification_type: The type of feedback notifications that will + be published to the specified topic. + Valid Values: Bounce | Complaint + + :type sns_topic: string or None + :param sns_topic: The Amazon Resource Name (ARN) of the Amazon Simple + Notification Service (Amazon SNS) topic. + """ + params = { + 'Identity': identity, + 'NotificationType': notification_type + } + if sns_topic: + params['SnsTopic'] = sns_topic + return self._make_request('SetIdentityNotificationTopic', params) + + def set_identity_feedback_forwarding_enabled(self, identity, forwarding_enabled=True): + """ + Enables or disables SES feedback notification via email. + Feedback forwarding may only be disabled when both complaint and + bounce topics are set. + + :type identity: string + :param identity: An email address or domain name. + + :type forwarding_enabled: bool + :param forwarding_enabled: Specifies whether or not to enable feedback forwarding. 
+ """ + return self._make_request('SetIdentityFeedbackForwardingEnabled', { + 'Identity': identity, + 'ForwardingEnabled': 'true' if forwarding_enabled else 'false' + }) + From e47e1dc8fdf5f802d7ed202df3f4b7917225b2b5 Mon Sep 17 00:00:00 2001 From: Nathan Humphreys Date: Thu, 27 Feb 2014 21:32:43 +1300 Subject: [PATCH 05/60] added test cases for #2128 --- tests/unit/ses/test_identity.py | 89 +++++++++++++++++++++++++++++++++ 1 file changed, 89 insertions(+) diff --git a/tests/unit/ses/test_identity.py b/tests/unit/ses/test_identity.py index 6735e4a869..82af50e349 100644 --- a/tests/unit/ses/test_identity.py +++ b/tests/unit/ses/test_identity.py @@ -78,5 +78,94 @@ def test_ses_get_identity_dkim_list(self): tokens[2]) +class TestSESSetIdentityNotificationTopic(AWSMockServiceTestCase): + connection_class = SESConnection + + def setUp(self): + super(TestSESSetIdentityNotificationTopic, self).setUp() + + def default_body(self): + return """ + + + 299f4af4-b72a-11e1-901f-1fbd90e8104f + + """ + + def test_ses_set_identity_notification_topic_bounce(self): + self.set_http_response(status_code=200) + + response = self.service_connection\ + .set_identity_notification_topic( + identity='user@example.com', + notification_type='Bounce', + sns_topic='arn:aws:sns:us-east-1:123456789012:example') + + response = response['SetIdentityNotificationTopicResponse'] + result = response['SetIdentityNotificationTopicResult'] + + self.assertEqual(2, len(response)) + self.assertEqual(0, len(result)) + + def test_ses_set_identity_notification_topic_complaint(self): + self.set_http_response(status_code=200) + + response = self.service_connection\ + .set_identity_notification_topic( + identity='user@example.com', + notification_type='Complaint', + sns_topic='arn:aws:sns:us-east-1:123456789012:example') + + response = response['SetIdentityNotificationTopicResponse'] + result = response['SetIdentityNotificationTopicResult'] + + self.assertEqual(2, len(response)) + self.assertEqual(0, len(result)) 
+ +class TestSESSetIdentityFeedbackForwardingEnabled(AWSMockServiceTestCase): + connection_class = SESConnection + + def setUp(self): + super(TestSESSetIdentityFeedbackForwardingEnabled, self).setUp() + + def default_body(self): + return """ + + + 299f4af4-b72a-11e1-901f-1fbd90e8104f + + """ + + def test_ses_set_identity_feedback_forwarding_enabled_true(self): + self.set_http_response(status_code=200) + + response = self.service_connection\ + .set_identity_feedback_forwarding_enabled( + identity='user@example.com', + forwarding_enabled=True) + + response = response['SetIdentityFeedbackForwardingEnabledResponse'] + result = response['SetIdentityFeedbackForwardingEnabledResult'] + + self.assertEqual(2, len(response)) + self.assertEqual(0, len(result)) + + def test_ses_set_identity_notification_topic_enabled_false(self): + self.set_http_response(status_code=200) + + response = self.service_connection\ + .set_identity_feedback_forwarding_enabled( + identity='user@example.com', + forwarding_enabled=False) + + response = response['SetIdentityFeedbackForwardingEnabledResponse'] + result = response['SetIdentityFeedbackForwardingEnabledResult'] + + self.assertEqual(2, len(response)) + self.assertEqual(0, len(result)) + + if __name__ == '__main__': unittest.main() From a79b8b7e0e0335d7c07cbdb804db53b793fe19c7 Mon Sep 17 00:00:00 2001 From: sukrit007 Date: Fri, 28 Feb 2014 16:03:37 -0700 Subject: [PATCH 06/60] SWF:Adding support for passing region from Level2 API to Level1 API --- boto/swf/layer2.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/boto/swf/layer2.py b/boto/swf/layer2.py index fbb9f33d08..5ad1c8d344 100644 --- a/boto/swf/layer2.py +++ b/boto/swf/layer2.py @@ -23,6 +23,7 @@ class SWFBase(object): domain = None aws_access_key_id = None aws_secret_access_key = None + region = None def __init__(self, **kwargs): # Set default credentials. 
@@ -33,8 +34,9 @@ def __init__(self, **kwargs): for kwarg in kwargs: setattr(self, kwarg, kwargs[kwarg]) - self._swf = Layer1(self.aws_access_key_id, - self.aws_secret_access_key) + self._swf = Layer1(self.aws_access_key_id, + self.aws_secret_access_key, + region=self.region) def __repr__(self): rep_str = str(self.name) From c31948ba3f026b0a3a2f75238564b6812540c1b7 Mon Sep 17 00:00:00 2001 From: Daniel Beardsley Date: Tue, 4 Mar 2014 17:44:57 -0700 Subject: [PATCH 07/60] S3 Metadata: add failing test case for plus signs Without adding your own encoding layer ontop of boto's, there is no way to reliably store and then retrieve meta-data values that contain plus signs. quote_plus leaves existing pluses as they are while encoding spaces as "+". This turns a value like "x+ +x" into "x+++x" which upon retrieval from S3 using boto is decoded into "x x". A further commit will fix the bug. --- tests/integration/s3/test_key.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/integration/s3/test_key.py b/tests/integration/s3/test_key.py index a3c29d1340..4a897e62c0 100644 --- a/tests/integration/s3/test_key.py +++ b/tests/integration/s3/test_key.py @@ -401,12 +401,14 @@ def test_header_encoding(self): key = self.bucket.new_key('test_header_encoding') key.set_metadata('Cache-control', 'public, max-age=500') + key.set_metadata('Test-Plus', u'A plus (+)') key.set_metadata('Content-disposition', u'filename=Schöne Zeit.txt') key.set_contents_from_string('foo') check = self.bucket.get_key('test_header_encoding') self.assertEqual(check.cache_control, 'public, max-age=500') + self.assertEqual(check.get_metadata('test-plus'), 'A plus (+)') self.assertEqual(check.content_disposition, 'filename=Sch%C3%B6ne+Zeit.txt') self.assertEqual( urllib.unquote_plus(check.content_disposition).decode('utf-8'), From 0cd457906bc53d2f55d8a4b028bf8505b2fbbb17 Mon Sep 17 00:00:00 2001 From: Daniel Beardsley Date: Tue, 4 Mar 2014 17:49:54 -0700 Subject: [PATCH 08/60] S3 Metadata: allow storage 
of plus signs (+) Previously, there was no way to reliably store and then retrieve meta-data values that contained plus signs. quote_plus left existing pluses as they were while encoding spaces as "+". This would turn a value like "x+ +x" into "x+++x" which upon retrieval from S3 using boto would be decoded into "x x". This change allows storage of plus characters "+" in s3 key metadata values. --- boto/connection.py | 2 +- boto/utils.py | 2 +- tests/integration/s3/test_key.py | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/boto/connection.py b/boto/connection.py index a178d1fdf6..051007df64 100644 --- a/boto/connection.py +++ b/boto/connection.py @@ -374,7 +374,7 @@ def authorize(self, connection, **kwargs): val = self.headers[key] if isinstance(val, unicode): safe = '!"#$%&\'()*+,/:;<=>?@[\\]^`{|}~' - self.headers[key] = urllib.quote_plus(val.encode('utf-8'), safe) + self.headers[key] = urllib.quote(val.encode('utf-8'), safe) connection._auth_handler.add_auth(self, **kwargs) diff --git a/boto/utils.py b/boto/utils.py index 18d34f659d..9f071744cb 100644 --- a/boto/utils.py +++ b/boto/utils.py @@ -195,7 +195,7 @@ def get_aws_metadata(headers, provider=None): metadata = {} for hkey in headers.keys(): if hkey.lower().startswith(metadata_prefix): - val = urllib.unquote_plus(headers[hkey]) + val = urllib.unquote(headers[hkey]) try: metadata[hkey[len(metadata_prefix):]] = unicode(val, 'utf-8') except UnicodeDecodeError: diff --git a/tests/integration/s3/test_key.py b/tests/integration/s3/test_key.py index 4a897e62c0..defa95cbba 100644 --- a/tests/integration/s3/test_key.py +++ b/tests/integration/s3/test_key.py @@ -409,8 +409,8 @@ def test_header_encoding(self): self.assertEqual(check.cache_control, 'public, max-age=500') self.assertEqual(check.get_metadata('test-plus'), 'A plus (+)') - self.assertEqual(check.content_disposition, 'filename=Sch%C3%B6ne+Zeit.txt') + self.assertEqual(check.content_disposition, 'filename=Sch%C3%B6ne%20Zeit.txt') 
self.assertEqual( - urllib.unquote_plus(check.content_disposition).decode('utf-8'), + urllib.unquote(check.content_disposition).decode('utf-8'), 'filename=Schöne Zeit.txt'.decode('utf-8') ) From 28ed41a9cf2b972970fc2458c7da642655daf526 Mon Sep 17 00:00:00 2001 From: sukrit007 Date: Wed, 5 Mar 2014 18:27:13 -0700 Subject: [PATCH 09/60] Tests for (SWF:Adding support for passing region from Level2 API to Level1 API) --- tests/unit/swf/test_layer2_base.py | 31 ++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100644 tests/unit/swf/test_layer2_base.py diff --git a/tests/unit/swf/test_layer2_base.py b/tests/unit/swf/test_layer2_base.py new file mode 100644 index 0000000000..7790f458b1 --- /dev/null +++ b/tests/unit/swf/test_layer2_base.py @@ -0,0 +1,31 @@ +import boto.swf.layer2 +from boto.swf.layer2 import SWFBase +from tests.unit import unittest +from mock import Mock + + +MOCK_DOMAIN = 'Mock' +MOCK_ACCESS_KEY = 'inheritable access key' +MOCK_SECRET_KEY = 'inheritable secret key' +MOCK_REGION = 'Mock Region' + + +class TestBase(unittest.TestCase): + """ + Test for SWFBase. + """ + def setUp(self): + boto.swf.layer2.Layer1 = Mock() + self.swf_base = SWFBase( + domain=MOCK_DOMAIN, aws_access_key_id=MOCK_ACCESS_KEY, + aws_secret_access_key=MOCK_SECRET_KEY, region=MOCK_REGION + ) + + def test_instantiation(self): + self.assertEquals(MOCK_DOMAIN, self.swf_base.domain) + self.assertEquals(MOCK_ACCESS_KEY, self.swf_base.aws_access_key_id) + self.assertEquals(MOCK_SECRET_KEY, + self.swf_base.aws_secret_access_key) + self.assertEquals(MOCK_REGION, self.swf_base.region) + boto.swf.layer2.Layer1.assert_called_with( + MOCK_ACCESS_KEY, MOCK_SECRET_KEY, region=MOCK_REGION) From 2a9a98674d11b0a2dadaeff366eb761390010593 Mon Sep 17 00:00:00 2001 From: Daniel Lindsley Date: Fri, 7 Mar 2014 11:58:29 -0800 Subject: [PATCH 10/60] Altered the Route53 bin script to UPSERT. 
--- bin/route53 | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bin/route53 b/bin/route53 index 488a9ca913..fcdea70be9 100755 --- a/bin/route53 +++ b/bin/route53 @@ -131,7 +131,7 @@ def change_record(conn, hosted_zone_id, name, type, newvalues, ttl=600, for old_value in response.resource_records: change1.add_value(old_value) - change2 = changes.add_change("CREATE", name, type, ttl, + change2 = changes.add_change("UPSERT", name, type, ttl, identifier=identifier, weight=weight) for new_value in newvalues.split(','): change2.add_value(new_value) @@ -148,11 +148,11 @@ def change_alias(conn, hosted_zone_id, name, type, new_alias_hosted_zone_id, new continue if response.identifier != identifier or response.weight != weight: continue - change1 = changes.add_change("DELETE", name, type, + change1 = changes.add_change("DELETE", name, type, identifier=response.identifier, weight=response.weight) change1.set_alias(response.alias_hosted_zone_id, response.alias_dns_name) - change2 = changes.add_change("CREATE", name, type, identifier=identifier, weight=weight) + change2 = changes.add_change("UPSERT", name, type, identifier=identifier, weight=weight) change2.set_alias(new_alias_hosted_zone_id, new_alias_dns_name) print changes.commit() From b52767f3fb8b4fa85bd49934c80303d900797bc0 Mon Sep 17 00:00:00 2001 From: Daniel Lindsley Date: Fri, 7 Mar 2014 16:46:52 -0800 Subject: [PATCH 11/60] Fixed default IAM policy documents in cn-north-1. 
--- boto/iam/connection.py | 73 ++++++++++++++++++++++++------- tests/unit/iam/test_connection.py | 72 ++++++++++++++++++++++++++++++ 2 files changed, 130 insertions(+), 15 deletions(-) diff --git a/boto/iam/connection.py b/boto/iam/connection.py index 4872b27493..e697b5ed3b 100644 --- a/boto/iam/connection.py +++ b/boto/iam/connection.py @@ -26,11 +26,32 @@ from boto.iam.summarymap import SummaryMap from boto.connection import AWSQueryConnection - -ASSUME_ROLE_POLICY_DOCUMENT = json.dumps({ - 'Statement': [{'Principal': {'Service': ['ec2.amazonaws.com']}, - 'Effect': 'Allow', - 'Action': ['sts:AssumeRole']}]}) +DEFAULT_POLICY_DOCUMENTS = { + 'default': { + 'Statement': [ + { + 'Principal': { + 'Service': ['ec2.amazonaws.com'] + }, + 'Effect': 'Allow', + 'Action': ['sts:AssumeRole'] + } + ] + }, + 'amazonaws.com.cn': { + 'Statement': [ + { + 'Principal': { + 'Service': ['ec2.amazonaws.com.cn'] + }, + 'Effect': 'Allow', + 'Action': ['sts:AssumeRole'] + } + ] + }, +} +# For backward-compatibility, we'll preserve this here. 
+ASSUME_ROLE_POLICY_DOCUMENT = json.dumps(DEFAULT_POLICY_DOCUMENTS['default']) class IAMConnection(AWSQueryConnection): @@ -40,7 +61,7 @@ class IAMConnection(AWSQueryConnection): def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, is_secure=True, port=None, proxy=None, proxy_port=None, proxy_user=None, proxy_pass=None, host='iam.amazonaws.com', - debug=0, https_connection_factory=None, path='/', + debug=0, https_connection_factory=None, path='/', security_token=None, validate_certs=True, profile_name=None): super(IAMConnection, self).__init__(aws_access_key_id, aws_secret_access_key, @@ -1059,6 +1080,30 @@ def create_instance_profile(self, instance_profile_name, path=None): params['Path'] = path return self.get_response('CreateInstanceProfile', params) + def _build_policy(self, assume_role_policy_document=None): + if assume_role_policy_document is not None: + if isinstance(assume_role_policy_document, basestring): + # Historically, they had to pass a string. If it's a string, + # assume the user has already handled it. + return assume_role_policy_document + else: + + for tld, policy in DEFAULT_POLICY_DOCUMENTS.items(): + if tld is 'default': + # Skip the default. We'll fall back to it if we don't find + # anything. + continue + + if self.host and self.host.endswith(tld): + assume_role_policy_document = policy + break + + if not assume_role_policy_document: + assume_role_policy_document = DEFAULT_POLICY_DOCUMENTS['default'] + + # Dump the policy (either user-supplied ``dict`` or one of the defaults) + return json.dumps(assume_role_policy_document) + def create_role(self, role_name, assume_role_policy_document=None, path=None): """ Creates a new role for your AWS account. @@ -1070,21 +1115,19 @@ def create_role(self, role_name, assume_role_policy_document=None, path=None): :type role_name: string :param role_name: Name of the role to create. 
- :type assume_role_policy_document: string + :type assume_role_policy_document: ``string`` or ``dict`` :param assume_role_policy_document: The policy that grants an entity permission to assume the role. :type path: string :param path: The path to the instance profile. """ - params = {'RoleName': role_name} - if assume_role_policy_document is None: - # This is the only valid assume_role_policy_document currently, so - # this is used as a default value if no assume_role_policy_document - # is provided. - params['AssumeRolePolicyDocument'] = ASSUME_ROLE_POLICY_DOCUMENT - else: - params['AssumeRolePolicyDocument'] = assume_role_policy_document + params = { + 'RoleName': role_name, + 'AssumeRolePolicyDocument': self._build_policy( + assume_role_policy_document + ), + } if path is not None: params['Path'] = path return self.get_response('CreateRole', params) diff --git a/tests/unit/iam/test_connection.py b/tests/unit/iam/test_connection.py index 2e3e8a4b1d..4762e183e2 100644 --- a/tests/unit/iam/test_connection.py +++ b/tests/unit/iam/test_connection.py @@ -164,3 +164,75 @@ def test_delete_saml_provider(self): 'SAMLProviderArn': 'arn' }, ignore_params_values=['Version']) + + +class TestCreateRole(AWSMockServiceTestCase): + connection_class = IAMConnection + + def default_body(self): + return """ + + + + /application_abc/component_xyz/ + arn:aws:iam::123456789012:role/application_abc/component_xyz/S3Access + S3Access + {"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"Service":["ec2.amazonaws.com"]},"Action":["sts:AssumeRole"]}]} + 2012-05-08T23:34:01.495Z + AROADBQP57FF2AEXAMPLE + + + + 4a93ceee-9966-11e1-b624-b1aEXAMPLE7c + + + """ + + def test_create_role_default(self): + self.set_http_response(status_code=200) + response = self.service_connection.create_role('a_name') + + self.assert_request_parameters( + {'Action': 'CreateRole', + 'AssumeRolePolicyDocument': '{"Statement": [{"Action": ["sts:AssumeRole"], "Effect": "Allow", "Principal": {"Service": 
["ec2.amazonaws.com"]}}]}', + 'RoleName': 'a_name'}, + ignore_params_values=['Version']) + + def test_create_role_default_cn_north(self): + self.set_http_response(status_code=200) + self.service_connection.host = 'iam.cn-north-1.amazonaws.com.cn' + response = self.service_connection.create_role('a_name') + + self.assert_request_parameters( + {'Action': 'CreateRole', + 'AssumeRolePolicyDocument': '{"Statement": [{"Action": ["sts:AssumeRole"], "Effect": "Allow", "Principal": {"Service": ["ec2.amazonaws.com.cn"]}}]}', + 'RoleName': 'a_name'}, + ignore_params_values=['Version']) + + def test_create_role_string_policy(self): + self.set_http_response(status_code=200) + response = self.service_connection.create_role( + 'a_name', + # Historical usage. + assume_role_policy_document='{"hello": "policy"}' + ) + + self.assert_request_parameters( + {'Action': 'CreateRole', + 'AssumeRolePolicyDocument': '{"hello": "policy"}', + 'RoleName': 'a_name'}, + ignore_params_values=['Version']) + + def test_create_role_data_policy(self): + self.set_http_response(status_code=200) + response = self.service_connection.create_role( + 'a_name', + # With plain data, we should dump it for them. + assume_role_policy_document={"hello": "policy"} + ) + + self.assert_request_parameters( + {'Action': 'CreateRole', + 'AssumeRolePolicyDocument': '{"hello": "policy"}', + 'RoleName': 'a_name'}, + ignore_params_values=['Version']) From 372926f29bf78f2179d348c74fe9949c5e086a86 Mon Sep 17 00:00:00 2001 From: Daniel Lindsley Date: Fri, 7 Mar 2014 17:08:28 -0800 Subject: [PATCH 12/60] Comments about where else to update code. 
--- boto/auth.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/boto/auth.py b/boto/auth.py index 62446eb37d..2f6c873bc1 100644 --- a/boto/auth.py +++ b/boto/auth.py @@ -904,6 +904,9 @@ def _wrapper(self): return ['hmac-v4'] if hasattr(self, 'region'): + # If you're making changes here, you should also check + # ``boto/iam/connection.py``, as several things there are also + # endpoint-related. if getattr(self.region, 'endpoint', ''): if '.cn-' in self.region.endpoint: return ['hmac-v4'] @@ -921,6 +924,9 @@ def _wrapper(self): return ['hmac-v4-s3'] if hasattr(self, 'host'): + # If you're making changes here, you should also check + # ``boto/iam/connection.py``, as several things there are also + # endpoint-related. if '.cn-' in self.host: return ['hmac-v4-s3'] From caa048c4f142c93c6c142bc189afe2cbe53d13d5 Mon Sep 17 00:00:00 2001 From: Daniel Lindsley Date: Fri, 7 Mar 2014 17:09:18 -0800 Subject: [PATCH 13/60] Fixed ``get_signin_url`` behavior. This is pretty much broken right now. This improves it & should make cn-north-1 work correctly now as well. --- boto/iam/connection.py | 26 +++++++++++- tests/unit/iam/test_connection.py | 68 +++++++++++++++++++++++++++++++ 2 files changed, 92 insertions(+), 2 deletions(-) diff --git a/boto/iam/connection.py b/boto/iam/connection.py index e697b5ed3b..c6ee30070f 100644 --- a/boto/iam/connection.py +++ b/boto/iam/connection.py @@ -1027,13 +1027,35 @@ def get_signin_url(self, service='ec2'): :param service: Default service to go to in the console. """ alias = self.get_account_alias() + if not alias: raise Exception('No alias associated with this account. Please use iam.create_account_alias() first.') + resp = alias.get('list_account_aliases_response', {}) + result = resp.get('list_account_aliases_result', {}) + aliases = result.get('account_aliases', []) + + if not len(aliases): + raise Exception('No alias associated with this account. 
Please use iam.create_account_alias() first.') + + # We'll just use the first one we find. + alias = aliases[0] + if self.host == 'iam.us-gov.amazonaws.com': - return "https://%s.signin.amazonaws-us-gov.com/console/%s" % (alias, service) + return "https://%s.signin.amazonaws-us-gov.com/console/%s" % ( + alias, + service + ) + elif self.host.endswith('amazonaws.com.cn'): + return "https://%s.signin.amazonaws.cn/console/%s" % ( + alias, + service + ) else: - return "https://%s.signin.aws.amazon.com/console/%s" % (alias, service) + return "https://%s.signin.aws.amazon.com/console/%s" % ( + alias, + service + ) def get_account_summary(self): """ diff --git a/tests/unit/iam/test_connection.py b/tests/unit/iam/test_connection.py index 4762e183e2..692063b846 100644 --- a/tests/unit/iam/test_connection.py +++ b/tests/unit/iam/test_connection.py @@ -236,3 +236,71 @@ def test_create_role_data_policy(self): 'AssumeRolePolicyDocument': '{"hello": "policy"}', 'RoleName': 'a_name'}, ignore_params_values=['Version']) + + +class TestGetSigninURL(AWSMockServiceTestCase): + connection_class = IAMConnection + + def default_body(self): + return """ + + + false + + foocorporation + anotherunused + + + + c5a076e9-f1b0-11df-8fbe-45274EXAMPLE + + + """ + + def test_get_signin_url_default(self): + self.set_http_response(status_code=200) + url = self.service_connection.get_signin_url() + self.assertEqual( + url, + 'https://foocorporation.signin.aws.amazon.com/console/ec2' + ) + + def test_get_signin_url_s3(self): + self.set_http_response(status_code=200) + url = self.service_connection.get_signin_url(service='s3') + self.assertEqual( + url, + 'https://foocorporation.signin.aws.amazon.com/console/s3' + ) + + def test_get_signin_url_cn_north(self): + self.set_http_response(status_code=200) + self.service_connection.host = 'iam.cn-north-1.amazonaws.com.cn' + url = self.service_connection.get_signin_url() + self.assertEqual( + url, + 'https://foocorporation.signin.aws.amazon.com/console/ec2' + 
) + + +class TestGetSigninURL(AWSMockServiceTestCase): + connection_class = IAMConnection + + def default_body(self): + return """ + + + false + + + + c5a076e9-f1b0-11df-8fbe-45274EXAMPLE + + + """ + + def test_get_signin_url_no_aliases(self): + self.set_http_response(status_code=200) + + with self.assertRaises(Exception): + self.service_connection.get_signin_url() From 0e7d5492705915aff6dc17b7b547dfc13f8091d9 Mon Sep 17 00:00:00 2001 From: Zihao Yu Date: Tue, 11 Mar 2014 09:10:43 -0400 Subject: [PATCH 14/60] change alias_evaluate_target_health to boolean --- boto/route53/record.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/boto/route53/record.py b/boto/route53/record.py index e04e009e27..87359b167b 100644 --- a/boto/route53/record.py +++ b/boto/route53/record.py @@ -341,7 +341,7 @@ def endElement(self, name, value, connection): elif name == 'SetIdentifier': self.identifier = value elif name == 'EvaluateTargetHealth': - self.alias_evaluate_target_health = value + self.alias_evaluate_target_health = value.lower() == 'true' elif name == 'Weight': self.weight = value elif name == 'Region': From d2834760019546bb15c5071ddd8392e66d11cb96 Mon Sep 17 00:00:00 2001 From: Zihao Yu Date: Tue, 11 Mar 2014 09:11:00 -0400 Subject: [PATCH 15/60] update unit tests of route53 --- tests/unit/route53/test_connection.py | 46 ++++++++++++++++++++------- 1 file changed, 35 insertions(+), 11 deletions(-) diff --git a/tests/unit/route53/test_connection.py b/tests/unit/route53/test_connection.py index 34e803dd99..5859be43ae 100644 --- a/tests/unit/route53/test_connection.py +++ b/tests/unit/route53/test_connection.py @@ -263,14 +263,25 @@ def default_body(self): - us-west-2.example.com. + us-west-2-evaluate-health.example.com. A - latency-example-us-west-2 + latency-example-us-west-2-evaluate-health us-west-2 ABCDEFG123456 true - example-123456.us-west-2.elb.amazonaws.com. + example-123456-evaluate-health.us-west-2.elb.amazonaws.com. 
+ + + + us-west-2-no-evaluate-health.example.com. + A + latency-example-us-west-2-no-evaluate-health + us-west-2 + + ABCDEFG567890 + false + example-123456-no-evaluate-health.us-west-2.elb.amazonaws.com. @@ -294,11 +305,24 @@ def test_get_all_rr_sets(self): self.assertTrue(response[0].ttl, "60") self.assertTrue(response[0].type, "A") - latency_record = response[2] - self.assertEqual(latency_record.name, 'us-west-2.example.com.') - self.assertEqual(latency_record.type, 'A') - self.assertEqual(latency_record.identifier, 'latency-example-us-west-2') - self.assertEqual(latency_record.region, 'us-west-2') - self.assertEqual(latency_record.alias_hosted_zone_id, 'ABCDEFG123456') - self.assertEqual(latency_record.alias_evaluate_target_health, 'true') - self.assertEqual(latency_record.alias_dns_name, 'example-123456.us-west-2.elb.amazonaws.com.') + evaluate_record = response[2] + self.assertEqual(evaluate_record.name, 'us-west-2-evaluate-health.example.com.') + self.assertEqual(evaluate_record.type, 'A') + self.assertEqual(evaluate_record.identifier, 'latency-example-us-west-2-evaluate-health') + self.assertEqual(evaluate_record.region, 'us-west-2') + self.assertEqual(evaluate_record.alias_hosted_zone_id, 'ABCDEFG123456') + self.assertTrue(evaluate_record.alias_evaluate_target_health) + self.assertEqual(evaluate_record.alias_dns_name, 'example-123456-evaluate-health.us-west-2.elb.amazonaws.com.') + evaluate_xml = evaluate_record.to_xml() + self.assertTrue('true' in evaluate_xml) + + no_evaluate_record = response[3] + self.assertEqual(no_evaluate_record.name, 'us-west-2-no-evaluate-health.example.com.') + self.assertEqual(no_evaluate_record.type, 'A') + self.assertEqual(no_evaluate_record.identifier, 'latency-example-us-west-2-no-evaluate-health') + self.assertEqual(no_evaluate_record.region, 'us-west-2') + self.assertEqual(no_evaluate_record.alias_hosted_zone_id, 'ABCDEFG567890') + self.assertFalse(no_evaluate_record.alias_evaluate_target_health) + 
self.assertEqual(no_evaluate_record.alias_dns_name, 'example-123456-no-evaluate-health.us-west-2.elb.amazonaws.com.') + no_evaluate_xml = no_evaluate_record.to_xml() + self.assertTrue('false' in no_evaluate_xml) From 2c673076c94639b1ef99d16f757d8714313e5bea Mon Sep 17 00:00:00 2001 From: Nikola Kotur Date: Tue, 11 Mar 2014 14:32:44 +0100 Subject: [PATCH 16/60] Documentation fix. --- boto/gs/key.py | 125 +++++++++++++++++++++++++------------------------ 1 file changed, 65 insertions(+), 60 deletions(-) diff --git a/boto/gs/key.py b/boto/gs/key.py index 277e7c7150..bc862b3aeb 100644 --- a/boto/gs/key.py +++ b/boto/gs/key.py @@ -410,19 +410,20 @@ def set_contents_from_file(self, fp, headers=None, replace=True, contents. :type fp: file - :param fp: the file whose contents are to be uploaded + :param fp: The file whose contents are to be uploaded. :type headers: dict - :param headers: additional HTTP headers to be sent with the PUT request. + :param headers: (optional) Additional HTTP headers to be sent with the + PUT request. :type replace: bool - :param replace: If this parameter is False, the method will first check - to see if an object exists in the bucket with the same key. If it - does, it won't overwrite it. The default value is True which will - overwrite the object. + :param replace: (optional) If this parameter is False, the method will + first check to see if an object exists in the bucket with the same + key. If it does, it won't overwrite it. The default value is True + which will overwrite the object. :type cb: function - :param cb: a callback function that will be called to report + :param cb: (optional) Callback function that will be called to report progress on the upload. The callback should accept two integer parameters, the first representing the number of bytes that have been successfully transmitted to GS and the second representing the @@ -435,43 +436,44 @@ def set_contents_from_file(self, fp, headers=None, replace=True, during the file transfer. 
:type policy: :class:`boto.gs.acl.CannedACLStrings` - :param policy: A canned ACL policy that will be applied to the new key - in GS. + :param policy: (optional) A canned ACL policy that will be applied to + the new key in GS. - :type md5: A tuple containing the hexdigest version of the MD5 checksum - of the file as the first element and the Base64-encoded version of - the plain checksum as the second element. This is the same format - returned by the compute_md5 method. - :param md5: If you need to compute the MD5 for any reason prior to - upload, it's silly to have to do it twice so this param, if present, - will be used as the MD5 values of the file. Otherwise, the checksum - will be computed. + :type md5: tuple + :param md5: (optional) A tuple containing the hexdigest version of the + MD5 checksum of the file as the first element and the + Base64-encoded version of the plain checksum as the second element. + This is the same format returned by the compute_md5 method. - :type res_upload_handler: ResumableUploadHandler - :param res_upload_handler: If provided, this handler will perform the - upload. + If you need to compute the MD5 for any reason prior to upload, it's + silly to have to do it twice so this param, if present, will be + used as the MD5 values of the file. Otherwise, the checksum will be + computed. + + :type res_upload_handler: :py:class:`boto.gs.resumable_upload_handler.ResumableUploadHandler` + :param res_upload_handler: (optional) If provided, this handler will + perform the upload. :type size: int - :param size: (optional) The Maximum number of bytes to read from - the file pointer (fp). This is useful when uploading - a file in multiple parts where you are splitting the - file up into different ranges to be uploaded. If not - specified, the default behaviour is to read all bytes - from the file pointer. Less bytes may be available. + :param size: (optional) The Maximum number of bytes to read from the + file pointer (fp). 
This is useful when uploading a file in multiple + parts where you are splitting the file up into different ranges to + be uploaded. If not specified, the default behaviour is to read all + bytes from the file pointer. Less bytes may be available. + Notes: - 1. The "size" parameter currently cannot be used when - a resumable upload handler is given but is still - useful for uploading part of a file as implemented - by the parent class. - 2. At present Google Cloud Storage does not support - multipart uploads. + 1. The "size" parameter currently cannot be used when a + resumable upload handler is given but is still useful for + uploading part of a file as implemented by the parent class. + 2. At present Google Cloud Storage does not support multipart + uploads. :type rewind: bool - :param rewind: (optional) If True, the file pointer (fp) will be - rewound to the start before any bytes are read from - it. The default behaviour is False which reads from - the current position of the file pointer (fp). + :param rewind: (optional) If True, the file pointer (fp) will be + rewound to the start before any bytes are read from it. The default + behaviour is False which reads from the current position of the + file pointer (fp). :type if_generation: int :param if_generation: (optional) If set to a generation number, the @@ -588,44 +590,47 @@ def set_contents_from_filename(self, filename, headers=None, replace=True, parameters. :type filename: string - :param filename: The name of the file that you want to put onto GS + :param filename: The name of the file that you want to put onto GS. :type headers: dict - :param headers: Additional headers to pass along with the request to GS. + :param headers: (optional) Additional headers to pass along with the + request to GS. :type replace: bool - :param replace: If True, replaces the contents of the file if it - already exists. + :param replace: (optional) If True, replaces the contents of the file + if it already exists. 
:type cb: function - :param cb: (optional) a callback function that will be called to report - progress on the download. The callback should accept two integer + :param cb: (optional) Callback function that will be called to report + progress on the upload. The callback should accept two integer parameters, the first representing the number of bytes that have - been successfully transmitted from GS and the second representing - the total number of bytes that need to be transmitted. + been successfully transmitted to GS and the second representing the + total number of bytes that need to be transmitted. - :type cb: int + :type num_cb: int :param num_cb: (optional) If a callback is specified with the cb parameter this parameter determines the granularity of the callback by defining the maximum number of times the callback will be called during the file transfer. - :type policy: :class:`boto.gs.acl.CannedACLStrings` - :param policy: A canned ACL policy that will be applied to the new key - in GS. + :type policy: :py:attribute:`boto.gs.acl.CannedACLStrings` + :param policy: (optional) A canned ACL policy that will be applied to + the new key in GS. + + :type md5: tuple + :param md5: (optional) A tuple containing the hexdigest version of the + MD5 checksum of the file as the first element and the + Base64-encoded version of the plain checksum as the second element. + This is the same format returned by the compute_md5 method. + + If you need to compute the MD5 for any reason prior to upload, it's + silly to have to do it twice so this param, if present, will be + used as the MD5 values of the file. Otherwise, the checksum will be + computed. - :type md5: A tuple containing the hexdigest version of the MD5 checksum - of the file as the first element and the Base64-encoded version of - the plain checksum as the second element. This is the same format - returned by the compute_md5 method. 
- :param md5: If you need to compute the MD5 for any reason prior to - upload, it's silly to have to do it twice so this param, if present, - will be used as the MD5 values of the file. Otherwise, the checksum - will be computed. - - :type res_upload_handler: ResumableUploadHandler - :param res_upload_handler: If provided, this handler will perform the - upload. + :type res_upload_handler: :py:class:`boto.gs.resumable_upload_handler.ResumableUploadHandler` + :param res_upload_handler: (optional) If provided, this handler will + perform the upload. :type if_generation: int :param if_generation: (optional) If set to a generation number, the From 74c677170c0c6ae63e255a11fd1daa1537f52ebc Mon Sep 17 00:00:00 2001 From: Ed Sesek Date: Tue, 11 Mar 2014 11:11:29 -0700 Subject: [PATCH 17/60] Update vault.py Changed from using isoformat() to strftime() to match what AWS accepts for start_date and end_date --- boto/glacier/vault.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/boto/glacier/vault.py b/boto/glacier/vault.py index e7d4e27d24..bb9fa4cc6b 100644 --- a/boto/glacier/vault.py +++ b/boto/glacier/vault.py @@ -343,9 +343,9 @@ def retrieve_inventory(self, sns_topic=None, rparams = {} if start_date is not None: - rparams['StartDate'] = start_date.isoformat() + rparams['StartDate'] = start_date.strftime('%Y-%m-%dT%H:%M:%S%Z') if end_date is not None: - rparams['EndDate'] = end_date.isoformat() + rparams['EndDate'] = end_date.strftime('%Y-%m-%dT%H:%M:%S%Z') if limit is not None: rparams['Limit'] = limit From 81ef7fe6483c38c86a479e838b1999a29a8f4cd9 Mon Sep 17 00:00:00 2001 From: Ed Sesek Date: Tue, 11 Mar 2014 17:42:55 -0700 Subject: [PATCH 18/60] Update test_layer2.py Modified unit test for test_initiate_job to check for properly formatted start_date and end_date parameters to retrieve_inventory. 
--- tests/unit/glacier/test_layer2.py | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/tests/unit/glacier/test_layer2.py b/tests/unit/glacier/test_layer2.py index 0c69688046..538b89aeb6 100644 --- a/tests/unit/glacier/test_layer2.py +++ b/tests/unit/glacier/test_layer2.py @@ -33,7 +33,7 @@ from StringIO import StringIO -from datetime import datetime +from datetime import datetime, tzinfo, timedelta # Some fixture data from the Glacier docs FIXTURE_VAULT = { @@ -211,16 +211,28 @@ def test_delete_vault(self): "archive") def test_initiate_job(self): + class UTC(tzinfo): + """UTC""" + + def utcoffset(self, dt): + return timedelta(0) + + def tzname(self, dt): + return "Z" + + def dst(self, dt): + return timedelta(0) + self.mock_layer1.initiate_job.return_value = {'JobId': 'job-id'} - self.vault.retrieve_inventory(start_date=datetime(2014, 01, 01), - end_date=datetime(2014, 01, 02), + self.vault.retrieve_inventory(start_date=datetime(2014, 01, 01, tzinfo=UTC()), + end_date=datetime(2014, 01, 02, tzinfo=UTC()), limit=100) self.mock_layer1.initiate_job.assert_called_with( 'examplevault', { 'Type': 'inventory-retrieval', 'InventoryRetrievalParameters': { - 'StartDate': '2014-01-01T00:00:00', - 'EndDate': '2014-01-02T00:00:00', + 'StartDate': '2014-01-01T00:00:00Z', + 'EndDate': '2014-01-02T00:00:00Z', 'Limit': 100 } }) From 9dbfaad17ea5d718d23ba12621e9991291022291 Mon Sep 17 00:00:00 2001 From: Daniel Lindsley Date: Thu, 13 Mar 2014 15:47:50 -0700 Subject: [PATCH 19/60] Added missed tests. 
--- tests/unit/ecs/__init__.py | 0 tests/unit/ecs/test_connection.py | 70 +++++++++++++++++++++++++++++++ 2 files changed, 70 insertions(+) create mode 100644 tests/unit/ecs/__init__.py create mode 100644 tests/unit/ecs/test_connection.py diff --git a/tests/unit/ecs/__init__.py b/tests/unit/ecs/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/unit/ecs/test_connection.py b/tests/unit/ecs/test_connection.py new file mode 100644 index 0000000000..ed291c797b --- /dev/null +++ b/tests/unit/ecs/test_connection.py @@ -0,0 +1,70 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# + +from tests.unit import unittest +from boto.ecs import ECSConnection +from tests.unit import AWSMockServiceTestCase + + +class TestECSConnection(AWSMockServiceTestCase): + connection_class = ECSConnection + + def default_body(self): + return """ + + + True + + B00008OE6I + + + + B00008OE6I + + Canon + Photography + Canon PowerShot S400 4MP Digital Camera w/ 3x Optical Zoom + + + + """ + + def test_item_lookup(self): + self.set_http_response(status_code=200) + item_set = self.service_connection.item_lookup( + ItemId='0316067938', + ResponseGroup='Reviews' + ) + + self.assert_request_parameters( + {'ItemId': '0316067938', + 'Operation': 'ItemLookup', + 'ResponseGroup': 'Reviews', + 'Service': 'AWSECommerceService'}, + ignore_params_values=['Version', 'AWSAccessKeyId', + 'SignatureMethod', 'SignatureVersion', + 'Timestamp']) + + items = list(item_set) + self.assertEqual(len(items), 1) + self.assertTrue(item_set.is_valid) + self.assertEqual(items[0].ASIN, 'B00008OE6I') From 6e5ed17803e14fa2513bdd12923218f45cbbd92a Mon Sep 17 00:00:00 2001 From: Daniel Lindsley Date: Fri, 14 Mar 2014 12:54:11 -0700 Subject: [PATCH 20/60] Fixed exception handling with loaded data. --- boto/exception.py | 59 ++++++++++++++--------- tests/integration/kinesis/test_kinesis.py | 19 +++++--- 2 files changed, 50 insertions(+), 28 deletions(-) diff --git a/boto/exception.py b/boto/exception.py index 99205c9f28..fd037ff8a4 100644 --- a/boto/exception.py +++ b/boto/exception.py @@ -85,29 +85,44 @@ def __init__(self, status, reason, body=None, *args): # Attempt to parse the error response. If body isn't present, # then just ignore the error response. if self.body: - try: - h = handler.XmlHandlerWrapper(self, self) - h.parseString(self.body) - except (TypeError, xml.sax.SAXParseException), pe: - # What if it's JSON? Let's try that. + # Check if it looks like a ``dict``. + if hasattr(self.body, 'items'): + # It's not a string, so trying to parse it will fail. 
+ # But since it's data, we can work with that. + self.request_id = self.body.get('RequestId', None) + + if 'Error' in self.body: + # XML-style + error = self.body.get('Error', {}) + self.error_code = error.get('Code', None) + self.message = error.get('Message', None) + else: + # JSON-style. + self.message = self.body.get('message', None) + else: try: - parsed = json.loads(self.body) - - if 'RequestId' in parsed: - self.request_id = parsed['RequestId'] - if 'Error' in parsed: - if 'Code' in parsed['Error']: - self.error_code = parsed['Error']['Code'] - if 'Message' in parsed['Error']: - self.message = parsed['Error']['Message'] - - except ValueError: - # Remove unparsable message body so we don't include garbage - # in exception. But first, save self.body in self.error_message - # because occasionally we get error messages from Eucalyptus - # that are just text strings that we want to preserve. - self.message = self.body - self.body = None + h = handler.XmlHandlerWrapper(self, self) + h.parseString(self.body) + except (TypeError, xml.sax.SAXParseException), pe: + # What if it's JSON? Let's try that. + try: + parsed = json.loads(self.body) + + if 'RequestId' in parsed: + self.request_id = parsed['RequestId'] + if 'Error' in parsed: + if 'Code' in parsed['Error']: + self.error_code = parsed['Error']['Code'] + if 'Message' in parsed['Error']: + self.message = parsed['Error']['Message'] + + except (TypeError, ValueError): + # Remove unparsable message body so we don't include garbage + # in exception. But first, save self.body in self.error_message + # because occasionally we get error messages from Eucalyptus + # that are just text strings that we want to preserve. 
+ self.message = self.body + self.body = None def __getattr__(self, name): if name == 'error_message': diff --git a/tests/integration/kinesis/test_kinesis.py b/tests/integration/kinesis/test_kinesis.py index 3930f2f7fe..3f006002ad 100644 --- a/tests/integration/kinesis/test_kinesis.py +++ b/tests/integration/kinesis/test_kinesis.py @@ -20,9 +20,11 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. -import boto import time +import boto +from boto.kinesis.exceptions import ResourceNotFoundException + from unittest import TestCase @@ -34,15 +36,12 @@ class TestKinesis(TestCase): def setUp(self): self.kinesis = boto.connect_kinesis() - def tearDown(self): - # Delete the stream even if there is a failure - self.kinesis.delete_stream('test') - def test_kinesis(self): kinesis = self.kinesis # Create a new stream kinesis.create_stream('test', 1) + self.addCleanup(self.kinesis.delete_stream, 'test') # Wait for the stream to be ready tries = 0 @@ -70,7 +69,7 @@ def test_kinesis(self): while tries < 100: tries += 1 time.sleep(1) - + response = kinesis.get_records(shard_iterator) shard_iterator = response['NextShardIterator'] @@ -82,3 +81,11 @@ def test_kinesis(self): # Read the data, which should be the same as what we wrote self.assertEqual(1, len(response['Records'])) self.assertEqual(data, response['Records'][0]['Data']) + + def test_describe_non_existent_stream(self): + with self.assertRaises(ResourceNotFoundException) as cm: + self.kinesis.describe_stream('this-stream-shouldnt-exist') + + # Assert things about the data we passed along. + self.assertEqual(cm.exception.error_code, None) + self.assertTrue('not found' in cm.exception.message) From 58e3c2793e1a798f5a8f5cb3bdc049adf9d26337 Mon Sep 17 00:00:00 2001 From: Daniel Lindsley Date: Fri, 14 Mar 2014 14:54:26 -0700 Subject: [PATCH 21/60] Fixed how ``reverse`` works in DDBv2. 
--- boto/dynamodb2/table.py | 8 +- tests/integration/dynamodb2/test_highlevel.py | 80 +++++++++++++++++++ tests/unit/dynamodb2/test_table.py | 6 +- 3 files changed, 88 insertions(+), 6 deletions(-) diff --git a/boto/dynamodb2/table.py b/boto/dynamodb2/table.py index 338ced19a0..504b4d8bc0 100644 --- a/boto/dynamodb2/table.py +++ b/boto/dynamodb2/table.py @@ -824,7 +824,7 @@ def query(self, limit=None, index=None, reverse=False, consistent=False, (Default: ``None``) Optionally accepts a ``reverse`` parameter, which will present the - results in reverse order. (Default: ``None`` - normal order) + results in reverse order. (Default: ``False`` - normal order) Optionally accepts a ``consistent`` parameter, which should be a boolean. If you provide ``True``, it will force a consistent read of @@ -975,12 +975,14 @@ def _query(self, limit=None, index=None, reverse=False, consistent=False, kwargs = { 'limit': limit, 'index_name': index, - 'scan_index_forward': reverse, 'consistent_read': consistent, 'select': select, - 'attributes_to_get': attributes_to_get + 'attributes_to_get': attributes_to_get, } + if reverse: + kwargs['scan_index_forward'] = False + if exclusive_start_key: kwargs['exclusive_start_key'] = {} diff --git a/tests/integration/dynamodb2/test_highlevel.py b/tests/integration/dynamodb2/test_highlevel.py index 1e3335b704..8d57168f15 100644 --- a/tests/integration/dynamodb2/test_highlevel.py +++ b/tests/integration/dynamodb2/test_highlevel.py @@ -516,3 +516,83 @@ def test_query_with_limits(self): ['joe', 'jane', 'joe', 'joe', 'jane', 'joe'] ) self.assertEqual(results._fetches, 3) + + def test_query_with_reverse(self): + posts = Table.create('posts', schema=[ + HashKey('thread'), + RangeKey('posted_on') + ], throughput={ + 'read': 5, + 'write': 5, + }) + self.addCleanup(posts.delete) + + # Wait for it. + time.sleep(60) + + # Add some data. 
+ test_data_path = os.path.join( + os.path.dirname(__file__), + 'forum_test_data.json' + ) + with open(test_data_path, 'r') as test_data: + data = json.load(test_data) + + with posts.batch_write() as batch: + for post in data: + batch.put_item(post) + + time.sleep(5) + + # Test the default order (ascending). + results = posts.query( + thread__eq='Favorite chiptune band?', + posted_on__gte='2013-12-24T00:00:00' + ) + self.assertEqual( + [post['posted_on'] for post in results], + [ + '2013-12-24T12:30:54', + '2013-12-24T12:35:40', + '2013-12-24T13:45:30', + '2013-12-24T14:15:14', + '2013-12-24T14:25:33', + '2013-12-24T15:22:22', + ] + ) + + # Test the explicit ascending order. + results = posts.query( + thread__eq='Favorite chiptune band?', + posted_on__gte='2013-12-24T00:00:00', + reverse=False + ) + self.assertEqual( + [post['posted_on'] for post in results], + [ + '2013-12-24T12:30:54', + '2013-12-24T12:35:40', + '2013-12-24T13:45:30', + '2013-12-24T14:15:14', + '2013-12-24T14:25:33', + '2013-12-24T15:22:22', + ] + ) + + # Test the explicit descending order. + results = posts.query( + thread__eq='Favorite chiptune band?', + posted_on__gte='2013-12-24T00:00:00', + reverse=True + ) + self.assertEqual( + [post['posted_on'] for post in results], + [ + '2013-12-24T15:22:22', + '2013-12-24T14:25:33', + '2013-12-24T14:15:14', + '2013-12-24T13:45:30', + '2013-12-24T12:35:40', + '2013-12-24T12:30:54', + ] + ) diff --git a/tests/unit/dynamodb2/test_table.py b/tests/unit/dynamodb2/test_table.py index 52ec53ddc1..b7789c7b82 100644 --- a/tests/unit/dynamodb2/test_table.py +++ b/tests/unit/dynamodb2/test_table.py @@ -883,7 +883,7 @@ def test_max_page_size_and_bigger_limit_fetch_more(self): self.results.fetch_more() self.result_function.assert_called_with('john', greeting='Hello', limit=10) self.result_function.reset_mock() - + def test_fetch_more(self): # First "page". 
self.results.fetch_more() @@ -2149,7 +2149,7 @@ def test_private_query(self): mock_query.assert_called_once_with('users', consistent_read=False, - scan_index_forward=True, + scan_index_forward=False, index_name=None, attributes_to_get=None, limit=4, @@ -2196,7 +2196,7 @@ def test_private_query(self): }, index_name=None, attributes_to_get=None, - scan_index_forward=True, + scan_index_forward=False, limit=4, exclusive_start_key={ 'username': { From 90672c40636eb4c84e36ae8c45236086a8bacdd8 Mon Sep 17 00:00:00 2001 From: Daniel Lindsley Date: Fri, 14 Mar 2014 15:09:40 -0700 Subject: [PATCH 22/60] Test fixes. --- tests/integration/dynamodb2/test_highlevel.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/dynamodb2/test_highlevel.py b/tests/integration/dynamodb2/test_highlevel.py index 8d57168f15..5d6244458a 100644 --- a/tests/integration/dynamodb2/test_highlevel.py +++ b/tests/integration/dynamodb2/test_highlevel.py @@ -515,10 +515,10 @@ def test_query_with_limits(self): [post['posted_by'] for post in all_posts], ['joe', 'jane', 'joe', 'joe', 'jane', 'joe'] ) - self.assertEqual(results._fetches, 3) + self.assertTrue(results._fetches >= 3) def test_query_with_reverse(self): - posts = Table.create('posts', schema=[ + posts = Table.create('more-posts', schema=[ HashKey('thread'), RangeKey('posted_on') ], throughput={ From 3db14256c8b4a8afeae1ec8a66bd91c1a89d7ce5 Mon Sep 17 00:00:00 2001 From: Daniel Lindsley Date: Fri, 14 Mar 2014 15:32:18 -0700 Subject: [PATCH 23/60] Fixed doing ``describe`` on complex tables. 
--- boto/dynamodb2/table.py | 25 ++++++++++---- tests/unit/dynamodb2/test_table.py | 53 ++++++++++++++++++++++++++++-- 2 files changed, 69 insertions(+), 9 deletions(-) diff --git a/boto/dynamodb2/table.py b/boto/dynamodb2/table.py index 338ced19a0..da91034b82 100644 --- a/boto/dynamodb2/table.py +++ b/boto/dynamodb2/table.py @@ -7,7 +7,8 @@ from boto.dynamodb2.items import Item from boto.dynamodb2.layer1 import DynamoDBConnection from boto.dynamodb2.results import ResultSet, BatchGetResultSet -from boto.dynamodb2.types import Dynamizer, FILTER_OPERATORS, QUERY_OPERATORS +from boto.dynamodb2.types import (Dynamizer, FILTER_OPERATORS, QUERY_OPERATORS, + STRING) from boto.exception import JSONResponseError @@ -232,18 +233,29 @@ def create(cls, table_name, schema, throughput=None, indexes=None, ) return table - def _introspect_schema(self, raw_schema): + def _introspect_schema(self, raw_schema, raw_attributes=None): """ Given a raw schema structure back from a DynamoDB response, parse out & build the high-level Python objects that represent them. """ schema = [] + sane_attributes = {} + + if raw_attributes: + for field in raw_attributes: + sane_attributes[field['AttributeName']] = field['AttributeType'] for field in raw_schema: + data_type = sane_attributes.get(field['AttributeName'], STRING) + if field['KeyType'] == 'HASH': - schema.append(HashKey(field['AttributeName'])) + schema.append( + HashKey(field['AttributeName'], data_type=data_type) + ) elif field['KeyType'] == 'RANGE': - schema.append(RangeKey(field['AttributeName'])) + schema.append( + RangeKey(field['AttributeName'], data_type=data_type) + ) else: raise exceptions.UnknownSchemaFieldError( "%s was seen, but is unknown. 
Please report this at " @@ -280,7 +292,7 @@ def _introspect_indexes(self, raw_indexes): ) name = field['IndexName'] - kwargs['parts'] = self._introspect_schema(field['KeySchema']) + kwargs['parts'] = self._introspect_schema(field['KeySchema'], None) indexes.append(index_klass(name, **kwargs)) return indexes @@ -319,7 +331,8 @@ def describe(self): if not self.schema: # Since we have the data, build the schema. raw_schema = result['Table'].get('KeySchema', []) - self.schema = self._introspect_schema(raw_schema) + raw_attributes = result['Table'].get('AttributeDefinitions', []) + self.schema = self._introspect_schema(raw_schema, raw_attributes) if not self.indexes: # Build the index information as well. diff --git a/tests/unit/dynamodb2/test_table.py b/tests/unit/dynamodb2/test_table.py index 52ec53ddc1..7fd91004a3 100644 --- a/tests/unit/dynamodb2/test_table.py +++ b/tests/unit/dynamodb2/test_table.py @@ -9,7 +9,7 @@ from boto.dynamodb2.layer1 import DynamoDBConnection from boto.dynamodb2.results import ResultSet, BatchGetResultSet from boto.dynamodb2.table import Table -from boto.dynamodb2.types import (STRING, NUMBER, +from boto.dynamodb2.types import (STRING, NUMBER, BINARY, FILTER_OPERATORS, QUERY_OPERATORS) from boto.exception import JSONResponseError @@ -1154,7 +1154,17 @@ def test__introspect_schema(self): "KeyType": "RANGE" } ] - schema_1 = self.users._introspect_schema(raw_schema_1) + raw_attributes_1 = [ + { + 'AttributeName': 'username', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'date_joined', + 'AttributeType': 'S' + }, + ] + schema_1 = self.users._introspect_schema(raw_schema_1, raw_attributes_1) self.assertEqual(len(schema_1), 2) self.assertTrue(isinstance(schema_1[0], HashKey)) self.assertEqual(schema_1[0].name, 'username') @@ -1167,12 +1177,49 @@ def test__introspect_schema(self): "KeyType": "BTREE" }, ] + raw_attributes_2 = [ + { + 'AttributeName': 'username', + 'AttributeType': 'S' + }, + ] self.assertRaises( 
exceptions.UnknownSchemaFieldError, self.users._introspect_schema, - raw_schema_2 + raw_schema_2, + raw_attributes_2 ) + # Test a complex schema & ensure the types come back correctly. + raw_schema_3 = [ + { + "AttributeName": "user_id", + "KeyType": "HASH" + }, + { + "AttributeName": "junk", + "KeyType": "RANGE" + } + ] + raw_attributes_3 = [ + { + 'AttributeName': 'user_id', + 'AttributeType': 'N' + }, + { + 'AttributeName': 'junk', + 'AttributeType': 'B' + }, + ] + schema_3 = self.users._introspect_schema(raw_schema_3, raw_attributes_3) + self.assertEqual(len(schema_3), 2) + self.assertTrue(isinstance(schema_3[0], HashKey)) + self.assertEqual(schema_3[0].name, 'user_id') + self.assertEqual(schema_3[0].data_type, NUMBER) + self.assertTrue(isinstance(schema_3[1], RangeKey)) + self.assertEqual(schema_3[1].name, 'junk') + self.assertEqual(schema_3[1].data_type, BINARY) + def test__introspect_indexes(self): raw_indexes_1 = [ { From 79f2800ff45be157700ae2067c3e689da42bb8bf Mon Sep 17 00:00:00 2001 From: Daniel Lindsley Date: Sat, 15 Mar 2014 20:32:10 -0700 Subject: [PATCH 24/60] Revert "Test fixes." This reverts commit 90672c40636eb4c84e36ae8c45236086a8bacdd8. 
--- tests/integration/dynamodb2/test_highlevel.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/dynamodb2/test_highlevel.py b/tests/integration/dynamodb2/test_highlevel.py index 5d6244458a..8d57168f15 100644 --- a/tests/integration/dynamodb2/test_highlevel.py +++ b/tests/integration/dynamodb2/test_highlevel.py @@ -515,10 +515,10 @@ def test_query_with_limits(self): [post['posted_by'] for post in all_posts], ['joe', 'jane', 'joe', 'joe', 'jane', 'joe'] ) - self.assertTrue(results._fetches >= 3) + self.assertEqual(results._fetches, 3) def test_query_with_reverse(self): - posts = Table.create('more-posts', schema=[ + posts = Table.create('posts', schema=[ HashKey('thread'), RangeKey('posted_on') ], throughput={ From 32a80ee16068c7aa12a3d7086513816a1412ac52 Mon Sep 17 00:00:00 2001 From: Daniel Lindsley Date: Sat, 15 Mar 2014 20:32:20 -0700 Subject: [PATCH 25/60] Revert "Fixed how ``reverse`` works in DDBv2." This reverts commit 58e3c2793e1a798f5a8f5cb3bdc049adf9d26337. --- boto/dynamodb2/table.py | 8 +- tests/integration/dynamodb2/test_highlevel.py | 80 ------------------- tests/unit/dynamodb2/test_table.py | 6 +- 3 files changed, 6 insertions(+), 88 deletions(-) diff --git a/boto/dynamodb2/table.py b/boto/dynamodb2/table.py index f9e5ede1d1..da91034b82 100644 --- a/boto/dynamodb2/table.py +++ b/boto/dynamodb2/table.py @@ -837,7 +837,7 @@ def query(self, limit=None, index=None, reverse=False, consistent=False, (Default: ``None``) Optionally accepts a ``reverse`` parameter, which will present the - results in reverse order. (Default: ``False`` - normal order) + results in reverse order. (Default: ``None`` - normal order) Optionally accepts a ``consistent`` parameter, which should be a boolean. 
If you provide ``True``, it will force a consistent read of @@ -988,14 +988,12 @@ def _query(self, limit=None, index=None, reverse=False, consistent=False, kwargs = { 'limit': limit, 'index_name': index, + 'scan_index_forward': reverse, 'consistent_read': consistent, 'select': select, - 'attributes_to_get': attributes_to_get, + 'attributes_to_get': attributes_to_get } - if reverse: - kwargs['scan_index_forward'] = False - if exclusive_start_key: kwargs['exclusive_start_key'] = {} diff --git a/tests/integration/dynamodb2/test_highlevel.py b/tests/integration/dynamodb2/test_highlevel.py index 8d57168f15..1e3335b704 100644 --- a/tests/integration/dynamodb2/test_highlevel.py +++ b/tests/integration/dynamodb2/test_highlevel.py @@ -516,83 +516,3 @@ def test_query_with_limits(self): ['joe', 'jane', 'joe', 'joe', 'jane', 'joe'] ) self.assertEqual(results._fetches, 3) - - def test_query_with_reverse(self): - posts = Table.create('posts', schema=[ - HashKey('thread'), - RangeKey('posted_on') - ], throughput={ - 'read': 5, - 'write': 5, - }) - self.addCleanup(posts.delete) - - # Wait for it. - time.sleep(60) - - # Add some data. - test_data_path = os.path.join( - os.path.dirname(__file__), - 'forum_test_data.json' - ) - with open(test_data_path, 'r') as test_data: - data = json.load(test_data) - - with posts.batch_write() as batch: - for post in data: - batch.put_item(post) - - time.sleep(5) - - # Test the default order (ascending). - results = posts.query( - thread__eq='Favorite chiptune band?', - posted_on__gte='2013-12-24T00:00:00' - ) - self.assertEqual( - [post['posted_on'] for post in results], - [ - '2013-12-24T12:30:54', - '2013-12-24T12:35:40', - '2013-12-24T13:45:30', - '2013-12-24T14:15:14', - '2013-12-24T14:25:33', - '2013-12-24T15:22:22', - ] - ) - - # Test the explicit ascending order. 
- results = posts.query( - thread__eq='Favorite chiptune band?', - posted_on__gte='2013-12-24T00:00:00', - reverse=False - ) - self.assertEqual( - [post['posted_on'] for post in results], - [ - '2013-12-24T12:30:54', - '2013-12-24T12:35:40', - '2013-12-24T13:45:30', - '2013-12-24T14:15:14', - '2013-12-24T14:25:33', - '2013-12-24T15:22:22', - ] - ) - - # Test the explicit descending order. - results = posts.query( - thread__eq='Favorite chiptune band?', - posted_on__gte='2013-12-24T00:00:00', - reverse=True - ) - self.assertEqual( - [post['posted_on'] for post in results], - [ - '2013-12-24T15:22:22', - '2013-12-24T14:25:33', - '2013-12-24T14:15:14', - '2013-12-24T13:45:30', - '2013-12-24T12:35:40', - '2013-12-24T12:30:54', - ] - ) diff --git a/tests/unit/dynamodb2/test_table.py b/tests/unit/dynamodb2/test_table.py index 41a31bafbb..7fd91004a3 100644 --- a/tests/unit/dynamodb2/test_table.py +++ b/tests/unit/dynamodb2/test_table.py @@ -883,7 +883,7 @@ def test_max_page_size_and_bigger_limit_fetch_more(self): self.results.fetch_more() self.result_function.assert_called_with('john', greeting='Hello', limit=10) self.result_function.reset_mock() - + def test_fetch_more(self): # First "page". self.results.fetch_more() @@ -2196,7 +2196,7 @@ def test_private_query(self): mock_query.assert_called_once_with('users', consistent_read=False, - scan_index_forward=False, + scan_index_forward=True, index_name=None, attributes_to_get=None, limit=4, @@ -2243,7 +2243,7 @@ def test_private_query(self): }, index_name=None, attributes_to_get=None, - scan_index_forward=False, + scan_index_forward=True, limit=4, exclusive_start_key={ 'username': { From c5e1a61887c341a60a51a6baed7b1a1144e7c594 Mon Sep 17 00:00:00 2001 From: Daniel Lindsley Date: Fri, 14 Mar 2014 14:54:26 -0700 Subject: [PATCH 26/60] Reintroduced the ``reverse`` fix for DDB. This is more backward-compatible than the previous attempt. 
--- boto/dynamodb2/table.py | 22 ++- docs/source/dynamodb2_tut.rst | 21 ++- docs/source/migrations/dynamodb_v1_to_v2.rst | 2 +- tests/integration/dynamodb2/test_highlevel.py | 129 ++++++++++++++++-- tests/unit/dynamodb2/test_table.py | 10 +- 5 files changed, 160 insertions(+), 24 deletions(-) diff --git a/boto/dynamodb2/table.py b/boto/dynamodb2/table.py index da91034b82..9795de672d 100644 --- a/boto/dynamodb2/table.py +++ b/boto/dynamodb2/table.py @@ -816,6 +816,20 @@ def _build_filters(self, filter_kwargs, using=QUERY_OPERATORS): def query(self, limit=None, index=None, reverse=False, consistent=False, attributes=None, max_page_size=None, **filter_kwargs): """ + **WARNING:** This method is provided **strictly** for + backward-compatibility. It returns results in an incorrect order. + + If you are writing new code, please use ``Table.query_2``. + """ + reverse = not reverse + return self.query_2(limit=limit, index=index, reverse=reverse, + consistent=consistent, attributes=attributes, + max_page_size=max_page_size, **filter_kwargs) + + def query_2(self, limit=None, index=None, reverse=False, + consistent=False, attributes=None, max_page_size=None, + **filter_kwargs): + """ Queries for a set of matching items in a DynamoDB table. Queries can be performed against a hash key, a hash+range key or @@ -837,7 +851,7 @@ def query(self, limit=None, index=None, reverse=False, consistent=False, (Default: ``None``) Optionally accepts a ``reverse`` parameter, which will present the - results in reverse order. (Default: ``None`` - normal order) + results in reverse order. (Default: ``False`` - normal order) Optionally accepts a ``consistent`` parameter, which should be a boolean. 
If you provide ``True``, it will force a consistent read of @@ -988,12 +1002,14 @@ def _query(self, limit=None, index=None, reverse=False, consistent=False, kwargs = { 'limit': limit, 'index_name': index, - 'scan_index_forward': reverse, 'consistent_read': consistent, 'select': select, - 'attributes_to_get': attributes_to_get + 'attributes_to_get': attributes_to_get, } + if reverse: + kwargs['scan_index_forward'] = False + if exclusive_start_key: kwargs['exclusive_start_key'] = {} diff --git a/docs/source/dynamodb2_tut.rst b/docs/source/dynamodb2_tut.rst index 62f9361ab2..bca396060e 100644 --- a/docs/source/dynamodb2_tut.rst +++ b/docs/source/dynamodb2_tut.rst @@ -310,6 +310,13 @@ manager. Querying -------- +.. warning:: + + The ``Table`` object has both a ``query`` & a ``query_2`` method. If you + are writing new code, **DO NOT** use ``Table.query``. It presents results + in an incorrect order than expected & is strictly present for + backward-compatibility. + Manually fetching out each item by itself isn't tenable for large datasets. To cope with fetching many records, you can either perform a standard query, query via a local secondary index or scan the entire table. @@ -338,7 +345,7 @@ request. To run a query for last names starting with the letter "D":: - >>> names_with_d = users.query( + >>> names_with_d = users.query_2( ... account_type__eq='standard_user', ... last_name__beginswith='D' ... ) @@ -352,7 +359,7 @@ To run a query for last names starting with the letter "D":: You can also reverse results (``reverse=True``) as well as limiting them (``limit=2``):: - >>> rev_with_d = users.query( + >>> rev_with_d = users.query_2( ... account_type__eq='standard_user', ... last_name__beginswith='D', ... reverse=True, @@ -369,7 +376,7 @@ the index name (``index='FirstNameIndex'``) & filter parameters against its fields:: # Users within the last hour. - >>> recent = users.query( + >>> recent = users.query_2( ... account_type__eq='standard_user', ... 
date_joined__gte=time.time() - (60 * 60), ... index='DateJoinedIndex' @@ -383,11 +390,11 @@ fields:: By default, DynamoDB can return a large amount of data per-request (up to 1Mb of data). To prevent these requests from drowning other smaller gets, you can specify a smaller page size via the ``max_page_size`` argument to -``Table.query`` & ``Table.scan``. Doing so looks like:: +``Table.query_2`` & ``Table.scan``. Doing so looks like:: # Small pages yield faster responses & less potential of drowning other # requests. - >>> all_users = users.query( + >>> all_users = users.query_2( ... account_type__eq='standard_user', ... date_joined__gte=0, ... max_page_size=10 @@ -429,7 +436,7 @@ Filtering a scan looks like:: The ``ResultSet`` ~~~~~~~~~~~~~~~~~ -Both ``Table.query`` & ``Table.scan`` return an object called ``ResultSet``. +Both ``Table.query_2`` & ``Table.scan`` return an object called ``ResultSet``. It's a lazily-evaluated object that uses the `Iterator protocol`_. It delays your queries until you request the next item in the result set. @@ -460,7 +467,7 @@ a call to ``list()``. Ex.:: Wrapping calls like the above in ``list(...)`` **WILL** cause it to evaluate the **ENTIRE** potentially large data set. - Appropriate use of the ``limit=...`` kwarg to ``Table.query`` & + Appropriate use of the ``limit=...`` kwarg to ``Table.query_2`` & ``Table.scan`` calls are **VERY** important should you chose to do this. Alternatively, you can build your own list, using ``for`` on the diff --git a/docs/source/migrations/dynamodb_v1_to_v2.rst b/docs/source/migrations/dynamodb_v1_to_v2.rst index d945e17b11..d90c3d64b6 100644 --- a/docs/source/migrations/dynamodb_v1_to_v2.rst +++ b/docs/source/migrations/dynamodb_v1_to_v2.rst @@ -231,7 +231,7 @@ DynamoDB v2:: >>> from boto.dynamodb2.table import Table >>> table = Table('messages') - >>> items = table.query( + >>> items = table.query_2( ... forum_name__eq='Amazon DynamoDB', ... subject__beginswith='DynamoDB', ... 
limit=1 diff --git a/tests/integration/dynamodb2/test_highlevel.py b/tests/integration/dynamodb2/test_highlevel.py index 1e3335b704..a4de0b71c9 100644 --- a/tests/integration/dynamodb2/test_highlevel.py +++ b/tests/integration/dynamodb2/test_highlevel.py @@ -219,7 +219,7 @@ def test_integration(self): self.assertEqual(serverside_sadie['first_name'], 'Sadie') # Test the eventually consistent query. - results = users.query( + results = users.query_2( username__eq='johndoe', last_name__eq='Doe', index='LastNameIndex', @@ -232,7 +232,7 @@ def test_integration(self): self.assertEqual(res.keys(), ['username']) # Ensure that queries with attributes don't return the hash key. - results = users.query( + results = users.query_2( username__eq='johndoe', friend_count__eq=4, attributes=('first_name',) @@ -243,7 +243,7 @@ def test_integration(self): self.assertEqual(res.keys(), ['first_name']) # Test the strongly consistent query. - c_results = users.query( + c_results = users.query_2( username__eq='johndoe', last_name__eq='Doe', index='LastNameIndex', @@ -322,7 +322,7 @@ def test_integration(self): username__eq='johndoe' ) # But it shouldn't break on more complex tables. - res = users.query(username__eq='johndoe') + res = users.query_2(username__eq='johndoe') # Test putting with/without sets. mau5_created = users.put_item(data={ @@ -458,14 +458,14 @@ def test_gsi_with_just_hash_key(self): }) # Try the main key. Should be fine. - rs = users.query( + rs = users.query_2( user_id__eq='24' ) results = sorted([user['username'] for user in rs]) self.assertEqual(results, ['alice']) # Now try the GSI. Also should work. - rs = users.query( + rs = users.query_2( username__eq='johndoe', index='UsernameIndex' ) @@ -504,7 +504,7 @@ def test_query_with_limits(self): time.sleep(5) # Test the reduced page size. 
- results = posts.query( + results = posts.query_2( thread__eq='Favorite chiptune band?', posted_on__gte='2013-12-24T00:00:00', max_page_size=2 @@ -515,4 +515,117 @@ def test_query_with_limits(self): [post['posted_by'] for post in all_posts], ['joe', 'jane', 'joe', 'joe', 'jane', 'joe'] ) - self.assertEqual(results._fetches, 3) + self.assertTrue(results._fetches >= 3) + + def test_query_with_reverse(self): + posts = Table.create('more-posts', schema=[ + HashKey('thread'), + RangeKey('posted_on') + ], throughput={ + 'read': 5, + 'write': 5, + }) + self.addCleanup(posts.delete) + + # Wait for it. + time.sleep(60) + + # Add some data. + test_data_path = os.path.join( + os.path.dirname(__file__), + 'forum_test_data.json' + ) + with open(test_data_path, 'r') as test_data: + data = json.load(test_data) + + with posts.batch_write() as batch: + for post in data: + batch.put_item(post) + + time.sleep(5) + + # Test the default order (ascending). + results = posts.query_2( + thread__eq='Favorite chiptune band?', + posted_on__gte='2013-12-24T00:00:00' + ) + self.assertEqual( + [post['posted_on'] for post in results], + [ + '2013-12-24T12:30:54', + '2013-12-24T12:35:40', + '2013-12-24T13:45:30', + '2013-12-24T14:15:14', + '2013-12-24T14:25:33', + '2013-12-24T15:22:22', + ] + ) + + # Test the explicit ascending order. + results = posts.query_2( + thread__eq='Favorite chiptune band?', + posted_on__gte='2013-12-24T00:00:00', + reverse=False + ) + self.assertEqual( + [post['posted_on'] for post in results], + [ + '2013-12-24T12:30:54', + '2013-12-24T12:35:40', + '2013-12-24T13:45:30', + '2013-12-24T14:15:14', + '2013-12-24T14:25:33', + '2013-12-24T15:22:22', + ] + ) + + # Test the explicit descending order. 
+ results = posts.query_2( + thread__eq='Favorite chiptune band?', + posted_on__gte='2013-12-24T00:00:00', + reverse=True + ) + self.assertEqual( + [post['posted_on'] for post in results], + [ + '2013-12-24T15:22:22', + '2013-12-24T14:25:33', + '2013-12-24T14:15:14', + '2013-12-24T13:45:30', + '2013-12-24T12:35:40', + '2013-12-24T12:30:54', + ] + ) + + # Test the old, broken style. + results = posts.query( + thread__eq='Favorite chiptune band?', + posted_on__gte='2013-12-24T00:00:00' + ) + self.assertEqual( + [post['posted_on'] for post in results], + [ + '2013-12-24T15:22:22', + '2013-12-24T14:25:33', + '2013-12-24T14:15:14', + '2013-12-24T13:45:30', + '2013-12-24T12:35:40', + '2013-12-24T12:30:54', + ] + ) + results = posts.query( + thread__eq='Favorite chiptune band?', + posted_on__gte='2013-12-24T00:00:00', + reverse=True + ) + self.assertEqual( + [post['posted_on'] for post in results], + [ + '2013-12-24T12:30:54', + '2013-12-24T12:35:40', + '2013-12-24T13:45:30', + '2013-12-24T14:15:14', + '2013-12-24T14:25:33', + '2013-12-24T15:22:22', + ] + ) diff --git a/tests/unit/dynamodb2/test_table.py b/tests/unit/dynamodb2/test_table.py index 7fd91004a3..1e42660c3b 100644 --- a/tests/unit/dynamodb2/test_table.py +++ b/tests/unit/dynamodb2/test_table.py @@ -883,7 +883,7 @@ def test_max_page_size_and_bigger_limit_fetch_more(self): self.results.fetch_more() self.result_function.assert_called_with('john', greeting='Hello', limit=10) self.result_function.reset_mock() - + def test_fetch_more(self): # First "page". 
self.results.fetch_more() @@ -2196,7 +2196,7 @@ def test_private_query(self): mock_query.assert_called_once_with('users', consistent_read=False, - scan_index_forward=True, + scan_index_forward=False, index_name=None, attributes_to_get=None, limit=4, @@ -2243,7 +2243,7 @@ def test_private_query(self): }, index_name=None, attributes_to_get=None, - scan_index_forward=True, + scan_index_forward=False, limit=4, exclusive_start_key={ 'username': { @@ -2376,7 +2376,7 @@ def test_query(self): 'last_key': 'jane', } - results = self.users.query(last_name__eq='Doe') + results = self.users.query_2(last_name__eq='Doe') self.assertTrue(isinstance(results, ResultSet)) self.assertEqual(len(results._results), 0) self.assertEqual(results.the_callable, self.users._query) @@ -2430,7 +2430,7 @@ def test_query_with_specific_attributes(self): 'last_key': 'jane', } - results = self.users.query(last_name__eq='Doe', + results = self.users.query_2(last_name__eq='Doe', attributes=['username']) self.assertTrue(isinstance(results, ResultSet)) self.assertEqual(len(results._results), 0) From 35cab92fad3d9cae3590c2fb708264ac42f0c3c6 Mon Sep 17 00:00:00 2001 From: Daniel Lindsley Date: Mon, 17 Mar 2014 13:48:42 -0700 Subject: [PATCH 27/60] Fixed removing policies from listeners. 
--- boto/ec2/elb/__init__.py | 5 +++- tests/integration/ec2/elb/test_connection.py | 25 ++++++++++++++++++++ 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/boto/ec2/elb/__init__.py b/boto/ec2/elb/__init__.py index d308c72639..ad8695ddd7 100644 --- a/boto/ec2/elb/__init__.py +++ b/boto/ec2/elb/__init__.py @@ -635,7 +635,10 @@ def set_lb_policies_of_listener(self, lb_name, lb_port, policies): """ params = {'LoadBalancerName': lb_name, 'LoadBalancerPort': lb_port} - self.build_list_params(params, policies, 'PolicyNames.member.%d') + if len(policies): + self.build_list_params(params, policies, 'PolicyNames.member.%d') + else: + params['PolicyNames'] = '' return self.get_status('SetLoadBalancerPoliciesOfListener', params) def set_lb_policies_of_backend_server(self, lb_name, instance_port, policies): diff --git a/tests/integration/ec2/elb/test_connection.py b/tests/integration/ec2/elb/test_connection.py index d6895310a5..114bb90236 100644 --- a/tests/integration/ec2/elb/test_connection.py +++ b/tests/integration/ec2/elb/test_connection.py @@ -198,6 +198,31 @@ def test_load_balancer_access_log(self): new_attributes.access_log.s3_bucket_prefix) self.assertEqual(5, new_attributes.access_log.emit_interval) + def test_set_load_balancer_policies_of_listeners(self): + more_listeners = [(443, 8001, 'HTTP')] + self.conn.create_load_balancer_listeners(self.name, more_listeners) + + lb_policy_name = 'lb-policy' + self.conn.create_lb_cookie_stickiness_policy( + 1000, + self.name, + lb_policy_name + ) + self.conn.set_lb_policies_of_listener( + self.name, + self.listeners[0][0], + lb_policy_name + ) + + # Try to remove the policy by passing empty list. + # http://docs.aws.amazon.com/ElasticLoadBalancing/latest/APIReference/API_SetLoadBalancerPoliciesOfListener.html + # documents this as the way to remove policies. 
+ self.conn.set_lb_policies_of_listener( + self.name, + self.listeners[0][0], + [] + ) + if __name__ == '__main__': unittest.main() From 3f2ba6f040c9a1c7ca50430b7c7369a60b6092a7 Mon Sep 17 00:00:00 2001 From: Daniel Lindsley Date: Mon, 17 Mar 2014 13:55:27 -0700 Subject: [PATCH 28/60] Fixed part of roboto for euca2ools. Verified on euca2ools 2.1.4. --- boto/roboto/param.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/boto/roboto/param.py b/boto/roboto/param.py index ed3e6be9b9..35a25b4af5 100644 --- a/boto/roboto/param.py +++ b/boto/roboto/param.py @@ -46,7 +46,7 @@ def convert_boolean(cls, param, value): @classmethod def convert_file(cls, param, value): - if os.path.isfile(value): + if os.path.exists(value) and not os.path.isdir(value): return value raise ValueError From 1045a83e010888d3e121bc272cecf09e3a9584ba Mon Sep 17 00:00:00 2001 From: Stuart Warren Date: Tue, 18 Mar 2014 17:23:47 +0000 Subject: [PATCH 29/60] Fix specification of elasticbeanstalk tier According to the API: http://docs.aws.amazon.com/elasticbeanstalk/latest/api/API_CreateEnvironment.html The ``member`` part of the Tier key names is not valid. I was always getting: {"Error":{"Code":"InvalidParameterValue","Message":"Environment tier definition not found","Type":"Sender"},"RequestId":"xxxxx"} This allows an environment to be created and to specify the tier_name and tier_type. 
--- boto/beanstalk/layer1.py | 12 ++++++------ tests/unit/beanstalk/test_layer1.py | 6 +++--- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/boto/beanstalk/layer1.py b/boto/beanstalk/layer1.py index 5963f50e9c..adccd5bf7c 100644 --- a/boto/beanstalk/layer1.py +++ b/boto/beanstalk/layer1.py @@ -351,9 +351,9 @@ def create_environment(self, application_name, environment_name, self.build_list_params(params, options_to_remove, 'OptionsToRemove.member') if tier_name and tier_type and tier_version: - params['Tier.member.Name'] = tier_name - params['Tier.member.Type'] = tier_type - params['Tier.member.Version'] = tier_version + params['Tier.Name'] = tier_name + params['Tier.Type'] = tier_type + params['Tier.Version'] = tier_version return self._get_response('CreateEnvironment', params) def create_storage_location(self): @@ -1138,9 +1138,9 @@ def update_environment(self, environment_id=None, environment_name=None, self.build_list_params(params, options_to_remove, 'OptionsToRemove.member') if tier_name and tier_type and tier_version: - params['Tier.member.Name'] = tier_name - params['Tier.member.Type'] = tier_type - params['Tier.member.Version'] = tier_version + params['Tier.Name'] = tier_name + params['Tier.Type'] = tier_type + params['Tier.Version'] = tier_version return self._get_response('UpdateEnvironment', params) def validate_configuration_settings(self, application_name, diff --git a/tests/unit/beanstalk/test_layer1.py b/tests/unit/beanstalk/test_layer1.py index fad51e6551..5632a1642f 100644 --- a/tests/unit/beanstalk/test_layer1.py +++ b/tests/unit/beanstalk/test_layer1.py @@ -143,7 +143,7 @@ def test_create_environment_with_tier(self): 'OptionSettings.member.2.Namespace': 'aws:elasticbeanstalk:application:environment', 'OptionSettings.member.2.OptionName': 'ENVVAR', 'OptionSettings.member.2.Value': 'VALUE1', - 'Tier.member.Name': 'Worker', - 'Tier.member.Type': 'SQS/HTTP', - 'Tier.member.Version': '1.0', + 'Tier.Name': 'Worker', + 'Tier.Type': 
'SQS/HTTP', + 'Tier.Version': '1.0', }) From a3a3a285f5b40e6b52e071e2a2b08fc94b3d178d Mon Sep 17 00:00:00 2001 From: Jack Bruno Date: Fri, 21 Mar 2014 20:32:22 -0600 Subject: [PATCH 30/60] Add support for setting failure threshold for Route53 health checks. --- boto/route53/healthcheck.py | 17 +++++++++++++-- .../integration/route53/test_health_check.py | 21 +++++++++++++++++++ 2 files changed, 36 insertions(+), 2 deletions(-) diff --git a/boto/route53/healthcheck.py b/boto/route53/healthcheck.py index 059d208b4b..9f112b7d86 100644 --- a/boto/route53/healthcheck.py +++ b/boto/route53/healthcheck.py @@ -40,8 +40,11 @@ if Type is HTTP_STR_MATCH or HTTPS_STR_MATCH, the string to search for in the response body from the specified resource + 10 | 30 + integer between 1 and 10 - +""" class HealthCheck(object): @@ -57,6 +60,7 @@ class HealthCheck(object): %(fqdn_part)s %(string_match_part)s %(request_interval)s + %(failure_threshold)s """ @@ -68,7 +72,7 @@ class HealthCheck(object): valid_request_intervals = (10, 30) - def __init__(self, ip_addr, port, hc_type, resource_path, fqdn=None, string_match=None, request_interval=30): + def __init__(self, ip_addr, port, hc_type, resource_path, fqdn=None, string_match=None, request_interval=30, failure_threshold=3): """ HealthCheck object @@ -93,6 +97,9 @@ def __init__(self, ip_addr, port, hc_type, resource_path, fqdn=None, string_matc :type request_interval: int :param request_interval: The number of seconds between the time that Amazon Route 53 gets a response from your endpoint and the time that it sends the next health-check request. + :type failure_threshold: int + :param failure_threshold: The number of consecutive health checks that an endpoint must pass or fail for Amazon Route 53 to change the current status of the endpoint from unhealthy to healthy or vice versa. 
+ """ self.ip_addr = ip_addr self.port = port @@ -100,6 +107,7 @@ def __init__(self, ip_addr, port, hc_type, resource_path, fqdn=None, string_matc self.resource_path = resource_path self.fqdn = fqdn self.string_match = string_match + self.failure_threshold = failure_threshold if request_interval in self.valid_request_intervals: self.request_interval = request_interval @@ -108,6 +116,10 @@ def __init__(self, ip_addr, port, hc_type, resource_path, fqdn=None, string_matc "Valid values for request_interval are: %s" % ",".join(str(i) for i in self.valid_request_intervals)) + if failure_threshold < 1 or failure_threshold > 10: + raise AttributeError( + 'Valid values for failure_threshold are 1 - 10.') + def to_xml(self): params = { 'ip_addr': self.ip_addr, @@ -118,6 +130,7 @@ def to_xml(self): 'string_match_part': "", 'request_interval': (self.XMLRequestIntervalPart % {'request_interval': self.request_interval}), + 'failure_threshold': self.failure_threshold, } if self.fqdn is not None: params['fqdn_part'] = self.XMLFQDNPart % {'fqdn': self.fqdn} diff --git a/tests/integration/route53/test_health_check.py b/tests/integration/route53/test_health_check.py index e662da3b29..ed4db5aece 100644 --- a/tests/integration/route53/test_health_check.py +++ b/tests/integration/route53/test_health_check.py @@ -123,6 +123,16 @@ def test_create_health_check_invalid_request_interval(self): with self.assertRaises(AttributeError): HealthCheck(**self.health_check_params(request_interval=5)) + def test_create_health_check_invalid_failure_threshold(self): + """ + Test that health checks cannot be created with an invalid + 'failure_threshold'. 
+ """ + with self.assertRaises(AttributeError): + HealthCheck(**self.health_check_params(failure_threshold=0)) + with self.assertRaises(AttributeError): + HealthCheck(**self.health_check_params(failure_threshold=11)) + def test_create_health_check_request_interval(self): hc_params = self.health_check_params(request_interval=10) hc = HealthCheck(**hc_params) @@ -131,6 +141,17 @@ def test_create_health_check_request_interval(self): [u'HealthCheck'][u'HealthCheckConfig']) self.assertEquals(hc_config[u'RequestInterval'], unicode(hc_params['request_interval'])) + self.conn.delete_health_check(result['CreateHealthCheckResponse']['HealthCheck']['Id']) + + def test_create_health_check_failure_threshold(self): + hc_params = self.health_check_params(failure_threshold=1) + hc = HealthCheck(**hc_params) + result = self.conn.create_health_check(hc) + hc_config = (result[u'CreateHealthCheckResponse'] + [u'HealthCheck'][u'HealthCheckConfig']) + self.assertEquals(hc_config[u'FailureThreshold'], + unicode(hc_params['failure_threshold'])) + self.conn.delete_health_check(result['CreateHealthCheckResponse']['HealthCheck']['Id']) def health_check_params(self, **kwargs): params = { From 15730f43e3eb7551f53992266ac09e125a28b949 Mon Sep 17 00:00:00 2001 From: David Kimdon Date: Mon, 24 Mar 2014 14:07:41 -0400 Subject: [PATCH 31/60] Add support for ELB Connection Draining attribute. 
--- boto/ec2/elb/__init__.py | 9 +++++++++ boto/ec2/elb/attributes.py | 33 +++++++++++++++++++++++++++++++-- 2 files changed, 40 insertions(+), 2 deletions(-) diff --git a/boto/ec2/elb/__init__.py b/boto/ec2/elb/__init__.py index ad8695ddd7..c9640be576 100644 --- a/boto/ec2/elb/__init__.py +++ b/boto/ec2/elb/__init__.py @@ -386,6 +386,7 @@ def modify_lb_attribute(self, load_balancer_name, attribute, value): * crossZoneLoadBalancing - Boolean (true) * accessLog - :py:class:`AccessLogAttribute` instance + * connectionDraining - :py:class:`ConnectionDrainingAttribute` instance :type value: string :param value: The new value for the attribute @@ -415,6 +416,11 @@ def modify_lb_attribute(self, load_balancer_name, attribute, value): value.s3_bucket_prefix params['LoadBalancerAttributes.AccessLog.EmitInterval'] = \ value.emit_interval + elif attribute.lower() == 'connectiondraining': + params['LoadBalancerAttributes.ConnectionDraining.Enabled'] = \ + value.enabled and 'true' or 'false' + params['LoadBalancerAttributes.ConnectionDraining.Timeout'] = \ + value.timeout else: raise ValueError('InvalidAttribute', attribute) return self.get_status('ModifyLoadBalancerAttributes', params, @@ -446,6 +452,7 @@ def get_lb_attribute(self, load_balancer_name, attribute): :param attribute: The attribute you wish to see. 
* crossZoneLoadBalancing - Boolean + * connectionDraining - :py:class:`ConnectionDrainingAttribute` instance :rtype: Attribute dependent :return: The new value for the attribute @@ -453,6 +460,8 @@ def get_lb_attribute(self, load_balancer_name, attribute): attributes = self.get_all_lb_attributes(load_balancer_name) if attribute.lower() == 'crosszoneloadbalancing': return attributes.cross_zone_load_balancing.enabled + if attribute.lower() == 'connectiondraining': + return attributes.connection_draining return None def register_instances(self, load_balancer_name, instances): diff --git a/boto/ec2/elb/attributes.py b/boto/ec2/elb/attributes.py index edf3eedcc0..05ca8f82e7 100644 --- a/boto/ec2/elb/attributes.py +++ b/boto/ec2/elb/attributes.py @@ -74,6 +74,31 @@ def endElement(self, name, value, connection): elif name == 'EmitInterval': self.emit_interval = int(value) +class ConnectionDrainingAttribute(object): + """ + Represents the ConnectionDraining segment of ELB attributes. + """ + def __init__(self, connection=None): + self.enabled = None + self.timeout = None + + def __repr__(self): + return 'ConnectionDraining(%s, %s)' % ( + self.enabled, + self.timeout + ) + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'Enabled': + if value.lower() == 'true': + self.enabled = True + else: + self.enabled = False + elif name == 'Timeout': + self.timeout = int(value) class LbAttributes(object): """ @@ -84,17 +109,21 @@ def __init__(self, connection=None): self.cross_zone_load_balancing = CrossZoneLoadBalancingAttribute( self.connection) self.access_log = AccessLogAttribute(self.connection) + self.connection_draining = ConnectionDrainingAttribute(self.connection) def __repr__(self): - return 'LbAttributes(%s, %s)' % ( + return 'LbAttributes(%s, %s, %s)' % ( repr(self.cross_zone_load_balancing), - repr(self.access_log)) + repr(self.access_log), + repr(self.connection_draining)) def startElement(self, 
name, attrs, connection): if name == 'CrossZoneLoadBalancing': return self.cross_zone_load_balancing if name == 'AccessLog': return self.access_log + if name == 'ConnectionDraining': + return self.connection_draining def endElement(self, name, value, connection): pass From 4382c67caf63a59cdc06322cac0737fd685b84e6 Mon Sep 17 00:00:00 2001 From: David Kimdon Date: Tue, 25 Mar 2014 10:43:30 -0400 Subject: [PATCH 32/60] Add connection draining tests. Add get attribute tests. Fix get_lb_attribute() to support AccessLog attributes. --- boto/ec2/elb/__init__.py | 3 ++ tests/integration/ec2/elb/test_connection.py | 42 ++++++++++++++++++++ 2 files changed, 45 insertions(+) diff --git a/boto/ec2/elb/__init__.py b/boto/ec2/elb/__init__.py index c9640be576..9c82ce7669 100644 --- a/boto/ec2/elb/__init__.py +++ b/boto/ec2/elb/__init__.py @@ -451,6 +451,7 @@ def get_lb_attribute(self, load_balancer_name, attribute): :type attribute: string :param attribute: The attribute you wish to see. + * accessLog - :py:class:`AccessLogAttribute` instance * crossZoneLoadBalancing - Boolean * connectionDraining - :py:class:`ConnectionDrainingAttribute` instance @@ -458,6 +459,8 @@ def get_lb_attribute(self, load_balancer_name, attribute): :return: The new value for the attribute """ attributes = self.get_all_lb_attributes(load_balancer_name) + if attribute.lower() == 'accesslog': + return attributes.access_log if attribute.lower() == 'crosszoneloadbalancing': return attributes.cross_zone_load_balancing.enabled if attribute.lower() == 'connectiondraining': diff --git a/tests/integration/ec2/elb/test_connection.py b/tests/integration/ec2/elb/test_connection.py index 114bb90236..6ed6a79df4 100644 --- a/tests/integration/ec2/elb/test_connection.py +++ b/tests/integration/ec2/elb/test_connection.py @@ -198,6 +198,48 @@ def test_load_balancer_access_log(self): new_attributes.access_log.s3_bucket_prefix) self.assertEqual(5, new_attributes.access_log.emit_interval) + def 
test_load_balancer_get_attributes(self): + attributes = self.balancer.get_attributes() + connection_draining = self.conn.get_lb_attribute(self.balancer.name, + 'ConnectionDraining') + self.assertEqual(connection_draining.enabled, + attributes.connection_draining.enabled) + self.assertEqual(connection_draining.timeout, + attributes.connection_draining.timeout) + + access_log = self.conn.get_lb_attribute(self.balancer.name, + 'AccessLog') + self.assertEqual(access_log.enabled, attributes.access_log.enabled) + self.assertEqual(access_log.s3_bucket_name, attributes.access_log.s3_bucket_name) + self.assertEqual(access_log.s3_bucket_prefix, attributes.access_log.s3_bucket_prefix) + self.assertEqual(access_log.emit_interval, attributes.access_log.emit_interval) + + cross_zone_load_balancing = self.conn.get_lb_attribute(self.balancer.name, + 'CrossZoneLoadBalancing') + self.assertEqual(cross_zone_load_balancing, + attributes.cross_zone_load_balancing.enabled) + + def change_and_verify_load_balancer_connection_draining(self, enabled, timeout = None): + attributes = self.balancer.get_attributes() + + attributes.connection_draining.enabled = enabled + if timeout != None: + attributes.connection_draining.timeout = timeout + + self.conn.modify_lb_attribute(self.balancer.name, + 'ConnectionDraining', attributes.connection_draining) + + attributes = self.balancer.get_attributes() + self.assertEqual(enabled, attributes.connection_draining.enabled) + if timeout != None: + self.assertEqual(timeout, attributes.connection_draining.timeout) + + def test_load_balancer_connection_draining_config(self): + self.change_and_verify_load_balancer_connection_draining(True, 128) + self.change_and_verify_load_balancer_connection_draining(True, 256) + self.change_and_verify_load_balancer_connection_draining(False) + self.change_and_verify_load_balancer_connection_draining(True, 64) + def test_set_load_balancer_policies_of_listeners(self): more_listeners = [(443, 8001, 'HTTP')] 
self.conn.create_load_balancer_listeners(self.name, more_listeners) From 0d2c1f7dbd944f1589e039b204c3fb84e7e91cc2 Mon Sep 17 00:00:00 2001 From: Brunet Date: Wed, 26 Mar 2014 19:00:33 +0100 Subject: [PATCH 33/60] Update connection.py Typo --- boto/ec2/connection.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/boto/ec2/connection.py b/boto/ec2/connection.py index 9e2d4b1925..d34962f6ec 100644 --- a/boto/ec2/connection.py +++ b/boto/ec2/connection.py @@ -109,7 +109,7 @@ def _required_auth_capability(self): def get_params(self): """ - Returns a dictionary containing the value of of all of the keyword + Returns a dictionary containing the value of all of the keyword arguments passed when constructing this connection. """ param_names = ['aws_access_key_id', 'aws_secret_access_key', From 283c35cd1369658d81384ab773c460890fcb3cad Mon Sep 17 00:00:00 2001 From: Kodi Arfer Date: Fri, 28 Mar 2014 16:07:42 -0400 Subject: [PATCH 34/60] Clarified that MTurkConnection.get_assignments attributes are actually strings. Fixes #2176. I also edited a doctest accordingly. --- boto/mturk/connection.py | 6 +++--- tests/mturk/reviewable_hits.doctest | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/boto/mturk/connection.py b/boto/mturk/connection.py index ff011ff652..d57c48bf34 100644 --- a/boto/mturk/connection.py +++ b/boto/mturk/connection.py @@ -388,15 +388,15 @@ def get_assignments(self, hit_id, status=None, The number of assignments on the page in the filtered results list, equivalent to the number of assignments being returned by this call. - A non-negative integer + A non-negative integer, as a string. PageNumber The number of the page in the filtered results list being returned. - A positive integer + A positive integer, as a string. TotalNumResults The total number of HITs in the filtered results list based on this call. - A non-negative integer + A non-negative integer, as a string. 
The ResultSet will contain zero or more Assignment objects diff --git a/tests/mturk/reviewable_hits.doctest b/tests/mturk/reviewable_hits.doctest index 113a056efd..0d9cfea1b0 100644 --- a/tests/mturk/reviewable_hits.doctest +++ b/tests/mturk/reviewable_hits.doctest @@ -84,10 +84,10 @@ True >>> len(assignments_rs) == int(assignments_rs.NumResults) True ->>> assignments_rs.PageNumber -u'1' +>>> int(assignments_rs.PageNumber) +1 ->>> assignments_rs.TotalNumResults >= 1 +>>> int(assignments_rs.TotalNumResults) >= 1 True # should contain at least one Assignment object From 34a56eeaef39b30262fd400e680c82a9c173f444 Mon Sep 17 00:00:00 2001 From: Kodi Arfer Date: Sun, 2 Mar 2014 15:09:06 -0500 Subject: [PATCH 35/60] [MTurk CLI] Added "give-qual" and "revoke-qual" commands. --- bin/mturk | 39 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 38 insertions(+), 1 deletion(-) diff --git a/bin/mturk b/bin/mturk index e0b4bab49e..d490ba20f1 100755 --- a/bin/mturk +++ b/bin/mturk @@ -1,5 +1,5 @@ #!/usr/bin/env python -# Copyright 2012 Kodi Arfer +# Copyright 2012, 2014 Kodi Arfer # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the @@ -315,6 +315,16 @@ def unreject_assignments(message, assignments): def notify_workers(subject, text, workers): con.notify_workers(workers, subject, text) +def give_qualification(qualification, workers, value = 1, notify = True): + for w in workers: + con.assign_qualification(qualification, w, value, notify) + if interactive: print 'Gave to', w + +def revoke_qualification(qualification, workers, message = None): + for w in workers: + con.revoke_qualification(w, qualification, message) + if interactive: print 'Revoked from', w + # -------------------------------------------------- # Mainline code # -------------------------------------------------- @@ -435,6 +445,33 @@ if __name__ == '__main__': sub.set_defaults(f = notify_workers, a = lambda: [args.subject, 
args.message, args.worker]) + sub = subs.add_parser('give-qual', + help = 'give a qualification to some workers') + sub.add_argument('qual', + help = 'ID of the qualification') + sub.add_argument('worker', nargs = '+', + help = 'ID of a worker') + sub.add_argument('-v', '--value', dest = 'value', + metavar = 'N', type = int, default = 1, + help = 'value of the qualification') + sub.add_argument('--dontnotify', dest = 'notify', + action = 'store_false', default = True, + help = "don't notify workers") + sub.set_defaults(f = give_qualification, a = lambda: + [args.qual, args.worker, args.value, args.notify]) + + sub = subs.add_parser('revoke-qual', + help = 'revoke a qualification from some workers') + sub.add_argument('qual', + help = 'ID of the qualification') + sub.add_argument('worker', nargs = '+', + help = 'ID of a worker') + sub.add_argument('-m', '--message', dest = 'message', + metavar = 'TEXT', + help = 'the reason the qualification was revoked (shown to workers in an email sent by MTurk)') + sub.set_defaults(f = revoke_qualification, a = lambda: + [args.qual, args.worker, args.message]) + args = parser.parse_args() init_by_args(args) From 80cbf5c67cb9321a4f04e75f0c4fb8420305dd6f Mon Sep 17 00:00:00 2001 From: Kodi Arfer Date: Sun, 2 Mar 2014 15:28:16 -0500 Subject: [PATCH 36/60] [MTurk CLI] Use all caps for positional metavars. 
--- bin/mturk | 58 +++++++++++++++++++++++++++---------------------------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/bin/mturk b/bin/mturk index d490ba20f1..0c70971ed8 100755 --- a/bin/mturk +++ b/bin/mturk @@ -342,10 +342,10 @@ if __name__ == '__main__': sub = subs.add_parser('hit', help = 'get information about a HIT') - sub.add_argument('hit', + sub.add_argument('HIT', help = 'nickname or ID of the HIT to show') sub.set_defaults(f = show_hit, a = lambda: - [get_hitid(args.hit)]) + [get_hitid(args.HIT)]) sub = subs.add_parser('hits', help = 'list all your HITs') @@ -355,7 +355,7 @@ if __name__ == '__main__': help = 'create a new HIT (external questions only)', epilog = example_config_file, formatter_class = argparse.RawDescriptionHelpFormatter) - sub.add_argument('json_path', + sub.add_argument('JSON_PATH', help = 'path to JSON configuration file for the HIT') sub.add_argument('-u', '--question-url', dest = 'question_url', metavar = 'URL', @@ -367,13 +367,13 @@ if __name__ == '__main__': type = float, metavar = 'PRICE', help = 'reward amount, in USD') sub.set_defaults(f = make_hit, a = lambda: dict( - unjson(args.json_path).items() + [(k, getattr(args, k)) + unjson(args.JSON_PATH).items() + [(k, getattr(args, k)) for k in ('question_url', 'assignments', 'reward') if getattr(args, k) is not None])) sub = subs.add_parser('extend', help = 'add assignments or time to a HIT') - sub.add_argument('hit', + sub.add_argument('HIT', help = 'nickname or ID of the HIT to extend') sub.add_argument('-a', '--assignments', dest = 'assignments', metavar = 'N', type = int, @@ -382,74 +382,74 @@ if __name__ == '__main__': metavar = 'T', help = 'amount of time to add to the expiration date') sub.set_defaults(f = extend_hit, a = lambda: - [get_hitid(args.hit), args.assignments, + [get_hitid(args.HIT), args.assignments, args.time and parse_duration(args.time)]) sub = subs.add_parser('expire', help = 'force a HIT to expire without deleting it') - 
sub.add_argument('hit', + sub.add_argument('HIT', help = 'nickname or ID of the HIT to expire') sub.set_defaults(f = expire_hit, a = lambda: - [get_hitid(args.hit)]) + [get_hitid(args.HIT)]) sub = subs.add_parser('rm', help = 'delete a HIT') - sub.add_argument('hit', + sub.add_argument('HIT', help = 'nickname or ID of the HIT to delete') sub.set_defaults(f = delete_hit, a = lambda: - [get_hitid(args.hit)]) + [get_hitid(args.HIT)]) sub = subs.add_parser('as', help = "list a HIT's submitted assignments") - sub.add_argument('hit', + sub.add_argument('HIT', help = 'nickname or ID of the HIT to get assignments for') sub.add_argument('-r', '--reviewable', dest = 'only_reviewable', action = 'store_true', help = 'show only unreviewed assignments') sub.set_defaults(f = list_assignments, a = lambda: - [get_hitid(args.hit), args.only_reviewable]) + [get_hitid(args.HIT), args.only_reviewable]) for command, fun, helpmsg in [ ('approve', approve_assignments, 'approve assignments'), ('reject', reject_assignments, 'reject assignments'), ('unreject', unreject_assignments, 'approve previously rejected assignments')]: sub = subs.add_parser(command, help = helpmsg) - sub.add_argument('assignment', nargs = '+', + sub.add_argument('ASSIGNMENT', nargs = '+', help = 'ID of an assignment') sub.add_argument('-m', '--message', dest = 'message', metavar = 'TEXT', help = 'feedback message shown to workers') sub.set_defaults(f = fun, a = lambda: - [args.message, args.assignment]) + [args.message, args.ASSIGNMENT]) sub = subs.add_parser('bonus', help = 'give some workers a bonus') - sub.add_argument('amount', type = float, + sub.add_argument('AMOUNT', type = float, help = 'bonus amount, in USD') - sub.add_argument('message', + sub.add_argument('MESSAGE', help = 'the reason for the bonus (shown to workers in an email sent by MTurk)') - sub.add_argument('widaid', nargs = '+', + sub.add_argument('WIDAID', nargs = '+', help = 'a WORKER_ID,ASSIGNMENT_ID pair') sub.set_defaults(f = grant_bonus, a = 
lambda: - [args.message, args.amount, - [p.split(',') for p in args.widaid]]) + [args.MESSAGE, args.AMOUNT, + [p.split(',') for p in args.WIDAID]]) sub = subs.add_parser('notify', help = 'send a message to some workers') - sub.add_argument('subject', + sub.add_argument('SUBJECT', help = 'subject of the message') - sub.add_argument('message', + sub.add_argument('MESSAGE', help = 'text of the message') - sub.add_argument('worker', nargs = '+', + sub.add_argument('WORKER', nargs = '+', help = 'ID of a worker') sub.set_defaults(f = notify_workers, a = lambda: - [args.subject, args.message, args.worker]) + [args.SUBJECT, args.MESSAGE, args.WORKER]) sub = subs.add_parser('give-qual', help = 'give a qualification to some workers') - sub.add_argument('qual', + sub.add_argument('QUAL', help = 'ID of the qualification') - sub.add_argument('worker', nargs = '+', + sub.add_argument('WORKER', nargs = '+', help = 'ID of a worker') sub.add_argument('-v', '--value', dest = 'value', metavar = 'N', type = int, default = 1, @@ -458,19 +458,19 @@ if __name__ == '__main__': action = 'store_false', default = True, help = "don't notify workers") sub.set_defaults(f = give_qualification, a = lambda: - [args.qual, args.worker, args.value, args.notify]) + [args.QUAL, args.WORKER, args.value, args.notify]) sub = subs.add_parser('revoke-qual', help = 'revoke a qualification from some workers') - sub.add_argument('qual', + sub.add_argument('QUAL', help = 'ID of the qualification') - sub.add_argument('worker', nargs = '+', + sub.add_argument('WORKER', nargs = '+', help = 'ID of a worker') sub.add_argument('-m', '--message', dest = 'message', metavar = 'TEXT', help = 'the reason the qualification was revoked (shown to workers in an email sent by MTurk)') sub.set_defaults(f = revoke_qualification, a = lambda: - [args.qual, args.worker, args.message]) + [args.QUAL, args.WORKER, args.message]) args = parser.parse_args() From bd0eae31254dd7ba7eb6955a99663e6ec91bbd0b Mon Sep 17 00:00:00 2001 From: 
Kodi Arfer Date: Tue, 25 Mar 2014 18:02:39 -0400 Subject: [PATCH 37/60] [MTurk CLI] Get all assignments, not just the first 100. --- bin/mturk | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/bin/mturk b/bin/mturk index 0c70971ed8..a388391f85 100755 --- a/bin/mturk +++ b/bin/mturk @@ -40,6 +40,8 @@ default_nicknames_path = os.path.expanduser('~/.boto_mturkcli_hit_nicknames') nicknames = {} nickname_pool = set(string.ascii_lowercase) +get_assignments_page_size = 100 + time_units = dict( s = 1, min = 60, @@ -281,10 +283,20 @@ but apparently, it does.''' nicknames = {k: v for k, v in nicknames.items() if v != hit} def list_assignments(hit, only_reviewable = False): - assignments = map(digest_assignment, con.get_assignments( - hit_id = hit, - page_size = 100, - status = 'Submitted' if only_reviewable else None)) + # Accumulate all relevant assignments, one page of results at + # a time. + assignments = [] + page = 1 + while True: + rs = con.get_assignments( + hit_id = hit, + page_size = get_assignments_page_size, + page_number = page, + status = 'Submitted' if only_reviewable else None) + assignments += map(digest_assignment, rs) + if len(assignments) >= int(rs.TotalNumResults): + break + page += 1 if interactive: print json.dumps(assignments, sort_keys = True, indent = 4) print ' '.join([a['AssignmentId'] for a in assignments]) From ff7700dfcee84ae04831df82578eb16119e7aa92 Mon Sep 17 00:00:00 2001 From: Brandon Adams Date: Fri, 28 Mar 2014 16:09:09 -0700 Subject: [PATCH 38/60] Align volume delete on terminate with AWS defaults Updated the BlockDeviceType to have a default to terminate volumes on instance termination, the same as the AWS default as documented in these pages: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-RegisterImage.html Also updated register_image to allow for users to explicitly set their delete on 
terminate preference when creating an image from a snapshot. Updated tests to work with the new default, and created new tests to assert that the default behavior is in place. --- boto/ec2/blockdevicemapping.py | 2 +- boto/ec2/connection.py | 11 +++++-- tests/unit/ec2/autoscale/test_group.py | 4 +-- tests/unit/ec2/test_blockdevicemapping.py | 4 +-- tests/unit/ec2/test_connection.py | 40 ++++++++++++++++++++++- 5 files changed, 53 insertions(+), 8 deletions(-) mode change 100644 => 100755 tests/unit/ec2/autoscale/test_group.py mode change 100644 => 100755 tests/unit/ec2/test_connection.py diff --git a/boto/ec2/blockdevicemapping.py b/boto/ec2/blockdevicemapping.py index 65ffbb1db1..a5a5a3cb9c 100644 --- a/boto/ec2/blockdevicemapping.py +++ b/boto/ec2/blockdevicemapping.py @@ -35,7 +35,7 @@ def __init__(self, snapshot_id=None, status=None, attach_time=None, - delete_on_termination=False, + delete_on_termination=True, size=None, volume_type=None, iops=None): diff --git a/boto/ec2/connection.py b/boto/ec2/connection.py index d34962f6ec..ff239e5cb3 100644 --- a/boto/ec2/connection.py +++ b/boto/ec2/connection.py @@ -266,7 +266,8 @@ def register_image(self, name=None, description=None, image_location=None, root_device_name=None, block_device_map=None, dry_run=False, virtualization_type=None, sriov_net_support=None, - snapshot_id=None): + snapshot_id=None, + delete_root_volume_on_termination=True): """ Register an image. @@ -315,6 +316,11 @@ def register_image(self, name=None, description=None, image_location=None, as root device for the image. Mutually exclusive with block_device_map, requires root_device_name + :type delete_root_volume_on_termination: bool + :param delete_root_volume_on_termination: Whether to delete the root + volume of the image after instance termination. Only applies when + creating image from snapshot_id. 
+ :rtype: string :return: The new image id """ @@ -334,7 +340,8 @@ def register_image(self, name=None, description=None, image_location=None, if root_device_name: params['RootDeviceName'] = root_device_name if snapshot_id: - root_vol = BlockDeviceType(snapshot_id=snapshot_id) + root_vol = BlockDeviceType(snapshot_id=snapshot_id, + delete_on_termination=delete_root_volume_on_termination) block_device_map = BlockDeviceMapping() block_device_map[root_device_name] = root_vol if block_device_map: diff --git a/tests/unit/ec2/autoscale/test_group.py b/tests/unit/ec2/autoscale/test_group.py old mode 100644 new mode 100755 index d56f7a8a8f..69d26386a8 --- a/tests/unit/ec2/autoscale/test_group.py +++ b/tests/unit/ec2/autoscale/test_group.py @@ -370,10 +370,10 @@ def test_launch_config(self): self.assert_request_parameters({ 'Action': 'CreateLaunchConfiguration', 'BlockDeviceMappings.member.1.DeviceName': '/dev/sdf', - 'BlockDeviceMappings.member.1.Ebs.DeleteOnTermination': 'false', + 'BlockDeviceMappings.member.1.Ebs.DeleteOnTermination': 'true', 'BlockDeviceMappings.member.1.Ebs.SnapshotId': 'snap-12345', 'BlockDeviceMappings.member.2.DeviceName': '/dev/sdg', - 'BlockDeviceMappings.member.2.Ebs.DeleteOnTermination': 'false', + 'BlockDeviceMappings.member.2.Ebs.DeleteOnTermination': 'true', 'BlockDeviceMappings.member.2.Ebs.SnapshotId': 'snap-12346', 'EbsOptimized': 'false', 'LaunchConfigurationName': 'launch_config', diff --git a/tests/unit/ec2/test_blockdevicemapping.py b/tests/unit/ec2/test_blockdevicemapping.py index 7b0e922d8b..ce80f61e7d 100644 --- a/tests/unit/ec2/test_blockdevicemapping.py +++ b/tests/unit/ec2/test_blockdevicemapping.py @@ -96,7 +96,7 @@ def test_run_instances_block_device_mapping(self): # Autoscaling). 
self.set_http_response(status_code=200) dev_sdf = BlockDeviceType(snapshot_id='snap-12345') - dev_sdg = BlockDeviceType(snapshot_id='snap-12346') + dev_sdg = BlockDeviceType(snapshot_id='snap-12346', delete_on_termination=False) bdm = BlockDeviceMapping() bdm['/dev/sdf'] = dev_sdf @@ -112,7 +112,7 @@ def test_run_instances_block_device_mapping(self): self.assert_request_parameters({ 'Action': 'RunInstances', 'BlockDeviceMapping.1.DeviceName': '/dev/sdf', - 'BlockDeviceMapping.1.Ebs.DeleteOnTermination': 'false', + 'BlockDeviceMapping.1.Ebs.DeleteOnTermination': 'true', 'BlockDeviceMapping.1.Ebs.SnapshotId': 'snap-12345', 'BlockDeviceMapping.2.DeviceName': '/dev/sdg', 'BlockDeviceMapping.2.Ebs.DeleteOnTermination': 'false', diff --git a/tests/unit/ec2/test_connection.py b/tests/unit/ec2/test_connection.py old mode 100644 new mode 100755 index c68f29332a..241df9b308 --- a/tests/unit/ec2/test_connection.py +++ b/tests/unit/ec2/test_connection.py @@ -186,7 +186,7 @@ def test_block_device_mapping(self): 'InstanceId': 'instance_id', 'Name': 'name', 'BlockDeviceMapping.1.DeviceName': 'test', - 'BlockDeviceMapping.1.Ebs.DeleteOnTermination': 'false'}, + 'BlockDeviceMapping.1.Ebs.DeleteOnTermination': 'true'}, ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', 'SignatureVersion', 'Timestamp', 'Version']) @@ -1253,6 +1253,44 @@ def test_sriov_net_support_simple(self): 'SignatureVersion', 'Timestamp', 'Version' ]) + + def test_volume_delete_on_termination_off(self): + self.set_http_response(status_code=200) + self.ec2.register_image('name', 'description', + snapshot_id='snap-12345678', + delete_root_volume_on_termination=False) + + self.assert_request_parameters({ + 'Action': 'RegisterImage', + 'Name': 'name', + 'Description': 'description', + 'BlockDeviceMapping.1.DeviceName': None, + 'BlockDeviceMapping.1.Ebs.DeleteOnTermination' : 'false', + 'BlockDeviceMapping.1.Ebs.SnapshotId': 'snap-12345678', + }, ignore_params_values=[ + 'AWSAccessKeyId', 'SignatureMethod', + 
'SignatureVersion', 'Timestamp', + 'Version' + ]) + + + def test_volume_delete_on_termination_default(self): + self.set_http_response(status_code=200) + self.ec2.register_image('name', 'description', + snapshot_id='snap-12345678') + + self.assert_request_parameters({ + 'Action': 'RegisterImage', + 'Name': 'name', + 'Description': 'description', + 'BlockDeviceMapping.1.DeviceName': None, + 'BlockDeviceMapping.1.Ebs.DeleteOnTermination' : 'true', + 'BlockDeviceMapping.1.Ebs.SnapshotId': 'snap-12345678', + }, ignore_params_values=[ + 'AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Timestamp', + 'Version' + ]) class TestTerminateInstances(TestEC2ConnectionBase): From c50a2c97217d1b06b46113823be240d6f24d861a Mon Sep 17 00:00:00 2001 From: mikepope Date: Sat, 29 Mar 2014 21:53:40 -0700 Subject: [PATCH 39/60] Update boto_config_tut.rst Added a graph about where to put the config file in Windows. Those of us who have no *nix experience don't necessarily know how to translate the path information here into Windows pathing. Hope I didn't say something wrong! If so, sorry ... --Mike --- docs/source/boto_config_tut.rst | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/docs/source/boto_config_tut.rst b/docs/source/boto_config_tut.rst index 3e8fec244e..a2917a0d92 100644 --- a/docs/source/boto_config_tut.rst +++ b/docs/source/boto_config_tut.rst @@ -17,23 +17,27 @@ of boto config files. Details ------- -A boto config file is simply a .ini format configuration file that specifies -values for options that control the behavior of the boto library. Upon startup, -the boto library looks for configuration files in the following locations +A boto config file is a text file formatted like an .ini configuration file that specifies +values for options that control the behavior of the boto library. 
In Unix/Linux systems, +on startup, the boto library looks for configuration files in the following locations and in the following order: * /etc/boto.cfg - for site-wide settings that all users on this machine will use * ~/.boto - for user-specific settings -The options are merged into a single, in-memory configuration that is -available as :py:mod:`boto.config`. The :py:class:`boto.pyami.config.Config` +In Windows, create a text file that has any name (e.g. boto.config). It's +recommended that you put this file in your user folder. Then set +a user environment variable named BOTO_CONFIG to the full path of that file. + +The options in the config file are merged into a single, in-memory configuration +that is available as :py:mod:`boto.config`. The :py:class:`boto.pyami.config.Config` class is a subclass of the standard Python :py:class:`ConfigParser.SafeConfigParser` object and inherits all of the methods of that object. In addition, the boto :py:class:`Config ` class defines additional methods that are described on the PyamiConfigMethods page. -An example ``~/.boto`` file should look like:: +An example boto config file might look like:: [Credentials] aws_access_key_id = From e04f94e2e88973369db87f8567c528336b6ae440 Mon Sep 17 00:00:00 2001 From: follower Date: Wed, 2 Apr 2014 02:27:13 +1300 Subject: [PATCH 40/60] Correct typo "possile" --> "possible" --- CONTRIBUTING | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING b/CONTRIBUTING index f29942f89e..d3014eefa0 100644 --- a/CONTRIBUTING +++ b/CONTRIBUTING @@ -40,7 +40,7 @@ Reporting An Issue/Feature * boto * Optionally of the other dependencies involved - * If possile, create a pull request with a (failing) test case demonstrating + * If possible, create a pull request with a (failing) test case demonstrating what's wrong. This makes the process for fixing bugs quicker & gets issues resolved sooner. 
From 5fc3f42f7541e55810e7a538fc37327dd572e78f Mon Sep 17 00:00:00 2001 From: Brandon Adams Date: Wed, 2 Apr 2014 14:06:01 -0700 Subject: [PATCH 41/60] Reverting changes to default delete on terminate This commit leaves the new option present on register_image, but keeps the default of not deleting volumes on instance termination. --- boto/ec2/blockdevicemapping.py | 2 +- boto/ec2/connection.py | 5 +++-- tests/unit/ec2/autoscale/test_group.py | 4 ++-- tests/unit/ec2/test_blockdevicemapping.py | 6 +++--- tests/unit/ec2/test_connection.py | 10 +++++----- 5 files changed, 14 insertions(+), 13 deletions(-) diff --git a/boto/ec2/blockdevicemapping.py b/boto/ec2/blockdevicemapping.py index a5a5a3cb9c..65ffbb1db1 100644 --- a/boto/ec2/blockdevicemapping.py +++ b/boto/ec2/blockdevicemapping.py @@ -35,7 +35,7 @@ def __init__(self, snapshot_id=None, status=None, attach_time=None, - delete_on_termination=True, + delete_on_termination=False, size=None, volume_type=None, iops=None): diff --git a/boto/ec2/connection.py b/boto/ec2/connection.py index ff239e5cb3..e67f011aa1 100644 --- a/boto/ec2/connection.py +++ b/boto/ec2/connection.py @@ -267,7 +267,7 @@ def register_image(self, name=None, description=None, image_location=None, dry_run=False, virtualization_type=None, sriov_net_support=None, snapshot_id=None, - delete_root_volume_on_termination=True): + delete_root_volume_on_termination=False): """ Register an image. @@ -319,7 +319,8 @@ def register_image(self, name=None, description=None, image_location=None, :type delete_root_volume_on_termination: bool :param delete_root_volume_on_termination: Whether to delete the root volume of the image after instance termination. Only applies when - creating image from snapshot_id. + creating image from snapshot_id. Defaults to False. Note that + leaving volumes behind after instance termination is not free. 
:rtype: string :return: The new image id diff --git a/tests/unit/ec2/autoscale/test_group.py b/tests/unit/ec2/autoscale/test_group.py index 69d26386a8..d56f7a8a8f 100755 --- a/tests/unit/ec2/autoscale/test_group.py +++ b/tests/unit/ec2/autoscale/test_group.py @@ -370,10 +370,10 @@ def test_launch_config(self): self.assert_request_parameters({ 'Action': 'CreateLaunchConfiguration', 'BlockDeviceMappings.member.1.DeviceName': '/dev/sdf', - 'BlockDeviceMappings.member.1.Ebs.DeleteOnTermination': 'true', + 'BlockDeviceMappings.member.1.Ebs.DeleteOnTermination': 'false', 'BlockDeviceMappings.member.1.Ebs.SnapshotId': 'snap-12345', 'BlockDeviceMappings.member.2.DeviceName': '/dev/sdg', - 'BlockDeviceMappings.member.2.Ebs.DeleteOnTermination': 'true', + 'BlockDeviceMappings.member.2.Ebs.DeleteOnTermination': 'false', 'BlockDeviceMappings.member.2.Ebs.SnapshotId': 'snap-12346', 'EbsOptimized': 'false', 'LaunchConfigurationName': 'launch_config', diff --git a/tests/unit/ec2/test_blockdevicemapping.py b/tests/unit/ec2/test_blockdevicemapping.py index ce80f61e7d..6455c42116 100644 --- a/tests/unit/ec2/test_blockdevicemapping.py +++ b/tests/unit/ec2/test_blockdevicemapping.py @@ -96,7 +96,7 @@ def test_run_instances_block_device_mapping(self): # Autoscaling). 
self.set_http_response(status_code=200) dev_sdf = BlockDeviceType(snapshot_id='snap-12345') - dev_sdg = BlockDeviceType(snapshot_id='snap-12346', delete_on_termination=False) + dev_sdg = BlockDeviceType(snapshot_id='snap-12346', delete_on_termination=True) bdm = BlockDeviceMapping() bdm['/dev/sdf'] = dev_sdf @@ -112,10 +112,10 @@ def test_run_instances_block_device_mapping(self): self.assert_request_parameters({ 'Action': 'RunInstances', 'BlockDeviceMapping.1.DeviceName': '/dev/sdf', - 'BlockDeviceMapping.1.Ebs.DeleteOnTermination': 'true', + 'BlockDeviceMapping.1.Ebs.DeleteOnTermination': 'false', 'BlockDeviceMapping.1.Ebs.SnapshotId': 'snap-12345', 'BlockDeviceMapping.2.DeviceName': '/dev/sdg', - 'BlockDeviceMapping.2.Ebs.DeleteOnTermination': 'false', + 'BlockDeviceMapping.2.Ebs.DeleteOnTermination': 'true', 'BlockDeviceMapping.2.Ebs.SnapshotId': 'snap-12346', 'ImageId': '123456', 'InstanceType': 'm1.large', diff --git a/tests/unit/ec2/test_connection.py b/tests/unit/ec2/test_connection.py index 241df9b308..1b85e0f69b 100755 --- a/tests/unit/ec2/test_connection.py +++ b/tests/unit/ec2/test_connection.py @@ -186,7 +186,7 @@ def test_block_device_mapping(self): 'InstanceId': 'instance_id', 'Name': 'name', 'BlockDeviceMapping.1.DeviceName': 'test', - 'BlockDeviceMapping.1.Ebs.DeleteOnTermination': 'true'}, + 'BlockDeviceMapping.1.Ebs.DeleteOnTermination': 'false'}, ignore_params_values=['AWSAccessKeyId', 'SignatureMethod', 'SignatureVersion', 'Timestamp', 'Version']) @@ -1254,18 +1254,18 @@ def test_sriov_net_support_simple(self): 'Version' ]) - def test_volume_delete_on_termination_off(self): + def test_volume_delete_on_termination_on(self): self.set_http_response(status_code=200) self.ec2.register_image('name', 'description', snapshot_id='snap-12345678', - delete_root_volume_on_termination=False) + delete_root_volume_on_termination=True) self.assert_request_parameters({ 'Action': 'RegisterImage', 'Name': 'name', 'Description': 'description', 
'BlockDeviceMapping.1.DeviceName': None, - 'BlockDeviceMapping.1.Ebs.DeleteOnTermination' : 'false', + 'BlockDeviceMapping.1.Ebs.DeleteOnTermination' : 'true', 'BlockDeviceMapping.1.Ebs.SnapshotId': 'snap-12345678', }, ignore_params_values=[ 'AWSAccessKeyId', 'SignatureMethod', @@ -1284,7 +1284,7 @@ def test_volume_delete_on_termination_default(self): 'Name': 'name', 'Description': 'description', 'BlockDeviceMapping.1.DeviceName': None, - 'BlockDeviceMapping.1.Ebs.DeleteOnTermination' : 'true', + 'BlockDeviceMapping.1.Ebs.DeleteOnTermination' : 'false', 'BlockDeviceMapping.1.Ebs.SnapshotId': 'snap-12345678', }, ignore_params_values=[ 'AWSAccessKeyId', 'SignatureMethod', From b2ba0fe4d6305cdfe54e94c7fb56a1a023b32e5a Mon Sep 17 00:00:00 2001 From: Nicolas Baccelli Date: Thu, 3 Apr 2014 09:41:19 +0000 Subject: [PATCH 42/60] change boto mws order api to new version : 2013-09-01 --- boto/mws/connection.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/boto/mws/connection.py b/boto/mws/connection.py index 7c068b52de..7d160429d4 100644 --- a/boto/mws/connection.py +++ b/boto/mws/connection.py @@ -33,7 +33,7 @@ api_version_path = { 'Feeds': ('2009-01-01', 'Merchant', '/'), 'Reports': ('2009-01-01', 'Merchant', '/'), - 'Orders': ('2011-01-01', 'SellerId', '/Orders/2011-01-01'), + 'Orders': ('2013-09-01', 'SellerId', '/Orders/2013-09-01'), 'Products': ('2011-10-01', 'SellerId', '/Products/2011-10-01'), 'Sellers': ('2011-07-01', 'SellerId', '/Sellers/2011-07-01'), 'Inbound': ('2010-10-01', 'SellerId', From 04c707879ee47e1c417debdb7726443e1f129827 Mon Sep 17 00:00:00 2001 From: Andy Davidoff Date: Fri, 4 Apr 2014 00:26:30 -0400 Subject: [PATCH 43/60] rejuvenate upstream with calls, factories, parsing --- boto/mws/connection.py | 652 ++++++++++++++++++++++-------- boto/mws/exception.py | 33 +- boto/mws/response.py | 231 ++++++++--- tests/integration/mws/test.py | 4 +- tests/unit/mws/test_connection.py | 41 +- tests/unit/mws/test_response.py | 26 +- 6 files 
changed, 720 insertions(+), 267 deletions(-) diff --git a/boto/mws/connection.py b/boto/mws/connection.py index 7c068b52de..3cf0389db2 100644 --- a/boto/mws/connection.py +++ b/boto/mws/connection.py @@ -1,4 +1,4 @@ -# Copyright (c) 2012 Andy Davidoff http://www.disruptek.com/ +# Copyright (c) 2012-2014 Andy Davidoff http://www.disruptek.com/ # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the @@ -22,26 +22,37 @@ import hashlib import base64 import string +import collections from boto.connection import AWSQueryConnection -from boto.mws.exception import ResponseErrorFactory -from boto.mws.response import ResponseFactory, ResponseElement -from boto.handler import XmlHandler +from boto.exception import BotoServerError +import boto.mws.exception import boto.mws.response +from boto.handler import XmlHandler __all__ = ['MWSConnection'] api_version_path = { - 'Feeds': ('2009-01-01', 'Merchant', '/'), - 'Reports': ('2009-01-01', 'Merchant', '/'), - 'Orders': ('2011-01-01', 'SellerId', '/Orders/2011-01-01'), - 'Products': ('2011-10-01', 'SellerId', '/Products/2011-10-01'), - 'Sellers': ('2011-07-01', 'SellerId', '/Sellers/2011-07-01'), - 'Inbound': ('2010-10-01', 'SellerId', - '/FulfillmentInboundShipment/2010-10-01'), - 'Outbound': ('2010-10-01', 'SellerId', - '/FulfillmentOutboundShipment/2010-10-01'), - 'Inventory': ('2010-10-01', 'SellerId', - '/FulfillmentInventory/2010-10-01'), + 'Feeds': ('2009-01-01', 'Merchant', '/'), + 'Reports': ('2009-01-01', 'Merchant', '/'), + 'Orders': ('2013-09-01', 'SellerId', '/Orders/2013-09-01'), + 'Products': ('2011-10-01', 'SellerId', '/Products/2011-10-01'), + 'Sellers': ('2011-07-01', 'SellerId', '/Sellers/2011-07-01'), + 'Inbound': ('2010-10-01', 'SellerId', + '/FulfillmentInboundShipment/2010-10-01'), + 'Outbound': ('2010-10-01', 'SellerId', + '/FulfillmentOutboundShipment/2010-10-01'), + 'Inventory': ('2010-10-01', 'SellerId', + 
'/FulfillmentInventory/2010-10-01'), + 'Recommendations': ('2013-04-01', 'SellerId', + '/Recommendations/2013-04-01'), + 'CustomerInfo': ('2014-03-01', 'SellerId', + '/CustomerInformation/2014-03-01'), + 'CartInfo': ('2014-03-01', 'SellerId', + '/CartInformation/2014-03-01'), + 'Subscriptions': ('2013-07-01', 'SellerId', + '/Subscriptions/2013-07-01'), + 'OffAmazonPayments': ('2013-01-01', 'SellerId', + '/OffAmazonPayments/2013-01-01'), } content_md5 = lambda c: base64.encodestring(hashlib.md5(c).digest()).strip() decorated_attrs = ('action', 'response', 'section', @@ -94,34 +105,40 @@ def wrapper(*args, **kw): return decorator -def destructure_object(value, into, prefix=''): - if isinstance(value, ResponseElement): - destructure_object(value.__dict__, into, prefix=prefix) - elif isinstance(value, dict): - for name, attr in value.iteritems(): +def destructure_object(value, into, prefix, members=False): + if isinstance(value, boto.mws.response.ResponseElement): + destructure_object(value.__dict__, into, prefix, members=members) + elif isinstance(value, collections.Mapping): + for name in value: if name.startswith('_'): continue - destructure_object(attr, into, prefix=prefix + '.' + name) - elif any([isinstance(value, typ) for typ in (list, set, tuple)]): + destructure_object(value[name], into, prefix + '.' + name, + members=members) + elif isinstance(value, basestring): + into[prefix] = value + elif isinstance(value, collections.Iterable): for index, element in enumerate(value): - newprefix = prefix + '.' + str(index + 1) - destructure_object(element, into, prefix=newprefix) + suffix = (members and '.member.' 
or '.') + str(index + 1) + destructure_object(element, into, prefix + suffix, + members=members) elif isinstance(value, bool): into[prefix] = str(value).lower() else: into[prefix] = value -def structured_objects(*fields): +def structured_objects(*fields, **kwargs): def decorator(func): def wrapper(*args, **kw): + members = kwargs.get('members', False) for field in filter(kw.has_key, fields): - destructure_object(kw.pop(field), kw, prefix=field) + destructure_object(kw.pop(field), kw, field, members=members) return func(*args, **kw) - wrapper.__doc__ = "{0}\nObjects|dicts: {1}".format(func.__doc__, - ', '.join(fields)) + wrapper.__doc__ = "{0}\nElement|Iter|Map: {1}\n" \ + "(ResponseElement or anything iterable/dict-like)" \ + .format(func.__doc__, ', '.join(fields)) return add_attrs_from(func, to=wrapper) return decorator @@ -219,11 +236,6 @@ def api_action(section, quota, restore, *api): def decorator(func, quota=int(quota), restore=float(restore)): version, accesskey, path = api_version_path[section] action = ''.join(api or map(str.capitalize, func.func_name.split('_'))) - if hasattr(boto.mws.response, action + 'Response'): - response = getattr(boto.mws.response, action + 'Response') - else: - response = ResponseFactory(action) - response._action = action def wrapper(self, *args, **kw): kw.setdefault(accesskey, getattr(self, accesskey, None)) @@ -234,7 +246,9 @@ def wrapper(self, *args, **kw): raise KeyError(message) kw['Action'] = action kw['Version'] = version - return func(self, path, response, *args, **kw) + response = self._response_factory(action, connection=self) + request = dict(path=path, quota=quota, restore=restore) + return func(self, request, response, *args, **kw) for attr in decorated_attrs: setattr(wrapper, attr, locals().get(attr)) wrapper.__doc__ = "MWS {0}/{1} API call; quota={2} restore={3:.2f}\n" \ @@ -247,48 +261,77 @@ def wrapper(self, *args, **kw): class MWSConnection(AWSQueryConnection): - ResponseError = ResponseErrorFactory + 
ResponseFactory = boto.mws.response.ResponseFactory + ResponseErrorFactory = boto.mws.exception.ResponseErrorFactory def __init__(self, *args, **kw): kw.setdefault('host', 'mws.amazonservices.com') + self._sandboxed = kw.pop('sandbox', False) self.Merchant = kw.pop('Merchant', None) or kw.get('SellerId') self.SellerId = kw.pop('SellerId', None) or self.Merchant + kw = self._setup_factories(kw.pop('factory_scopes', []), **kw) super(MWSConnection, self).__init__(*args, **kw) + def _setup_factories(self, extrascopes, **kw): + for factory, (scope, Default) in { + 'response_factory': + (boto.mws.response, self.ResponseFactory), + 'response_error_factory': + (boto.mws.exception, self.ResponseErrorFactory), + }.items(): + if factory in kw: + setattr(self, '_' + factory, kw.pop(factory)) + else: + scopes = extrascopes + [scope] + setattr(self, '_' + factory, Default(scopes=scopes)) + return kw + + def _sandboxify(self, path): + if not self._sandboxed: + return path + splat = path.split('/') + splat[-2] += '_Sandbox' + return splat.join('/') + def _required_auth_capability(self): return ['mws'] - def post_request(self, path, params, cls, body='', headers=None, - isXML=True): + def _post_request(self, request, params, parser, body='', headers=None): """Make a POST request, optionally with a content body, and return the response, optionally as raw text. - Modelled off of the inherited get_object/make_request flow. 
""" headers = headers or {} + path = self._sandboxify(request['path']) request = self.build_base_http_request('POST', path, None, data=body, params=params, headers=headers, host=self.host) - response = self._mexe(request, override_num_retries=None) + try: + response = self._mexe(request, override_num_retries=None) + except BotoServerError, bs: + raise self._response_error_factor(bs.status, bs.reason, bs.body) body = response.read() boto.log.debug(body) if not body: boto.log.error('Null body %s' % body) - raise self.ResponseError(response.status, response.reason, body) + raise self._response_error_factory(response.status, + response.reason, body) if response.status != 200: boto.log.error('%s %s' % (response.status, response.reason)) boto.log.error('%s' % body) - raise self.ResponseError(response.status, response.reason, body) - if not isXML: - digest = response.getheader('Content-MD5') + raise self._response_error_factory(response.status, + response.reason, body) + digest = response.getheader('Content-MD5') + if digest is not None: assert content_md5(body) == digest - return body - return self._parse_response(cls, body) + contenttype = response.getheader('Content-Type') + return self._parse_response(parser, contenttype, body) - def _parse_response(self, cls, body): - obj = cls(self) - h = XmlHandler(obj, self) - xml.sax.parseString(body, h) - return obj + def _parse_response(self, parser, contenttype, body): + if not contenttype.startswith('text/xml'): + return body + handler = XmlHandler(parser, self) + xml.sax.parseString(body, handler) + return parser def method_for(self, name): """Return the MWS API method referred to in the argument. 
@@ -325,50 +368,50 @@ def iter_response(self, response): @structured_lists('MarketplaceIdList.Id') @requires(['FeedType']) @api_action('Feeds', 15, 120) - def submit_feed(self, path, response, headers=None, body='', **kw): + def submit_feed(self, request, response, headers=None, body='', **kw): """Uploads a feed for processing by Amazon MWS. """ headers = headers or {} - return self.post_request(path, kw, response, body=body, - headers=headers) + return self._post_request(request, kw, response, body=body, + headers=headers) @structured_lists('FeedSubmissionIdList.Id', 'FeedTypeList.Type', 'FeedProcessingStatusList.Status') @api_action('Feeds', 10, 45) - def get_feed_submission_list(self, path, response, **kw): + def get_feed_submission_list(self, request, response, **kw): """Returns a list of all feed submissions submitted in the previous 90 days. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['NextToken']) @api_action('Feeds', 0, 0) - def get_feed_submission_list_by_next_token(self, path, response, **kw): + def get_feed_submission_list_by_next_token(self, request, response, **kw): """Returns a list of feed submissions using the NextToken parameter. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @structured_lists('FeedTypeList.Type', 'FeedProcessingStatusList.Status') @api_action('Feeds', 10, 45) - def get_feed_submission_count(self, path, response, **kw): + def get_feed_submission_count(self, request, response, **kw): """Returns a count of the feeds submitted in the previous 90 days. 
""" - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @structured_lists('FeedSubmissionIdList.Id', 'FeedTypeList.Type') @api_action('Feeds', 10, 45) - def cancel_feed_submissions(self, path, response, **kw): + def cancel_feed_submissions(self, request, response, **kw): """Cancels one or more feed submissions and returns a count of the feed submissions that were canceled. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['FeedSubmissionId']) @api_action('Feeds', 15, 60) - def get_feed_submission_result(self, path, response, **kw): + def get_feed_submission_result(self, request, response, **kw): """Returns the feed processing report. """ - return self.post_request(path, kw, response, isXML=False) + return self._post_request(request, kw, response) def get_service_status(self, **kw): """Instruct the user on how to get service status. @@ -383,230 +426,230 @@ def get_service_status(self, **kw): @boolean_arguments('ReportOptions=ShowSalesChannel') @requires(['ReportType']) @api_action('Reports', 15, 60) - def request_report(self, path, response, **kw): + def request_report(self, request, response, **kw): """Creates a report request and submits the request to Amazon MWS. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @structured_lists('ReportRequestIdList.Id', 'ReportTypeList.Type', 'ReportProcessingStatusList.Status') @api_action('Reports', 10, 45) - def get_report_request_list(self, path, response, **kw): + def get_report_request_list(self, request, response, **kw): """Returns a list of report requests that you can use to get the ReportRequestId for a report. 
""" - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['NextToken']) @api_action('Reports', 0, 0) - def get_report_request_list_by_next_token(self, path, response, **kw): + def get_report_request_list_by_next_token(self, request, response, **kw): """Returns a list of report requests using the NextToken, which was supplied by a previous request to either GetReportRequestListByNextToken or GetReportRequestList, where the value of HasNext was true in that previous request. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @structured_lists('ReportTypeList.Type', 'ReportProcessingStatusList.Status') @api_action('Reports', 10, 45) - def get_report_request_count(self, path, response, **kw): + def get_report_request_count(self, request, response, **kw): """Returns a count of report requests that have been submitted to Amazon MWS for processing. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @api_action('Reports', 10, 45) - def cancel_report_requests(self, path, response, **kw): + def cancel_report_requests(self, request, response, **kw): """Cancel one or more report requests, returning the count of the canceled report requests and the report request information. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @boolean_arguments('Acknowledged') @structured_lists('ReportRequestIdList.Id', 'ReportTypeList.Type') @api_action('Reports', 10, 60) - def get_report_list(self, path, response, **kw): + def get_report_list(self, request, response, **kw): """Returns a list of reports that were created in the previous 90 days that match the query parameters. 
""" - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['NextToken']) @api_action('Reports', 0, 0) - def get_report_list_by_next_token(self, path, response, **kw): + def get_report_list_by_next_token(self, request, response, **kw): """Returns a list of reports using the NextToken, which was supplied by a previous request to either GetReportListByNextToken or GetReportList, where the value of HasNext was true in the previous call. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @boolean_arguments('Acknowledged') @structured_lists('ReportTypeList.Type') @api_action('Reports', 10, 45) - def get_report_count(self, path, response, **kw): + def get_report_count(self, request, response, **kw): """Returns a count of the reports, created in the previous 90 days, with a status of _DONE_ and that are available for download. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['ReportId']) @api_action('Reports', 15, 60) - def get_report(self, path, response, **kw): + def get_report(self, request, response, **kw): """Returns the contents of a report. """ - return self.post_request(path, kw, response, isXML=False) + return self._post_request(request, kw, response) @requires(['ReportType', 'Schedule']) @api_action('Reports', 10, 45) - def manage_report_schedule(self, path, response, **kw): + def manage_report_schedule(self, request, response, **kw): """Creates, updates, or deletes a report request schedule for a specified report type. 
""" - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @structured_lists('ReportTypeList.Type') @api_action('Reports', 10, 45) - def get_report_schedule_list(self, path, response, **kw): + def get_report_schedule_list(self, request, response, **kw): """Returns a list of order report requests that are scheduled to be submitted to Amazon MWS for processing. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['NextToken']) @api_action('Reports', 0, 0) - def get_report_schedule_list_by_next_token(self, path, response, **kw): + def get_report_schedule_list_by_next_token(self, request, response, **kw): """Returns a list of report requests using the NextToken, which was supplied by a previous request to either GetReportScheduleListByNextToken or GetReportScheduleList, where the value of HasNext was true in that previous request. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @structured_lists('ReportTypeList.Type') @api_action('Reports', 10, 45) - def get_report_schedule_count(self, path, response, **kw): + def get_report_schedule_count(self, request, response, **kw): """Returns a count of order report requests that are scheduled to be submitted to Amazon MWS. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @boolean_arguments('Acknowledged') @requires(['ReportIdList']) @structured_lists('ReportIdList.Id') @api_action('Reports', 10, 45) - def update_report_acknowledgements(self, path, response, **kw): + def update_report_acknowledgements(self, request, response, **kw): """Updates the acknowledged status of one or more reports. 
""" - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['ShipFromAddress', 'InboundShipmentPlanRequestItems']) @structured_objects('ShipFromAddress', 'InboundShipmentPlanRequestItems') @api_action('Inbound', 30, 0.5) - def create_inbound_shipment_plan(self, path, response, **kw): + def create_inbound_shipment_plan(self, request, response, **kw): """Returns the information required to create an inbound shipment. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['ShipmentId', 'InboundShipmentHeader', 'InboundShipmentItems']) @structured_objects('InboundShipmentHeader', 'InboundShipmentItems') @api_action('Inbound', 30, 0.5) - def create_inbound_shipment(self, path, response, **kw): + def create_inbound_shipment(self, request, response, **kw): """Creates an inbound shipment. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['ShipmentId']) @structured_objects('InboundShipmentHeader', 'InboundShipmentItems') @api_action('Inbound', 30, 0.5) - def update_inbound_shipment(self, path, response, **kw): + def update_inbound_shipment(self, request, response, **kw): """Updates an existing inbound shipment. Amazon documentation is ambiguous as to whether the InboundShipmentHeader and InboundShipmentItems arguments are required. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires_some_of('ShipmentIdList', 'ShipmentStatusList') @structured_lists('ShipmentIdList.Id', 'ShipmentStatusList.Status') @api_action('Inbound', 30, 0.5) - def list_inbound_shipments(self, path, response, **kw): + def list_inbound_shipments(self, request, response, **kw): """Returns a list of inbound shipments based on criteria that you specify. 
""" - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['NextToken']) @api_action('Inbound', 30, 0.5) - def list_inbound_shipments_by_next_token(self, path, response, **kw): + def list_inbound_shipments_by_next_token(self, request, response, **kw): """Returns the next page of inbound shipments using the NextToken parameter. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['ShipmentId'], ['LastUpdatedAfter', 'LastUpdatedBefore']) @api_action('Inbound', 30, 0.5) - def list_inbound_shipment_items(self, path, response, **kw): + def list_inbound_shipment_items(self, request, response, **kw): """Returns a list of items in a specified inbound shipment, or a list of items that were updated within a specified time frame. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['NextToken']) @api_action('Inbound', 30, 0.5) - def list_inbound_shipment_items_by_next_token(self, path, response, **kw): + def list_inbound_shipment_items_by_next_token(self, request, response, **kw): """Returns the next page of inbound shipment items using the NextToken parameter. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @api_action('Inbound', 2, 300, 'GetServiceStatus') - def get_inbound_service_status(self, path, response, **kw): + def get_inbound_service_status(self, request, response, **kw): """Returns the operational status of the Fulfillment Inbound Shipment API section. 
""" - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['SellerSkus'], ['QueryStartDateTime']) @structured_lists('SellerSkus.member') @api_action('Inventory', 30, 0.5) - def list_inventory_supply(self, path, response, **kw): + def list_inventory_supply(self, request, response, **kw): """Returns information about the availability of a seller's inventory. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['NextToken']) @api_action('Inventory', 30, 0.5) - def list_inventory_supply_by_next_token(self, path, response, **kw): + def list_inventory_supply_by_next_token(self, request, response, **kw): """Returns the next page of information about the availability of a seller's inventory using the NextToken parameter. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @api_action('Inventory', 2, 300, 'GetServiceStatus') - def get_inventory_service_status(self, path, response, **kw): + def get_inventory_service_status(self, request, response, **kw): """Returns the operational status of the Fulfillment Inventory API section. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['PackageNumber']) @api_action('Outbound', 30, 0.5) - def get_package_tracking_details(self, path, response, **kw): + def get_package_tracking_details(self, request, response, **kw): """Returns delivery tracking information for a package in an outbound shipment for a Multi-Channel Fulfillment order. 
""" - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @structured_objects('Address', 'Items') @requires(['Address', 'Items']) @api_action('Outbound', 30, 0.5) - def get_fulfillment_preview(self, path, response, **kw): + def get_fulfillment_preview(self, request, response, **kw): """Returns a list of fulfillment order previews based on items and shipping speed categories that you specify. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @structured_objects('DestinationAddress', 'Items') @requires(['SellerFulfillmentOrderId', 'DisplayableOrderId', @@ -614,49 +657,49 @@ def get_fulfillment_preview(self, path, response, **kw): 'DestinationAddress', 'DisplayableOrderComment', 'Items']) @api_action('Outbound', 30, 0.5) - def create_fulfillment_order(self, path, response, **kw): + def create_fulfillment_order(self, request, response, **kw): """Requests that Amazon ship items from the seller's inventory to a destination address. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['SellerFulfillmentOrderId']) @api_action('Outbound', 30, 0.5) - def get_fulfillment_order(self, path, response, **kw): + def get_fulfillment_order(self, request, response, **kw): """Returns a fulfillment order based on a specified SellerFulfillmentOrderId. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @api_action('Outbound', 30, 0.5) - def list_all_fulfillment_orders(self, path, response, **kw): + def list_all_fulfillment_orders(self, request, response, **kw): """Returns a list of fulfillment orders fulfilled after (or at) a specified date or by fulfillment method. 
""" - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['NextToken']) @api_action('Outbound', 30, 0.5) - def list_all_fulfillment_orders_by_next_token(self, path, response, **kw): + def list_all_fulfillment_orders_by_next_token(self, request, response, **kw): """Returns the next page of inbound shipment items using the NextToken parameter. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['SellerFulfillmentOrderId']) @api_action('Outbound', 30, 0.5) - def cancel_fulfillment_order(self, path, response, **kw): + def cancel_fulfillment_order(self, request, response, **kw): """Requests that Amazon stop attempting to fulfill an existing fulfillment order. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @api_action('Outbound', 2, 300, 'GetServiceStatus') - def get_outbound_service_status(self, path, response, **kw): + def get_outbound_service_status(self, request, response, **kw): """Returns the operational status of the Fulfillment Outbound API section. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['CreatedAfter'], ['LastUpdatedAfter']) @exclusive(['CreatedAfter'], ['LastUpdatedAfter']) @@ -670,7 +713,7 @@ def get_outbound_service_status(self, path, response, **kw): @structured_lists('MarketplaceId.Id', 'OrderStatus.Status', 'FulfillmentChannel.Channel', 'PaymentMethod.') @api_action('Orders', 6, 60) - def list_orders(self, path, response, **kw): + def list_orders(self, request, response, **kw): """Returns a list of orders created or updated during a time frame that you specify. 
""" @@ -685,145 +728,424 @@ def list_orders(self, path, response, **kw): message = "Don't include {0} when specifying " \ "{1}".format(' or '.join(dont), do) raise AssertionError(message) - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['NextToken']) @api_action('Orders', 6, 60) - def list_orders_by_next_token(self, path, response, **kw): + def list_orders_by_next_token(self, request, response, **kw): """Returns the next page of orders using the NextToken value that was returned by your previous request to either ListOrders or ListOrdersByNextToken. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['AmazonOrderId']) @structured_lists('AmazonOrderId.Id') @api_action('Orders', 6, 60) - def get_order(self, path, response, **kw): + def get_order(self, request, response, **kw): """Returns an order for each AmazonOrderId that you specify. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['AmazonOrderId']) @api_action('Orders', 30, 2) - def list_order_items(self, path, response, **kw): + def list_order_items(self, request, response, **kw): """Returns order item information for an AmazonOrderId that you specify. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['NextToken']) @api_action('Orders', 30, 2) - def list_order_items_by_next_token(self, path, response, **kw): + def list_order_items_by_next_token(self, request, response, **kw): """Returns the next page of order items using the NextToken value that was returned by your previous request to either ListOrderItems or ListOrderItemsByNextToken. 
""" - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @api_action('Orders', 2, 300, 'GetServiceStatus') - def get_orders_service_status(self, path, response, **kw): + def get_orders_service_status(self, request, response, **kw): """Returns the operational status of the Orders API section. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['MarketplaceId', 'Query']) @api_action('Products', 20, 20) - def list_matching_products(self, path, response, **kw): + def list_matching_products(self, request, response, **kw): """Returns a list of products and their attributes, ordered by relevancy, based on a search query that you specify. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['MarketplaceId', 'ASINList']) @structured_lists('ASINList.ASIN') @api_action('Products', 20, 20) - def get_matching_product(self, path, response, **kw): + def get_matching_product(self, request, response, **kw): """Returns a list of products and their attributes, based on a list of ASIN values that you specify. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['MarketplaceId', 'IdType', 'IdList']) @structured_lists('IdList.Id') @api_action('Products', 20, 20) - def get_matching_product_for_id(self, path, response, **kw): + def get_matching_product_for_id(self, request, response, **kw): """Returns a list of products and their attributes, based on a list of Product IDs that you specify. 
""" - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['MarketplaceId', 'SellerSKUList']) @structured_lists('SellerSKUList.SellerSKU') @api_action('Products', 20, 10, 'GetCompetitivePricingForSKU') - def get_competitive_pricing_for_sku(self, path, response, **kw): + def get_competitive_pricing_for_sku(self, request, response, **kw): """Returns the current competitive pricing of a product, based on the SellerSKUs and MarketplaceId that you specify. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['MarketplaceId', 'ASINList']) @structured_lists('ASINList.ASIN') @api_action('Products', 20, 10, 'GetCompetitivePricingForASIN') - def get_competitive_pricing_for_asin(self, path, response, **kw): + def get_competitive_pricing_for_asin(self, request, response, **kw): """Returns the current competitive pricing of a product, based on the ASINs and MarketplaceId that you specify. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['MarketplaceId', 'SellerSKUList']) @structured_lists('SellerSKUList.SellerSKU') @api_action('Products', 20, 5, 'GetLowestOfferListingsForSKU') - def get_lowest_offer_listings_for_sku(self, path, response, **kw): + def get_lowest_offer_listings_for_sku(self, request, response, **kw): """Returns the lowest price offer listings for a specific product by item condition and SellerSKUs. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['MarketplaceId', 'ASINList']) @structured_lists('ASINList.ASIN') @api_action('Products', 20, 5, 'GetLowestOfferListingsForASIN') - def get_lowest_offer_listings_for_asin(self, path, response, **kw): + def get_lowest_offer_listings_for_asin(self, request, response, **kw): """Returns the lowest price offer listings for a specific product by item condition and ASINs. 
""" - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['MarketplaceId', 'SellerSKU']) @api_action('Products', 20, 20, 'GetProductCategoriesForSKU') - def get_product_categories_for_sku(self, path, response, **kw): + def get_product_categories_for_sku(self, request, response, **kw): """Returns the product categories that a SellerSKU belongs to. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['MarketplaceId', 'ASIN']) @api_action('Products', 20, 20, 'GetProductCategoriesForASIN') - def get_product_categories_for_asin(self, path, response, **kw): + def get_product_categories_for_asin(self, request, response, **kw): """Returns the product categories that an ASIN belongs to. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @api_action('Products', 2, 300, 'GetServiceStatus') - def get_products_service_status(self, path, response, **kw): + def get_products_service_status(self, request, response, **kw): """Returns the operational status of the Products API section. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @api_action('Sellers', 15, 60) - def list_marketplace_participations(self, path, response, **kw): + def list_marketplace_participations(self, request, response, **kw): """Returns a list of marketplaces that the seller submitting the request can sell in, and a list of participations that include seller-specific information in that marketplace. 
""" - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) @requires(['NextToken']) @api_action('Sellers', 15, 60) - def list_marketplace_participations_by_next_token(self, path, response, + def list_marketplace_participations_by_next_token(self, request, response, **kw): """Returns the next page of marketplaces and participations using the NextToken value that was returned by your previous request to either ListMarketplaceParticipations or ListMarketplaceParticipationsByNextToken. """ - return self.post_request(path, kw, response) + return self._post_request(request, kw, response) + + @requires(['MarketplaceId']) + @api_action('Recommendations', 5, 2) + def get_last_updated_time_for_recommendations(self, request, response, + **kw): + """Checks whether there are active recommendations for each category + for the given marketplace, and if there are, returns the time when + recommendations were last updated for each category. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId']) + @structured_lists('CategoryQueryList.CategoryQuery') + @api_action('Recommendations', 5, 2) + def list_recommendations(self, request, response, **kw): + """Returns your active recommendations for a specific category or for + all categories for a specific marketplace. + """ + return self._post_request(request, kw, response) + + @requires(['NextToken']) + @api_action('Recommendations', 5, 2) + def list_recommendations_by_next_token(self, request, response, **kw): + """Returns the next page of recommendations using the NextToken + parameter. + """ + return self._post_request(request, kw, response) + + @api_action('Recommendations', 2, 300, 'GetServiceStatus') + def get_recommendations_service_status(self, request, response, **kw): + """Returns the operational status of the Recommendations API section. 
+ """ + return self._post_request(request, kw, response) + + @api_action('CustomerInfo', 15, 12) + def list_customers(self, request, response, **kw): + """Returns a list of customer accounts based on search criteria that + you specify. + """ + return self._post_request(request, kw, response) + + @requires(['NextToken']) + @api_action('CustomerInfo', 50, 3) + def list_customers_by_next_token(self, request, response, **kw): + """Returns the next page of customers using the NextToken parameter. + """ + return self._post_request(request, kw, response) + + @requires(['CustomerIdList']) + @structured_lists('CustomerIdList.CustomerId') + @api_action('CustomerInfo', 15, 12) + def get_customers_for_customer_id(self, request, response, **kw): + """Returns a list of customer accounts based on search criteria that + you specify. + """ + return self._post_request(request, kw, response) + + @api_action('CustomerInfo', 2, 300, 'GetServiceStatus') + def get_customerinfo_service_status(self, request, response, **kw): + """Returns the operational status of the Customer Information API + section. + """ + return self._post_request(request, kw, response) + + @requires(['DateRangeStart']) + @api_action('CartInfo', 15, 12) + def list_carts(self, request, response, **kw): + """Returns a list of shopping carts in your Webstore that were last + updated during the time range that you specify. + """ + return self._post_request(request, kw, response) + + @requires(['NextToken']) + @api_action('CartInfo', 50, 3) + def list_carts_by_next_token(self, request, response, **kw): + """Returns the next page of shopping carts using the NextToken + parameter. + """ + return self._post_request(request, kw, response) + + @requires(['CartIdList']) + @structured_lists('CartIdList.CartId') + @api_action('CartInfo', 15, 12) + def get_carts(self, request, response, **kw): + """Returns shopping carts based on the CartId values that you specify. 
+ """ + return self._post_request(request, kw, response) + + @api_action('CartInfo', 2, 300, 'GetServiceStatus') + def get_cartinfo_service_status(self, request, response, **kw): + """Returns the operational status of the Cart Information API section. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'Destination']) + @structured_objects('Destination', members=True) + @api_action('Subscriptions', 25, 0.5) + def register_destination(self, request, response, **kw): + """Specifies a new destination where you want to receive notifications. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'Destination']) + @structured_objects('Destination', members=True) + @api_action('Subscriptions', 25, 0.5) + def deregister_destination(self, request, response, **kw): + """Removes an existing destination from the list of registered + destinations. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId']) + @api_action('Subscriptions', 25, 0.5) + def list_registered_destinations(self, request, response, **kw): + """Lists all current destinations that you have registered. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'Destination']) + @structured_objects('Destination', members=True) + @api_action('Subscriptions', 25, 0.5) + def send_test_notification_to_destination(self, request, response, **kw): + """Sends a test notification to an existing destination. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'Subscription']) + @structured_objects('Subscription', members=True) + @api_action('Subscriptions', 25, 0.5) + def create_subscription(self, request, response, **kw): + """Creates a new subscription for the specified notification type + and destination. 
+ """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'NotificationType', 'Destination']) + @structured_objects('Destination', members=True) + @api_action('Subscriptions', 25, 0.5) + def get_subscription(self, request, response, **kw): + """Gets the subscription for the specified notification type and + destination. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'NotificationType', 'Destination']) + @structured_objects('Destination', members=True) + @api_action('Subscriptions', 25, 0.5) + def delete_subscription(self, request, response, **kw): + """Deletes the subscription for the specified notification type and + destination. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId']) + @api_action('Subscriptions', 25, 0.5) + def list_subscriptions(self, request, response, **kw): + """Returns a list of all your current subscriptions. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'Subscription']) + @structured_objects('Subscription', members=True) + @api_action('Subscriptions', 25, 0.5) + def update_subscription(self, request, response, **kw): + """Updates the subscription for the specified notification type and + destination. + """ + return self._post_request(request, kw, response) + + @api_action('Subscriptions', 2, 300, 'GetServiceStatus') + def get_subscriptions_service_status(self, request, response, **kw): + """Returns the operational status of the Subscriptions API section. + """ + return self._post_request(request, kw, response) + + @requires(['AmazonOrderReferenceId', 'OrderReferenceAttributes']) + @structured_objects('OrderReferenceAttributes') + @api_action('OffAmazonPayments', 10, 1) + def set_order_reference_details(self, request, response, **kw): + """Sets order reference details such as the order total and a + description for the order. 
+ """ + return self._post_request(request, kw, response) + + @requires(['AmazonOrderReferenceId']) + @api_action('OffAmazonPayments', 20, 2) + def get_order_reference_details(self, request, response, **kw): + """Returns details about the Order Reference object and its current + state. + """ + return self._post_request(request, kw, response) + + @requires(['AmazonOrderReferenceId']) + @api_action('OffAmazonPayments', 10, 1) + def confirm_order_reference(self, request, response, **kw): + """Confirms that the order reference is free of constraints and all + required information has been set on the order reference. + """ + return self._post_request(request, kw, response) + + @requires(['AmazonOrderReferenceId']) + @api_action('OffAmazonPayments', 10, 1) + def cancel_order_reference(self, request, response, **kw): + """Cancel an order reference; all authorizations associated with + this order reference are also closed. + """ + return self._post_request(request, kw, response) + + @requires(['AmazonOrderReferenceId']) + @api_action('OffAmazonPayments', 10, 1) + def close_order_reference(self, request, response, **kw): + """Confirms that an order reference has been fulfilled (fully + or partially) and that you do not expect to create any new + authorizations on this order reference. + """ + return self._post_request(request, kw, response) + + @requires(['AmazonOrderReferenceId', 'AuthorizationReferenceId', + 'AuthorizationAmount']) + @structured_objects('AuthorizationAmount') + @api_action('OffAmazonPayments', 10, 1) + def authorize(self, request, response, **kw): + """Reserves a specified amount against the payment method(s) stored in + the order reference. + """ + return self._post_request(request, kw, response) + + @requires(['AmazonAuthorizationId']) + @api_action('OffAmazonPayments', 20, 2) + def get_authorization_details(self, request, response, **kw): + """Returns the status of a particular authorization and the total + amount captured on the authorization. 
+ """ + return self._post_request(request, kw, response) + + @requires(['AmazonAuthorizationId', 'CaptureReferenceId', 'CaptureAmount']) + @structured_objects('CaptureAmount') + @api_action('OffAmazonPayments', 10, 1) + def capture(self, request, response, **kw): + """Captures funds from an authorized payment instrument. + """ + return self._post_request(request, kw, response) + + @requires(['AmazonCaptureId']) + @api_action('OffAmazonPayments', 20, 2) + def get_capture_details(self, request, response, **kw): + """Returns the status of a particular capture and the total amount + refunded on the capture. + """ + return self._post_request(request, kw, response) + + @requires(['AmazonAuthorizationId']) + @api_action('OffAmazonPayments', 10, 1) + def close_authorization(self, request, response, **kw): + """Closes an authorization. + """ + return self._post_request(request, kw, response) + + @requires(['AmazonCaptureId', 'RefundReferenceId', 'RefundAmount']) + @structured_objects('RefundAmount') + @api_action('OffAmazonPayments', 10, 1) + def refund(self, request, response, **kw): + """Refunds a previously captured amount. + """ + return self._post_request(request, kw, response) + + @requires(['AmazonRefundId']) + @api_action('OffAmazonPayments', 20, 2) + def get_refund_details(self, request, response, **kw): + """Returns the status of a particular refund. + """ + return self._post_request(request, kw, response) + + @api_action('OffAmazonPayments', 2, 300, 'GetServiceStatus') + def get_offamazonpayments_service_status(self, request, response, **kw): + """Returns the operational status of the Off-Amazon Payments API + section. 
+ """ + return self._post_request(request, kw, response) diff --git a/boto/mws/exception.py b/boto/mws/exception.py index d84df4a853..74ab165ef1 100644 --- a/boto/mws/exception.py +++ b/boto/mws/exception.py @@ -1,4 +1,4 @@ -# Copyright (c) 2012 Andy Davidoff http://www.disruptek.com/ +# Copyright (c) 2012-2014 Andy Davidoff http://www.disruptek.com/ # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the @@ -19,19 +19,16 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. from boto.exception import BotoServerError +from boto.mws.response import ResponseFactory -class ResponseErrorFactory(BotoServerError): +class ResponseErrorFactory(ResponseFactory): - def __new__(cls, *args, **kw): - error = BotoServerError(*args, **kw) - try: - newclass = globals()[error.error_code] - except KeyError: - newclass = ResponseError - obj = newclass.__new__(newclass, *args, **kw) - obj.__dict__.update(error.__dict__) - return obj + def __call__(self, status, reason, body=None): + server = BotoServerError(status, reason, body=body) + supplied = self.find_element(server.error_code, '', ResponseError) + print supplied.__name__ + return supplied(status, reason, body=body) class ResponseError(BotoServerError): @@ -41,16 +38,14 @@ class ResponseError(BotoServerError): retry = False def __repr__(self): - return '{0}({1}, {2},\n\t{3})'.format(self.__class__.__name__, - self.status, self.reason, - self.error_message) + return '{0.__name__}({1.reason}: "{1.message}")' \ + .format(self.__class__, self) def __str__(self): - return 'MWS Response Error: {0.status} {0.__class__.__name__} {1}\n' \ - '{2}\n' \ - '{0.error_message}'.format(self, - self.retry and '(Retriable)' or '', - self.__doc__.strip()) + doc = self.__doc__ and self.__doc__.strip() + "\n" or '' + return '{1.__name__}: {0.reason} {2}\n{3}' \ + '{0.message}'.format(self, self.__class__, + self.retry and 
'(Retriable)' or '', doc) class RetriableResponseError(ResponseError): diff --git a/boto/mws/response.py b/boto/mws/response.py index 0960e46e5f..93485479c3 100644 --- a/boto/mws/response.py +++ b/boto/mws/response.py @@ -1,23 +1,21 @@ -# Copyright (c) 2012 Andy Davidoff http://www.disruptek.com/ +# Copyright (c) 2012-2014 Andy Davidoff http://www.disruptek.com/ # -# Permission is hereby granted, free of charge, to any person obtaining a -# copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, dis- -# tribute, sublicense, and/or sell copies of the Software, and to permit -# persons to whom the Software is furnished to do so, subject to the fol- -# lowing conditions: +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to +# deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, dis- tribute, sublicense, and/or +# sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the fol- lowing conditions: # -# The above copyright notice and this permission notice shall be included -# in all copies or substantial portions of the Software. +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. # -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- -# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -# IN THE SOFTWARE. +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- ITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. from decimal import Decimal @@ -62,10 +60,10 @@ def setup(self, parent, name, *args, **kw): setattr(self._parent, self._name, self._clone) def start(self, *args, **kw): - raise NotImplemented + raise NotImplementedError def end(self, *args, **kw): - raise NotImplemented + raise NotImplementedError def teardown(self, *args, **kw): setattr(self._parent, self._name, self._value) @@ -133,14 +131,40 @@ def teardown(self, *args, **kw): super(MemberList, self).teardown(*args, **kw) -def ResponseFactory(action, force=None): - result = force or globals().get(action + 'Result', ResponseElement) - - class MWSResponse(Response): - _name = action + 'Response' - - setattr(MWSResponse, action + 'Result', Element(result)) - return MWSResponse +class ResponseFactory(object): + def __init__(self, scopes=None): + self.scopes = [] if scopes is None else scopes + + def element_factory(self, name, parent): + class DynamicElement(parent): + _name = name + setattr(DynamicElement, '__name__', str(name)) + return DynamicElement + + def search_scopes(self, key): + for scope in self.scopes: + if hasattr(scope, key): + return getattr(scope, key) + if hasattr(scope, '__getitem__'): + if key in scope: + return scope[key] + + def find_element(self, action, suffix, parent): + element = 
self.search_scopes(action + suffix) + if element is not None: + return element + if action.endswith('ByNextToken'): + element = self.search_scopes(action[:-len('ByNextToken')] + suffix) + if element is not None: + return self.element_factory(action + suffix, element) + return self.element_factory(action + suffix, parent) + + def __call__(self, action, connection=None): + response = self.find_element(action, 'Response', Response) + if not hasattr(response, action + 'Result'): + result = self.find_element(action, 'Result', ResponseElement) + setattr(response, action + 'Result', Element(result)) + return response(connection=connection) def strip_namespace(func): @@ -191,8 +215,6 @@ def __repr__(self): name = self.__class__.__name__ if name.startswith('JIT_'): name = '^{0}^'.format(self._name or '') - elif name == 'MWSResponse': - name = '^{0}^'.format(self._name or name) return '{0}{1!r}({2})'.format( name, self.copy(), ', '.join(map(render, attrs))) @@ -262,10 +284,6 @@ class GetFeedSubmissionListResult(ResponseElement): FeedSubmissionInfo = ElementList(FeedSubmissionInfo) -class GetFeedSubmissionListByNextTokenResult(GetFeedSubmissionListResult): - pass - - class GetFeedSubmissionCountResult(ResponseElement): pass @@ -290,10 +308,6 @@ class GetReportRequestListResult(RequestReportResult): ReportRequestInfo = ElementList() -class GetReportRequestListByNextTokenResult(GetReportRequestListResult): - pass - - class CancelReportRequestsResult(RequestReportResult): pass @@ -302,10 +316,6 @@ class GetReportListResult(ResponseElement): ReportInfo = ElementList() -class GetReportListByNextTokenResult(GetReportListResult): - pass - - class ManageReportScheduleResult(ResponseElement): ReportSchedule = Element() @@ -314,10 +324,6 @@ class GetReportScheduleListResult(ManageReportScheduleResult): pass -class GetReportScheduleListByNextTokenResult(GetReportScheduleListResult): - pass - - class UpdateReportAcknowledgementsResult(GetReportListResult): pass @@ -331,18 +337,10 @@ 
class ListRecommendationsResult(ResponseElement):
    """Result element for ListRecommendations."""
    ListingQualityRecommendations = MemberList(ItemIdentifier=Element())


class Customer(ResponseElement):
    """A marketplace customer record with contact and address data."""
    PrimaryContactInfo = Element()
    ShippingAddressList = Element(ShippingAddress=ElementList())
    AssociatedMarketplaces = Element(MarketplaceDomain=ElementList())


class ListCustomersResult(ResponseElement):
    """Result element for ListCustomers."""
    CustomerList = Element(Customer=ElementList(Customer))


class GetCustomersForCustomerIdResult(ListCustomersResult):
    """GetCustomersForCustomerId shares the ListCustomers layout."""
    pass
class CartItem(ResponseElement):
    """A line item in a customer cart, with current and sale prices."""
    CurrentPrice = Element(ComplexMoney)
    SalePrice = Element(ComplexMoney)


class Cart(ResponseElement):
    """A customer cart holding active and saved item lists."""
    ActiveCartItemList = Element(CartItem=ElementList(CartItem))
    SavedCartItemList = Element(CartItem=ElementList(CartItem))


class ListCartsResult(ResponseElement):
    CartList = Element(Cart=ElementList(Cart))


class GetCartsResult(ListCartsResult):
    pass


class Destination(ResponseElement):
    AttributeList = MemberList()


class ListRegisteredDestinationsResult(ResponseElement):
    DestinationList = MemberList(Destination)


class Subscription(ResponseElement):
    Destination = Element(Destination)


class GetSubscriptionResult(ResponseElement):
    Subscription = Element(Subscription)


class ListSubscriptionsResult(ResponseElement):
    SubscriptionList = MemberList(Subscription)


class OrderReferenceDetails(ResponseElement):
    """Off-Amazon Payments order reference, including buyer and totals."""
    Buyer = Element()
    OrderTotal = Element(ComplexMoney)
    Destination = Element(PhysicalDestination=Element())
    SellerOrderAttributes = Element()
    OrderReferenceStatus = Element()
    Constraints = ElementList()


class SetOrderReferenceDetailsResult(ResponseElement):
    OrderReferenceDetails = Element(OrderReferenceDetails)


class GetOrderReferenceDetailsResult(SetOrderReferenceDetailsResult):
    pass


class AuthorizationDetails(ResponseElement):
    """Details of a payment authorization and its accrued amounts."""
    AuthorizationAmount = Element(ComplexMoney)
    CapturedAmount = Element(ComplexMoney)
    AuthorizationFee = Element(ComplexMoney)
    AuthorizationStatus = Element()


class AuthorizeResult(ResponseElement):
    AuthorizationDetails = Element(AuthorizationDetails)


class GetAuthorizationDetailsResult(AuthorizeResult):
    pass


class CaptureDetails(ResponseElement):
    """Details of a payment capture and its fees/refunds."""
    CaptureAmount = Element(ComplexMoney)
    RefundedAmount = Element(ComplexMoney)
    CaptureFee = Element(ComplexMoney)
    CaptureStatus = Element()


class CaptureResult(ResponseElement):
    CaptureDetails = Element(CaptureDetails)


class GetCaptureDetailsResult(CaptureResult):
    pass


class RefundDetails(ResponseElement):
    """Details of a refund and its status."""
    RefundAmount = Element(ComplexMoney)
    FeeRefunded = Element(ComplexMoney)
    RefundStatus = Element()


class RefundResult(ResponseElement):
    RefundDetails = Element(RefundDetails)


class GetRefundDetails(RefundResult):
    pass


# Every other Get* action declares a class named "<Action>Result" so that
# ResponseFactory.search_scopes() can find it (cf. GetAuthorizationDetailsResult,
# GetCaptureDetailsResult above); GetRefundDetails alone lacks the suffix, which
# would make the factory synthesize a bare ResponseElement and silently drop the
# RefundDetails parsing structure. Provide the properly-named alias while
# keeping the old name for backward compatibility.
GetRefundDetailsResult = GetRefundDetails
- response = ResponseElement(name='Prefix') + response = ResponseElement() response.C = 'four' response.D = 'five' inputs = [ ('A', 'B'), ['B', 'A'], set(['C']), False, 'String', {'A': 'one', 'B': 'two'}, response, + {'A': 'one', 'B': 'two', + 'C': [{'D': 'four', 'E': 'five'}, + {'F': 'six', 'G': 'seven'}]}, ] outputs = [ {'Prefix.1': 'A', 'Prefix.2': 'B'}, @@ -66,10 +70,16 @@ def test_destructure_object(self): {'Prefix': 'false'}, {'Prefix': 'String'}, {'Prefix.A': 'one', 'Prefix.B': 'two'}, {'Prefix.C': 'four', 'Prefix.D': 'five'}, + {'Prefix.A': 'one', 'Prefix.B': 'two', + 'Prefix.C.member.1.D': 'four', + 'Prefix.C.member.1.E': 'five', + 'Prefix.C.member.2.F': 'six', + 'Prefix.C.member.2.G': 'seven'} ] for user, amazon in zip(inputs, outputs): result = {} - destructure_object(user, result, prefix='Prefix') + members = user is inputs[-1] + destructure_object(user, result, prefix='Prefix', members=members) self.assertEqual(result, amazon) def test_built_api_call_map(self): @@ -94,6 +104,31 @@ def test_method_for(self): func = self.service_connection.method_for('NotHereNorThere') self.assertEqual(func, None) + def test_response_factory(self): + connection = self.service_connection + body = self.default_body() + action = 'GetFeedSubmissionList' + parser = connection._response_factory(action, connection=connection) + response = connection._parse_response(parser, 'text/xml', body) + self.assertEqual(response._action, action) + self.assertEqual(response.__class__.__name__, action + 'Response') + self.assertEqual(response._result.__class__, + GetFeedSubmissionListResult) + + class MyResult(GetFeedSubmissionListResult): + _hello = '_world' + + scope = {'GetFeedSubmissionListResult': MyResult} + connection._setup_factories([scope]) + + parser = connection._response_factory(action, connection=connection) + response = connection._parse_response(parser, 'text/xml', body) + self.assertEqual(response._action, action) + self.assertEqual(response.__class__.__name__, action + 
'Response') + self.assertEqual(response._result.__class__, MyResult) + self.assertEqual(response._result._hello, '_world') + self.assertEqual(response._result.HasNext, 'true') + def test_get_service_status(self): with self.assertRaises(AttributeError) as err: self.service_connection.get_service_status() diff --git a/tests/unit/mws/test_response.py b/tests/unit/mws/test_response.py index 9172aa7af1..7d2549abac 100644 --- a/tests/unit/mws/test_response.py +++ b/tests/unit/mws/test_response.py @@ -30,7 +30,7 @@ class Test9Result(ResponseElement): Bam """ - obj = self.check_issue('Test9', Test9Result, text) + obj = self.check_issue(Test9Result, text) Item = obj._result.Item useful = lambda x: not x[0].startswith('_') nest = dict(filter(useful, Item.Nest.__dict__.items())) @@ -60,7 +60,7 @@ class Test8Result(ResponseElement): 67 """ - obj = self.check_issue('Test8', Test8Result, text) + obj = self.check_issue(Test8Result, text) self.assertSequenceEqual( map(int, obj._result.Item), range(4), @@ -117,7 +117,7 @@ class Test7Result(ResponseElement): """ - obj = self.check_issue('Test7', Test7Result, text) + obj = self.check_issue(Test7Result, text) item = obj._result.Item self.assertEqual(len(item), 3) nests = [z.Nest for z in filter(lambda x: x.Nest, item)] @@ -152,7 +152,7 @@ class Test6Result(ResponseElement): Six """ - obj = self.check_issue('Test6', Test6Result, text) + obj = self.check_issue(Test6Result, text) self.assertSequenceEqual( [e.Value for e in obj._result.Item], ['One', 'Two', 'Six'], @@ -168,7 +168,7 @@ class Test5Result(ResponseElement): text = """ """ - obj = self.check_issue('Test5', Test5Result, text) + obj = self.check_issue(Test5Result, text) self.assertSequenceEqual(obj._result.Item, []) def test_parsing_missing_member_list(self): @@ -177,7 +177,7 @@ class Test4Result(ResponseElement): text = """ """ - obj = self.check_issue('Test4', Test4Result, text) + obj = self.check_issue(Test4Result, text) self.assertSequenceEqual(obj._result.Item, []) def 
test_parsing_element_lists(self): @@ -190,7 +190,7 @@ class Test1Result(ResponseElement): Baz Zoo """ - obj = self.check_issue('Test1', Test1Result, text) + obj = self.check_issue(Test1Result, text) self.assertTrue(len(obj._result.Item) == 3) elements = lambda x: getattr(x, 'Foo', getattr(x, 'Zip', '?')) elements = map(elements, obj._result.Item) @@ -202,7 +202,7 @@ class Test2Result(ResponseElement): text = """ """ - obj = self.check_issue('Test2', Test2Result, text) + obj = self.check_issue(Test2Result, text) self.assertEqual(obj._result.Item, []) def test_parsing_simple_lists(self): @@ -214,12 +214,14 @@ class Test3Result(ResponseElement): Bif Baz """ - obj = self.check_issue('Test3', Test3Result, text) + obj = self.check_issue(Test3Result, text) self.assertSequenceEqual(obj._result.Item, ['Bar', 'Bif', 'Baz']) - def check_issue(self, action, klass, text): - cls = ResponseFactory(action, force=klass) - return self.service_connection._parse_response(cls, text) + def check_issue(self, klass, text): + action = klass.__name__[:-len('Result')] + factory = ResponseFactory(scopes=[{klass.__name__: klass}]) + parser = factory(action, connection=self.service_connection) + return self.service_connection._parse_response(parser, 'text/xml', text) if __name__ == "__main__": From 3cd9e47b91429d7c5c4ccaa576c4305763315d1f Mon Sep 17 00:00:00 2001 From: Andy Davidoff Date: Sat, 5 Apr 2014 02:32:53 -0400 Subject: [PATCH 44/60] GetMyPriceForSKU, GetMyPriceForASIN from @beebus --- boto/mws/connection.py | 16 ++++++++++++++++ boto/mws/response.py | 16 ++++++++++++++++ 2 files changed, 32 insertions(+) diff --git a/boto/mws/connection.py b/boto/mws/connection.py index 3cf0389db2..f73417a107 100644 --- a/boto/mws/connection.py +++ b/boto/mws/connection.py @@ -852,6 +852,22 @@ def get_products_service_status(self, request, response, **kw): """ return self._post_request(request, kw, response) + @requires(['MarketplaceId', 'SellerSKUList']) + @structured_lists('SellerSKUList.SellerSKU') 
+ @api_action('Products', 20, 10, 'GetMyPriceForSKU') + def get_my_price_for_sku(self, request, response, **kw): + """Returns pricing information for your own offer listings, based on SellerSKU. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'ASINList']) + @structured_lists('ASINList.ASIN') + @api_action('Products', 20, 10, 'GetMyPriceForASIN') + def get_my_price_for_asin(self, request, response, **kw): + """Returns pricing information for your own offer listings, based on ASIN. + """ + return self._post_request(request, kw, response) + @api_action('Sellers', 15, 60) def list_marketplace_participations(self, request, response, **kw): """Returns a list of marketplaces that the seller submitting diff --git a/boto/mws/response.py b/boto/mws/response.py index 93485479c3..34e96c3efc 100644 --- a/boto/mws/response.py +++ b/boto/mws/response.py @@ -531,6 +531,11 @@ class LowestOfferListing(ResponseElement): Price = Element(Price) +class Offer(ResponseElement): + BuyingPrice = Element(Price) + RegularPrice = Element(ComplexMoney) + + class Product(ResponseElement): _namespace = 'ns2' Identifiers = Element(MarketplaceASIN=Element(), @@ -548,6 +553,9 @@ class Product(ResponseElement): LowestOfferListings = Element( LowestOfferListing=ElementList(LowestOfferListing), ) + Offers = Element( + Offer=ElementList(Offer), + ) class ListMatchingProductsResult(ResponseElement): @@ -591,6 +599,14 @@ class GetLowestOfferListingsForASINResponse(ProductsBulkOperationResponse): pass +class GetMyPriceForSKUResponse(ProductsBulkOperationResponse): + pass + + +class GetMyPriceForASINResponse(ProductsBulkOperationResponse): + pass + + class ProductCategory(ResponseElement): def __init__(self, *args, **kw): From ccf016395cfbf1a2c898f3b751a8f8d82f62f7d2 Mon Sep 17 00:00:00 2001 From: Aron Rosenberg Date: Fri, 11 Apr 2014 12:07:40 -0700 Subject: [PATCH 45/60] Implement new Cloudsearch API 2013-01-01 as cloudsearch2 module --- boto/__init__.py | 20 +- 
boto/cloudsearch2/__init__.py | 45 + boto/cloudsearch2/document.py | 269 +++++ boto/cloudsearch2/domain.py | 399 ++++++++ boto/cloudsearch2/layer1.py | 916 ++++++++++++++++++ boto/cloudsearch2/layer2.py | 77 ++ boto/cloudsearch2/optionstatus.py | 263 +++++ boto/cloudsearch2/search.py | 364 +++++++ tests/integration/cloudsearch2/__init__.py | 21 + .../cloudsearch2/test_cert_verification.py | 39 + tests/integration/cloudsearch2/test_layers.py | 74 ++ tests/unit/cloudsearch2/__init__.py | 1 + tests/unit/cloudsearch2/test_connection.py | 228 +++++ tests/unit/cloudsearch2/test_document.py | 324 +++++++ tests/unit/cloudsearch2/test_exceptions.py | 37 + tests/unit/cloudsearch2/test_search.py | 431 ++++++++ 16 files changed, 3507 insertions(+), 1 deletion(-) create mode 100644 boto/cloudsearch2/__init__.py create mode 100644 boto/cloudsearch2/document.py create mode 100644 boto/cloudsearch2/domain.py create mode 100644 boto/cloudsearch2/layer1.py create mode 100644 boto/cloudsearch2/layer2.py create mode 100644 boto/cloudsearch2/optionstatus.py create mode 100644 boto/cloudsearch2/search.py create mode 100644 tests/integration/cloudsearch2/__init__.py create mode 100644 tests/integration/cloudsearch2/test_cert_verification.py create mode 100644 tests/integration/cloudsearch2/test_layers.py create mode 100644 tests/unit/cloudsearch2/__init__.py create mode 100644 tests/unit/cloudsearch2/test_connection.py create mode 100644 tests/unit/cloudsearch2/test_document.py create mode 100644 tests/unit/cloudsearch2/test_exceptions.py create mode 100644 tests/unit/cloudsearch2/test_search.py diff --git a/boto/__init__.py b/boto/__init__.py index 35539123f2..791a5e7f2d 100644 --- a/boto/__init__.py +++ b/boto/__init__.py @@ -653,7 +653,7 @@ def connect_cloudsearch(aws_access_key_id=None, :type aws_secret_access_key: string :param aws_secret_access_key: Your AWS Secret Access Key - :rtype: :class:`boto.ec2.autoscale.CloudSearchConnection` + :rtype: 
def connect_cloudsearch2(aws_access_key_id=None,
                         aws_secret_access_key=None,
                         **kwargs):
    """
    Create a Layer2 connection to Amazon CloudSearch (2013-01-01 API).

    :type aws_access_key_id: string
    :param aws_access_key_id: Your AWS Access Key ID

    :type aws_secret_access_key: string
    :param aws_secret_access_key: Your AWS Secret Access Key

    :rtype: :class:`boto.cloudsearch2.layer2.Layer2`
    :return: A connection to Amazon's CloudSearch2 service
    """
    # Imported lazily so that importing boto alone does not pull in the
    # whole cloudsearch2 package.
    from boto.cloudsearch2.layer2 import Layer2
    return Layer2(aws_access_key_id, aws_secret_access_key, **kwargs)
def regions():
    """
    Get all available regions for the Amazon CloudSearch service.

    :rtype: list
    :return: A list of :class:`boto.regioninfo.RegionInfo`
    """
    # Imported here to avoid a circular import at package load time.
    import boto.cloudsearch2.layer1
    return get_regions(
        'cloudsearch',
        connection_cls=boto.cloudsearch2.layer1.Layer1
    )


def connect_to_region(region_name, **kw_params):
    """Connect to *region_name*, or return None if it is unknown."""
    matches = (r for r in regions() if r.name == region_name)
    region = next(matches, None)
    if region is None:
        return None
    return region.connect(**kw_params)
class DocumentServiceConnection(object):
    """
    A CloudSearch document service.

    The DocumentServiceConnection is used to add, remove and update
    documents in CloudSearch. Commands are uploaded to CloudSearch in SDF
    (Search Document Format).

    To generate an appropriate SDF, use :func:`add` to add or update
    documents, as well as :func:`delete` to remove documents.

    Once the set of documents is ready to be indexed, use :func:`commit`
    to send the commands to CloudSearch.

    If there are a lot of documents to index, it may be preferable to
    split the generation of SDF data and the actual uploading into
    CloudSearch. Retrieve the current SDF with :func:`get_sdf`. If this
    file is then uploaded into S3, it can be retrieved back afterwards for
    upload into CloudSearch using :func:`add_sdf_from_s3`.

    The SDF is not cleared after a :func:`commit`. If you wish to continue
    using the DocumentServiceConnection for another batch upload of
    commands, you will need to :func:`clear_sdf` first to stop the
    previous batch of commands from being uploaded again.
    """

    def __init__(self, domain=None, endpoint=None):
        self.domain = domain
        # Fall back to the domain's document endpoint when no explicit
        # endpoint is supplied.
        self.endpoint = endpoint or domain.doc_service_endpoint
        self.documents_batch = []
        self._sdf = None

    def add(self, _id, version, fields, lang='en'):
        """
        Add a document to be processed by the DocumentService.

        The document will not actually be added until :func:`commit`
        is called.

        :type _id: string
        :param _id: A unique ID used to refer to this document.

        :type version: int
        :param version: Version of the document being indexed. If a file
            is being reindexed, the version should be higher than the
            existing one in CloudSearch.

        :type fields: dict
        :param fields: A dictionary of key-value pairs to be uploaded.

        :type lang: string
        :param lang: The language code the data is in. Only 'en' is
            currently supported.
        """
        self.documents_batch.append({
            'type': 'add',
            'id': _id,
            'version': version,
            'lang': lang,
            'fields': fields,
        })

    def delete(self, _id, version):
        """
        Schedule a document to be removed from the CloudSearch service.

        The document will not actually be scheduled for removal until
        :func:`commit` is called.

        :type _id: string
        :param _id: The unique ID of this document.

        :type version: int
        :param version: Version of the document to remove. The delete will
            only occur if this version number is higher than the version
            currently in the index.
        """
        self.documents_batch.append({
            'type': 'delete',
            'id': _id,
            'version': version,
        })

    def get_sdf(self):
        """
        Generate the working set of documents in Search Data Format (SDF).

        :rtype: string
        :returns: JSON-formatted string of the documents in SDF
        """
        # An explicitly-loaded SDF (from S3) takes precedence over the
        # accumulated batch.
        if self._sdf:
            return self._sdf
        return json.dumps(self.documents_batch)

    def clear_sdf(self):
        """
        Clear the working documents from this DocumentServiceConnection.

        This should be used after :func:`commit` if the connection will be
        reused for another set of documents.
        """
        self._sdf = None
        self.documents_batch = []

    def add_sdf_from_s3(self, key_obj):
        """
        Load an SDF from S3.

        Using this method will result in documents added through
        :func:`add` and :func:`delete` being ignored.

        :type key_obj: :class:`boto.s3.key.Key`
        :param key_obj: An S3 key which contains an SDF
        """
        # @todo:: (lucas) would be nice if this could just take an s3://uri..."
        self._sdf = key_obj.get_contents_as_string()

    def commit(self):
        """
        Actually send an SDF to CloudSearch for processing.

        If an SDF file has been explicitly loaded it will be used.
        Otherwise, documents added through :func:`add` and :func:`delete`
        will be used.

        :rtype: :class:`CommitResponse`
        :returns: A summary of documents added and deleted
        """
        sdf = self.get_sdf()

        # A literal null in the SDF usually means a field value was None;
        # the service tends to reject it with a 500, so log context first.
        if ': null' in sdf:
            boto.log.error('null value in sdf detected. This will probably '
                           'raise 500 error.')
            index = sdf.index(': null')
            boto.log.error(sdf[index - 100:index + 100])

        url = "http://%s/%s/documents/batch" % (self.endpoint,
                                                self.domain.layer1.APIVersion)

        # Keep-alive is automatic in a post-1.0 requests world.
        session = requests.Session()
        adapter = requests.adapters.HTTPAdapter(
            pool_connections=20,
            pool_maxsize=50,
            max_retries=5
        )
        session.mount('http://', adapter)
        session.mount('https://', adapter)
        r = session.post(url, data=sdf,
                         headers={'Content-Type': 'application/json'})

        return CommitResponse(r, self, sdf)
class CommitResponse(object):
    """Wrapper for response to Cloudsearch document batch commit.

    :type response: :class:`requests.models.Response`
    :param response: Response from Cloudsearch /documents/batch API

    :type doc_service: :class:`boto.cloudsearch2.document.DocumentServiceConnection`
    :param doc_service: Object containing the documents posted and methods to
        retry

    :raises: :class:`boto.exception.BotoServerError`
    :raises: :class:`boto.cloudsearch2.document.SearchServiceException`
    :raises: :class:`boto.cloudsearch2.document.EncodingError`
    :raises: :class:`boto.cloudsearch2.document.ContentTooLongError`
    """
    def __init__(self, response, doc_service, sdf):
        self.response = response
        self.doc_service = doc_service
        self.sdf = sdf

        try:
            self.content = json.loads(response.content)
        except Exception:
            # Was a bare `except:`, which would also trap SystemExit and
            # KeyboardInterrupt; only unexpected parse failures should be
            # converted into a BotoServerError.
            boto.log.error('Error indexing documents.\nResponse Content:\n'
                           '{0}\n\nSDF:\n{1}'.format(response.content,
                                                     self.sdf))
            raise boto.exception.BotoServerError(self.response.status_code, '',
                                                 body=response.content)

        self.status = self.content['status']
        if self.status == 'error':
            self.errors = [e.get('message')
                           for e in self.content.get('errors', [])]
            # Recognize the two service errors that have dedicated
            # exception types; all others remain in self.errors.
            for e in self.errors:
                if "Illegal Unicode character" in e:
                    raise EncodingError("Illegal Unicode character in document")
                elif e == "The Content-Length is too long":
                    raise ContentTooLongError("Content was too long")
        else:
            self.errors = []

        self.adds = self.content['adds']
        self.deletes = self.content['deletes']
        self._check_num_ops('add', self.adds)
        self._check_num_ops('delete', self.deletes)

    def _check_num_ops(self, type_, response_num):
        """Raise exception if number of ops in response doesn't match commit.

        :type type_: str
        :param type_: Type of commit operation: 'add' or 'delete'

        :type response_num: int
        :param response_num: Number of adds or deletes in the response.

        :raises: :class:`boto.cloudsearch2.document.CommitMismatchError`
        """
        commit_num = len([d for d in self.doc_service.documents_batch
                          if d['type'] == type_])

        if response_num != commit_num:
            raise CommitMismatchError(
                'Incorrect number of {0}s returned. Commit: {1} '
                'Response: {2}'.format(type_, commit_num, response_num))
def handle_bool(value):
    """Coerce a CloudSearch API flag value to a plain bool.

    Accepts the textual forms the service returns ('true'/'True'/'TRUE')
    as well as native True and the integer 1; everything else maps to
    False.
    """
    return value in (True, 'true', 'True', 'TRUE', 1)
+ """ + + def __init__(self, layer1, data): + """ + Constructor - Create a domain object from a layer1 and data params + + :type layer1: :class:`boto.cloudsearch2.layer1.Layer1` object + :param layer1: A :class:`boto.cloudsearch2.layer1.Layer1` object + which is used to perform operations on the domain. + """ + self.layer1 = layer1 + self.update_from_data(data) + + def update_from_data(self, data): + self.created = data['created'] + self.deleted = data['deleted'] + self.processing = data['processing'] + self.requires_index_documents = data['requires_index_documents'] + self.domain_id = data['domain_id'] + self.domain_name = data['domain_name'] + self.search_instance_count = data['search_instance_count'] + self.search_instance_type = data.get('search_instance_type', None) + self.search_partition_count = data['search_partition_count'] + self._doc_service = data['doc_service'] + self._service_arn = data['arn'] + self._search_service = data['search_service'] + + @property + def service_arn(self): + return self._service_arn + + @property + def doc_service_endpoint(self): + return self._doc_service['endpoint'] + + @property + def search_service_endpoint(self): + return self._search_service['endpoint'] + + @property + def created(self): + return self._created + + @created.setter + def created(self, value): + self._created = handle_bool(value) + + @property + def deleted(self): + return self._deleted + + @deleted.setter + def deleted(self, value): + self._deleted = handle_bool(value) + + @property + def processing(self): + return self._processing + + @processing.setter + def processing(self, value): + self._processing = handle_bool(value) + + @property + def requires_index_documents(self): + return self._requires_index_documents + + @requires_index_documents.setter + def requires_index_documents(self, value): + self._requires_index_documents = handle_bool(value) + + @property + def search_partition_count(self): + return self._search_partition_count + + 
@search_partition_count.setter + def search_partition_count(self, value): + self._search_partition_count = int(value) + + @property + def search_instance_count(self): + return self._search_instance_count + + @search_instance_count.setter + def search_instance_count(self, value): + self._search_instance_count = int(value) + + @property + def name(self): + return self.domain_name + + @property + def id(self): + return self.domain_id + + def delete(self): + """ + Delete this domain and all index data associated with it. + """ + return self.layer1.delete_domain(self.name) + + def get_analysis_schemes(self): + """ + Return a list of Analysis Scheme objects. + """ + return self.layer1.describe_analysis_schemes(self.name) + + def get_availability_options(self): + """ + Return a :class:`boto.cloudsearch2.option.AvailabilityOptionsStatus` + object representing the currently defined availability options for + the domain. + :return: OptionsStatus object + :rtype: :class:`boto.cloudsearch2.option.AvailabilityOptionsStatus` object + """ + return AvailabilityOptionsStatus( + self, None, self.layer1.describe_availability_options, + self.layer1.update_availability_options) + + def get_scaling_options(self): + """ + Return a :class:`boto.cloudsearch2.option.ScalingParametersStatus` + object representing the currently defined scaling options for the + domain. + :return: ScalingParametersStatus object + :rtype: :class:`boto.cloudsearch2.option.ScalingParametersStatus` object + """ + return ScalingParametersStatus( + self, None, + self.layer1.describe_scaling_parameters, + self.layer1.update_scaling_parameters) + + def get_access_policies(self): + """ + Return a :class:`boto.cloudsearch2.option.ServicePoliciesStatus` + object representing the currently defined access policies for the + domain. 
+ :return: ServicePoliciesStatus object + :rtype: :class:`boto.cloudsearch2.option.ServicePoliciesStatus` object + """ + return ServicePoliciesStatus(self, None, + self.layer1.describe_service_access_policies, + self.layer1.update_service_access_policies) + + def index_documents(self): + """ + Tells the search domain to start indexing its documents using + the latest text processing options and IndexFields. This + operation must be invoked to make options whose OptionStatus + has OptioState of RequiresIndexDocuments visible in search + results. + """ + self.layer1.index_documents(self.name) + + def get_index_fields(self, field_names=None): + """ + Return a list of index fields defined for this domain. + :return: list of IndexFieldStatus objects + :rtype: list of :class:`boto.cloudsearch2.option.IndexFieldStatus` object + """ + data = self.layer1.describe_index_fields(self.name, field_names) + return [IndexFieldStatus(self, d) for d in data] + + def create_index_field(self, field_name, field_type, + default='', facet=False, returnable=False, + searchable=False, sortable=False, + highlight=False, source_field=None, + analysis_scheme=None): + """ + Defines an ``IndexField``, either replacing an existing + definition or creating a new one. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). Uppercase letters and underscores are not + allowed. + + :type field_name: string + :param field_name: The name of a field in the search index. + + :type field_type: string + :param field_type: The type of field. 
Valid values are + int | double | literal | text | date | latlon | + int-array | double-array | literal-array | text-array | date-array + + :type default: string or int + :param default: The default value for the field. If the + field is of type ``int`` this should be an integer value. + Otherwise, it's a string. + + :type facet: bool + :param facet: A boolean to indicate whether facets + are enabled for this field or not. Does not apply to + fields of type ``int, int-array, text, text-array``. + + :type returnable: bool + :param returnable: A boolean to indicate whether values + of this field can be returned in search results or + used in ranking. + + :type searchable: bool + :param searchable: A boolean to indicate whether search + is enabled for this field or not. + + :type sortable: bool + :param sortable: A boolean to indicate whether sorting + is enabled for this field or not. Does not apply to + fields of array types. + + :type highlight: bool + :param highlight: A boolean to indicate whether highlighting + is enabled for this field or not. Does not apply to + fields of type ``double, int, date, latlon`` + + :type source_field: list of strings or string + :param source_field: For array types, this is the list of fields + to treat as the source. For singular types, pass a string only. + + :type analysis_scheme: string + :param analysis_scheme: The analysis scheme to use for this field. 
+ Only applies to ``text | text-array`` field types + + :return: IndexFieldStatus objects + :rtype: :class:`boto.cloudsearch2.option.IndexFieldStatus` object + + :raises: BaseException, InternalException, LimitExceededException, + InvalidTypeException, ResourceNotFoundException + """ + data = self.layer1.define_index_field(self.name, field_name, field_type, + default=default, facet=facet, + returnable=returnable, + searchable=searchable, + sortable=sortable, + highlight=highlight, + source_field=source_field, + analysis_scheme=analysis_scheme) + return IndexFieldStatus(self, data, + self.layer1.describe_index_fields) + + def get_expressions(self, names=None): + """ + Return a list of rank expressions defined for this domain. + :return: list of ExpressionStatus objects + :rtype: list of :class:`boto.cloudsearch2.option.ExpressionStatus` object + """ + fn = self.layer1.describe_expressions + data = fn(self.name, names) + return [ExpressionStatus(self, d, fn) for d in data] + + def create_expression(self, name, value): + """ + Create a new expression. + + :type name: string + :param name: The name of an expression for processing + during a search request. + + :type value: string + :param value: The expression to evaluate for ranking + or thresholding while processing a search request. The + Expression syntax is based on JavaScript expressions + and supports: + + * Single value, sort enabled numeric fields (int, double, date) + * Other expressions + * The _score variable, which references a document's relevance score + * The _time variable, which references the current epoch time + * Integer, floating point, hex, and octal literals + * Arithmetic operators: + - * / % + * Bitwise operators: | & ^ ~ << >> >>> + * Boolean operators (including the ternary operator): && || ! 
?: + * Comparison operators: < <= == >= > + * Mathematical functions: abs ceil exp floor ln log2 log10 logn + max min pow sqrt pow + * Trigonometric functions: acos acosh asin asinh atan atan2 atanh + cos cosh sin sinh tanh tan + * The haversin distance function + + Expressions always return an integer value from 0 to the maximum + 64-bit signed integer value (2^63 - 1). Intermediate results are + calculated as double-precision floating point values and the return + value is rounded to the nearest integer. If the expression is invalid + or evaluates to a negative value, it returns 0. If the expression + evaluates to a value greater than the maximum, it returns the maximum + value. + + The source data for an Expression can be the name of an + IndexField of type int or double, another Expression or the + reserved name _score. The _score source is + defined to return as a double from 0 to 10.0 (inclusive) to + indicate how relevant a document is to the search request, + taking into account repetition of search terms in the + document and proximity of search terms to each other in + each matching IndexField in the document. + + For more information about using rank expressions to + customize ranking, see the Amazon CloudSearch Developer + Guide. 
+ + :return: ExpressionStatus object + :rtype: :class:`boto.cloudsearch2.option.ExpressionStatus` object + + :raises: BaseException, InternalException, LimitExceededException, + InvalidTypeException, ResourceNotFoundException + """ + data = self.layer1.define_expression(self.name, name, value) + return ExpressionStatus(self, data, + self.layer1.describe_expressions) + + def get_document_service(self): + return DocumentServiceConnection(domain=self) + + def get_search_service(self): + return SearchConnection(domain=self) + + def __repr__(self): + return '' % self.domain_name diff --git a/boto/cloudsearch2/layer1.py b/boto/cloudsearch2/layer1.py new file mode 100644 index 0000000000..08c52e8558 --- /dev/null +++ b/boto/cloudsearch2/layer1.py @@ -0,0 +1,916 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +import boto +import boto.jsonresponse +from boto.compat import json +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo + +#boto.set_stream_logger('cloudsearch') + + +def do_bool(val): + return 'true' if val in [True, 1, '1', 'true'] else 'false' + + +class Layer1(AWSQueryConnection): + + APIVersion = '2013-01-01' + #AuthServiceName = 'sqs' + DefaultRegionName = boto.config.get('Boto', 'cs_region_name', 'us-east-1') + DefaultRegionEndpoint = boto.config.get('Boto', 'cs_region_endpoint', + 'cloudsearch.us-east-1.amazonaws.com') + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, host=None, port=None, + proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, debug=0, + https_connection_factory=None, region=None, path='/', + api_version=None, security_token=None, + validate_certs=True, profile_name=None): + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + self.region = region + AWSQueryConnection.__init__( + self, + host=self.region.endpoint, + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + is_secure=is_secure, + port=port, + proxy=proxy, + proxy_port=proxy_port, + proxy_user=proxy_user, + proxy_pass=proxy_pass, + debug=debug, + https_connection_factory=https_connection_factory, + path=path, + security_token=security_token, + validate_certs=validate_certs, + profile_name=profile_name) + + def _required_auth_capability(self): + return ['hmac-v4'] + + def get_response(self, doc_path, action, params, path='/', + parent=None, verb='GET', list_marker=None): + if not parent: + parent = self + response = self.make_request(action, params, path, verb) + body = response.read() + boto.log.debug(body) + if response.status == 200: + e = boto.jsonresponse.Element( + list_marker=list_marker if list_marker else 'Set', + pythonize_name=True) + h = boto.jsonresponse.XmlHandler(e, parent) + h.parse(body) + inner 
= e + for p in doc_path: + inner = inner.get(p) + if not inner: + return None if list_marker is None else [] + if isinstance(inner, list): + return inner + else: + return dict(**inner) + else: + raise self.ResponseError(response.status, response.reason, body) + + def create_domain(self, domain_name): + """ + Create a new search domain. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). Uppercase letters and underscores are not + allowed. + + :raises: BaseException, InternalException, LimitExceededException + """ + doc_path = ('create_domain_response', + 'create_domain_result', + 'domain_status') + params = {'DomainName': domain_name} + return self.get_response(doc_path, 'CreateDomain', + params, verb='POST') + + def define_analysis_scheme(self, domain_name, name, language, + algorithmic_stemming="none", stemming_dictionary=None, + stopwords=None, synonyms=None): + """ + Updates stemming options used by indexing for the search domain. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). Uppercase letters and underscores are not + allowed. + + :type name: str + :param name: Name of the analysis scheme + + :type language: str + :param language: IETF RFC 4646 lang code or 'mul' for multiple + languages. + + :type algorithmic_stemming: str + :param algorithmic_stemming: Which type of stemming to use. 
+ one of ``none | minimal | light | full`` + + :type stemming_dictionary: dict + :param stemming_dictionary: dict of stemming words + ``{"running": "run", "jumping": "jump"}`` + + :type stopwords: list of strings + :param stopwords: list of stopwords + + :type synonyms: dict + :param synonyms: dict of Array of words to use as synonyms + ``{"aliases": {"running": ["run", "ran"], "jumping": ["jump", "jumped"]}, + "groups": [["sit", "sitting", "sat"], ["hit", "hitting"]]}`` + + :raises: BaseException, InternalException, InvalidTypeException, + LimitExceededException, ResourceNotFoundException + """ + doc_path = ('define_analysis_scheme_response', + 'define_analysis_scheme_result', + 'analysis_scheme') + params = {'DomainName': domain_name, 'AnalysisScheme.AnalysisSchemeName': name, + 'AnalysisScheme.AnalysisSchemeLanguage': language, + 'AnalysisScheme.AnalysisOptions.AlgorithmicStemming': algorithmic_stemming, + 'AnalysisScheme.AnalysisOptions.StemmingDictionary': + json.dumps(stemming_dictionary) if stemming_dictionary else dict(), + 'AnalysisScheme.AnalysisOptions.Stopwords': + json.dumps(stopwords) if stopwords else list(), + 'AnalysisScheme.AnalysisOptions.Synonyms': + json.dumps(synonyms) if synonyms else dict(), + } + + return self.get_response(doc_path, 'DefineAnalysisScheme', + params, verb='POST') + + def define_expression(self, domain_name, name, value): + """ + Defines an Expression, either replacing an existing + definition or creating a new one. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). Uppercase letters and underscores are not + allowed. + + :type name: string + :param name: The name of an expression. 
+ + :type value: string + :param value: The expression to evaluate for ranking or + thresholding while processing a search request. The + Expression syntax is based on JavaScript and supports: + + * Single value, sort enabled numeric fields (int, double, date) + * Other expressions + * The _score variable, which references a document's relevance score + * The _time variable, which references the current epoch time + * Integer, floating point, hex, and octal literals + * Arithmetic operators: + - * / % + * Bitwise operators: | & ^ ~ << >> >>> + * Boolean operators (including the ternary operator): && || ! ?: + * Comparison operators: < <= == >= > + * Mathematical functions: abs ceil exp floor ln log2 log10 logn + max min pow sqrt pow + * Trigonometric functions: acos acosh asin asinh atan atan2 atanh + cos cosh sin sinh tanh tan + * The haversin distance function + + Expressions always return an integer value from 0 to the maximum + 64-bit signed integer value (2^63 - 1). Intermediate results are + calculated as double-precision floating point values and the return + value is rounded to the nearest integer. If the expression is invalid + or evaluates to a negative value, it returns 0. If the expression + evaluates to a value greater than the maximum, it returns the maximum + value. + + The source data for an Expression can be the name of an + IndexField of type int or double, another Expression or the + reserved name _score, or the functions above. The _score source is + defined to return as a double with a floor of 0 to + indicate how relevant a document is to the search request, + taking into account repetition of search terms in the + document and proximity of search terms to each other in + each matching IndexField in the document. + + For more information about using expressions to customize results, + see the Amazon CloudSearch Developer Guide. 
+ + :raises: BaseException, InternalException, LimitExceededException, + InvalidTypeException, ResourceNotFoundException + """ + doc_path = ('define_expression_response', + 'define_expression_result', + 'expression') + params = {'DomainName': domain_name, + 'Expression.ExpressionValue': value, + 'Expression.ExpressionName': name} + return self.get_response(doc_path, 'DefineExpression', + params, verb='POST') + + def define_index_field(self, domain_name, field_name, field_type, + default=None, facet=False, returnable=False, + searchable=False, sortable=False, + highlight=False, source_field=None, + analysis_scheme=None): + """ + Defines an ``IndexField``, either replacing an existing + definition or creating a new one. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). Uppercase letters and underscores are not + allowed. + + :type field_name: string + :param field_name: The name of a field in the search index. + + :type field_type: string + :param field_type: The type of field. Valid values are + int | double | literal | text | date | latlon | + int-array | double-array | literal-array | text-array | date-array + + :type default: string or int + :param default: The default value for the field. If the + field is of type ``int`` this should be an integer value. + Otherwise, it's a string. + + :type facet: bool + :param facet: A boolean to indicate whether facets + are enabled for this field or not. Does not apply to + fields of type ``int, int-array, text, text-array``. + + :type returnable: bool + :param returnable: A boolean to indicate whether values + of this field can be returned in search results or + used in ranking. 
+ + :type searchable: bool + :param searchable: A boolean to indicate whether search + is enabled for this field or not. + + :type sortable: bool + :param sortable: A boolean to indicate whether sorting + is enabled for this field or not. Does not apply to + fields of array types. + + :type highlight: bool + :param highlight: A boolean to indicate whether highlighting + is enabled for this field or not. Does not apply to + fields of type ``double, int, date, latlon`` + + :type source_field: list of strings or string + :param source_field: For array types, this is the list of fields + to treat as the source. For singular types, pass a string only. + + :type analysis_scheme: string + :param analysis_scheme: The analysis scheme to use for this field. + Only applies to ``text | text-array`` field types + + :raises: BaseException, InternalException, LimitExceededException, + InvalidTypeException, ResourceNotFoundException + """ + doc_path = ('define_index_field_response', + 'define_index_field_result', + 'index_field') + params = {'DomainName': domain_name, + 'IndexField.IndexFieldName': field_name, + 'IndexField.IndexFieldType': field_type} + if field_type == 'literal': + if default: + params['IndexField.LiteralOptions.DefaultValue'] = default + params['IndexField.LiteralOptions.FacetEnabled'] = do_bool(facet) + params['IndexField.LiteralOptions.ReturnEnabled'] = do_bool(returnable) + params['IndexField.LiteralOptions.SearchEnabled'] = do_bool(searchable) + params['IndexField.LiteralOptions.SortEnabled'] = do_bool(sortable) + if source_field: + params['IndexField.LiteralOptions.SourceField'] = source_field + elif field_type == 'literal-array': + if default: + params['IndexField.LiteralArrayOptions.DefaultValue'] = default + params['IndexField.LiteralArrayOptions.FacetEnabled'] = do_bool(facet) + params['IndexField.LiteralArrayOptions.ReturnEnabled'] = do_bool(returnable) + params['IndexField.LiteralArrayOptions.SearchEnabled'] = do_bool(searchable) + if source_field: + 
params['IndexField.LiteralArrayOptions.SourceFields'] = ','.join(source_field) + elif field_type == 'int': + if default: + params['IndexField.IntOptions.DefaultValue'] = default + params['IndexField.IntOptions.FacetEnabled'] = do_bool(facet) + params['IndexField.IntOptions.ReturnEnabled'] = do_bool(returnable) + params['IndexField.IntOptions.SearchEnabled'] = do_bool(searchable) + params['IndexField.IntOptions.SortEnabled'] = do_bool(sortable) + if source_field: + params['IndexField.IntOptions.SourceField'] = source_field + elif field_type == 'int-array': + if default: + params['IndexField.IntArrayOptions.DefaultValue'] = default + params['IndexField.IntArrayOptions.FacetEnabled'] = do_bool(facet) + params['IndexField.IntArrayOptions.ReturnEnabled'] = do_bool(returnable) + params['IndexField.IntArrayOptions.SearchEnabled'] = do_bool(searchable) + if source_field: + params['IndexField.IntArrayOptions.SourceFields'] = ','.join(source_field) + elif field_type == 'date': + if default: + params['IndexField.DateOptions.DefaultValue'] = default + params['IndexField.DateOptions.FacetEnabled'] = do_bool(facet) + params['IndexField.DateOptions.ReturnEnabled'] = do_bool(returnable) + params['IndexField.DateOptions.SearchEnabled'] = do_bool(searchable) + params['IndexField.DateOptions.SortEnabled'] = do_bool(sortable) + if source_field: + params['IndexField.DateOptions.SourceField'] = source_field + elif field_type == 'date-array': + if default: + params['IndexField.DateArrayOptions.DefaultValue'] = default + params['IndexField.DateArrayOptions.FacetEnabled'] = do_bool(facet) + params['IndexField.DateArrayOptions.ReturnEnabled'] = do_bool(returnable) + params['IndexField.DateArrayOptions.SearchEnabled'] = do_bool(searchable) + if source_field: + params['IndexField.DateArrayOptions.SourceFields'] = ','.join(source_field) + elif field_type == 'double': + if default: + params['IndexField.DoubleOptions.DefaultValue'] = default + params['IndexField.DoubleOptions.FacetEnabled'] = 
do_bool(facet) + params['IndexField.DoubleOptions.ReturnEnabled'] = do_bool(returnable) + params['IndexField.DoubleOptions.SearchEnabled'] = do_bool(searchable) + params['IndexField.DoubleOptions.SortEnabled'] = do_bool(sortable) + if source_field: + params['IndexField.DoubleOptions.SourceField'] = source_field + elif field_type == 'double-array': + if default: + params['IndexField.DoubleArrayOptions.DefaultValue'] = default + params['IndexField.DoubleArrayOptions.FacetEnabled'] = do_bool(facet) + params['IndexField.DoubleArrayOptions.ReturnEnabled'] = do_bool(returnable) + params['IndexField.DoubleArrayOptions.SearchEnabled'] = do_bool(searchable) + if source_field: + params['IndexField.DoubleArrayOptions.SourceFields'] = ','.join(source_field) + elif field_type == 'text': + if default: + params['IndexField.TextOptions.DefaultValue'] = default + params['IndexField.TextOptions.ReturnEnabled'] = do_bool(returnable) + params['IndexField.TextOptions.HighlightEnabled'] = do_bool(highlight) + params['IndexField.TextOptions.SortEnabled'] = do_bool(sortable) + if source_field: + params['IndexField.TextOptions.SourceField'] = source_field + if analysis_scheme: + params['IndexField.TextOptions.AnalysisScheme'] = analysis_scheme + elif field_type == 'text-array': + if default: + params['IndexField.TextArrayOptions.DefaultValue'] = default + params['IndexField.TextArrayOptions.ReturnEnabled'] = do_bool(returnable) + params['IndexField.TextArrayOptions.HighlightEnabled'] = do_bool(highlight) + if source_field: + params['IndexField.TextArrayOptions.SourceFields'] = ','.join(source_field) + if analysis_scheme: + params['IndexField.TextArrayOptions.AnalysisScheme'] = analysis_scheme + elif field_type == 'latlon': + if default: + params['IndexField.LatLonOptions.DefaultValue'] = default + params['IndexField.LatLonOptions.FacetEnabled'] = do_bool(facet) + params['IndexField.LatLonOptions.ReturnEnabled'] = do_bool(returnable) + params['IndexField.LatLonOptions.SearchEnabled'] = 
do_bool(searchable) + params['IndexField.LatLonOptions.SortEnabled'] = do_bool(sortable) + if source_field: + params['IndexField.LatLonOptions.SourceField'] = source_field + + return self.get_response(doc_path, 'DefineIndexField', + params, verb='POST') + + def define_suggester(self, domain_name, name, source_field, + fuzzy_matching=None, sort_expression=None): + """ + Defines an Expression, either replacing an existing + definition or creating a new one. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). Uppercase letters and underscores are not + allowed. + + :type name: string + :param name: The name of an suggester to use. + + :type source_field: string + :param source_field: The source field name to use for the ``Suggester`` + + :type fuzzy_matching: string or None + :param fuzzy_matching: The optional type of fuzzy matching to use. 
One of + none | low | high + + :type sort_expression: string or None + :param sort_expression: The optional sort expression to use + + :raises: BaseException, InternalException, LimitExceededException, + InvalidTypeException, ResourceNotFoundException + """ + doc_path = ('define_expression_response', + 'define_expression_result', + 'expression') + params = {'DomainName': domain_name, + 'Suggester.SuggesterName': name, + 'Suggester.DocumentSuggesterOptions.SourceField': source_field} + if fuzzy_matching is not None: + params['Suggester.DocumentSuggesterOptions.FuzzyMatching'] = fuzzy_matching + if sort_expression is not None: + params['Suggester.DocumentSuggesterOptions.SortExpression'] = sort_expression + + return self.get_response(doc_path, 'DefineExpression', params, + verb='POST') + + def delete_analysis_scheme(self, domain_name, scheme_name): + """ + Deletes an existing ``AnalysisScheme`` from the search domain. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). Uppercase letters and underscores are not + allowed. + + :type scheme_name: string + :param scheme_name: The analysis scheme name to delete + + :raises: BaseException, InternalException, ResourceNotFoundException + """ + doc_path = ('delete_analysis_scheme_response', + 'delete_analysis_scheme_result', + 'analysis_scheme') + params = {'DomainName': domain_name, + 'AnalysisSchemeName': scheme_name} + return self.get_response(doc_path, 'DeleteAnalysisScheme', + params, verb='POST') + + def delete_domain(self, domain_name): + """ + Delete a search domain. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. 
Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). Uppercase letters and underscores are not + allowed. + + :raises: BaseException, InternalException + """ + doc_path = ('delete_domain_response', + 'delete_domain_result', + 'domain_status') + params = {'DomainName': domain_name} + return self.get_response(doc_path, 'DeleteDomain', + params, verb='POST') + + def delete_index_field(self, domain_name, field_name): + """ + Deletes an existing ``IndexField`` from the search domain. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). Uppercase letters and underscores are not + allowed. + + :type field_name: string + :param field_name: A string that represents the name of + an index field. Field names must begin with a letter and + can contain the following characters: a-z (lowercase), + 0-9, and _ (underscore). Uppercase letters and hyphens are + not allowed. The names "body", "docid", and + "text_relevance" are reserved and cannot be specified as + field or rank expression names. + + :raises: BaseException, InternalException, ResourceNotFoundException + """ + doc_path = ('delete_index_field_response', + 'delete_index_field_result', + 'index_field') + params = {'DomainName': domain_name, + 'IndexFieldName': field_name} + return self.get_response(doc_path, 'DeleteIndexField', + params, verb='POST') + + def delete_expression(self, domain_name, name): + """ + Deletes an existing ``Expression`` from the search domain. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. 
Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). Uppercase letters and underscores are not + allowed. + + :type name: string + :param name: Name of the ``Expression`` to delete. + + :raises: BaseException, InternalException, ResourceNotFoundException + """ + doc_path = ('delete_expression_response', + 'delete_expression_result', + 'expression') + params = {'DomainName': domain_name, 'ExpressionName': name} + return self.get_response(doc_path, 'DeleteExpression', + params, verb='POST') + + def delete_suggester(self, domain_name, name): + """ + Deletes an existing ``Suggester`` from the search domain. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). Uppercase letters and underscores are not + allowed. + + :type name: string + :param name: Name of the ``Suggester`` to delete. + + :raises: BaseException, InternalException, ResourceNotFoundException + """ + doc_path = ('delete_suggester_response', + 'delete_suggester_result', + 'suggester') + params = {'DomainName': domain_name, 'SuggesterName': name} + return self.get_response(doc_path, 'DeleteSuggester', + params, verb='POST') + + def describe_analysis_schemes(self, domain_name): + """ + Describes analysis schemes used by indexing for the search domain. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). 
Uppercase letters and underscores are not + allowed. + + :raises: BaseException, InternalException, ResourceNotFoundException + """ + doc_path = ('describe_analysis_schemes_response', + 'describe_analysis_schemes_result', + 'analysis_schemes') + params = {'DomainName': domain_name} + return self.get_response(doc_path, 'DescribeAnalysisSchemes', + params, verb='POST') + + def describe_availability_options(self, domain_name): + """ + Describes the availability options for the search domain. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). Uppercase letters and underscores are not + allowed. + + :raises: BaseException, InternalException, ResourceNotFoundException + """ + doc_path = ('describe_availability_options_response', + 'describe_availability_options_result', + 'availability_options') + params = {'DomainName': domain_name} + return self.get_response(doc_path, 'DescribeAvailabilityOptions', + params, verb='POST') + + def describe_domains(self, domain_names=None): + """ + Describes the domains (optionally limited to one or more + domains by name) owned by this account. + + :type domain_names: list + :param domain_names: Limits the response to the specified domains. 
+ + :raises: BaseException, InternalException + """ + doc_path = ('describe_domains_response', + 'describe_domains_result', + 'domain_status_list') + params = {} + if domain_names: + for i, domain_name in enumerate(domain_names, 1): + params['DomainNames.member.%d' % i] = domain_name + return self.get_response(doc_path, 'DescribeDomains', + params, verb='POST', + list_marker='DomainStatusList') + + def describe_expressions(self, domain_name, names=None): + """ + Describes RankExpressions in the search domain, optionally + limited to a single expression. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). Uppercase letters and underscores are not + allowed. + + :type names: list + :param names: Limit response to the specified names. + + :raises: BaseException, InternalException, ResourceNotFoundException + """ + doc_path = ('describe_expressions_response', + 'describe_expressions_result', + 'expressions') + params = {'DomainName': domain_name} + if names: + for i, expr_name in enumerate(names, 1): + params['ExpressionNames.member.%d' % i] = expr_name + return self.get_response(doc_path, 'DescribeExpressions', + params, verb='POST', + list_marker='Expressions') + + def describe_index_fields(self, domain_name, field_names=None): + """ + Describes index fields in the search domain, optionally + limited to a single ``IndexField``. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). 
Uppercase letters and underscores are not + allowed. + + :type field_names: list + :param field_names: Limits the response to the specified fields. + + :raises: BaseException, InternalException, ResourceNotFoundException + """ + doc_path = ('describe_index_fields_response', + 'describe_index_fields_result', + 'index_fields') + params = {'DomainName': domain_name} + if field_names: + for i, field_name in enumerate(field_names, 1): + params['FieldNames.member.%d' % i] = field_name + return self.get_response(doc_path, 'DescribeIndexFields', + params, verb='POST', + list_marker='IndexFields') + + def describe_scaling_parameters(self, domain_name): + """ + Describes the scaling parameters for the search domain. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). Uppercase letters and underscores are not + allowed. + + :raises: BaseException, InternalException, ResourceNotFoundException + """ + doc_path = ('describe_scaling_parameters_response', + 'describe_scaling_parameters_result', + 'scaling_parameters') + params = {'DomainName': domain_name} + return self.get_response(doc_path, 'DescribeScalingParameters', + params, verb='POST') + + def describe_service_access_policies(self, domain_name): + """ + Describes the resource-based policies controlling access to + the services in this search domain. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). Uppercase letters and underscores are not + allowed. 
+ + :raises: BaseException, InternalException, ResourceNotFoundException + """ + doc_path = ('describe_service_access_policies_response', + 'describe_service_access_policies_result', + 'access_policies') + params = {'DomainName': domain_name} + return self.get_response(doc_path, 'DescribeServiceAccessPolicies', + params, verb='POST') + + def describe_suggesters(self, domain_name, names=None): + """ + Describes the suggesters for the search domain. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). Uppercase letters and underscores are not + allowed. + + :type names: list + :param names: Limit response to the specified names. + + :raises: BaseException, InternalException, ResourceNotFoundException + """ + doc_path = ('describe_suggesters_response', + 'describe_suggesters_result', + 'suggesters') + params = {'DomainName': domain_name} + if names: + for i, suggester_name in enumerate(names, 1): + params['SuggesterNames.member.%d' % i] = suggester_name + + return self.get_response(doc_path, 'DescribeSuggesters', + params, verb='POST', list_marker="Suggesters") + + def index_documents(self, domain_name): + """ + Tells the search domain to start scanning its documents using + the latest text processing options and ``IndexFields``. This + operation must be invoked to make visible in searches any + options whose OptionStatus has ``OptionState`` of + ``RequiresIndexDocuments``. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). 
Uppercase letters and underscores are not + allowed. + + :raises: BaseException, InternalException, ResourceNotFoundException + """ + doc_path = ('index_documents_response', + 'index_documents_result', + 'field_names') + params = {'DomainName': domain_name} + return self.get_response(doc_path, 'IndexDocuments', params, + verb='POST', list_marker='FieldNames') + + def update_availability_options(self, domain_name, multi_az): + """ + Updates availability options for the search domain. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). Uppercase letters and underscores are not + allowed. + + :type multi_az: bool + :param multi_az: Should the domain be setup in multiple + Availability Zones + + :raises: BaseException, InternalException, InvalidTypeException, + LimitExceededException, ResourceNotFoundException + """ + doc_path = ('update_availability_options_response', + 'update_availability_options_result', + 'availability_options') + params = {'DomainName': domain_name, + 'MultiAZ': do_bool(multi_az)} + return self.get_response(doc_path, 'UpdateAvailabilityOptions', + params, verb='POST') + + def update_scaling_parameters(self, domain_name, instance_type=None, + replication_count=0): + """ + Updates scaling parameters for the search domain. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). Uppercase letters and underscores are not + allowed. 
+ + :type instance_type: str or None + :param instance_type: The type of instance to use. One of + None | search.m1.small | search.m1.large | search.m2.xlarge | search.m2.2xlarge + + :type replication_count: int + :param replication_count: The desired number of replicas. A + value of 0 will reset to the default. + + :raises: BaseException, InternalException, InvalidTypeException, + LimitExceededException, ResourceNotFoundException + """ + doc_path = ('update_scaling_parameters_response', + 'update_scaling_parameters_result', + 'scaling_parameters') + params = {'DomainName': domain_name} + if instance_type is not None: + params["ScalingParameters.DesiredInstanceType"] = instance_type + if replication_count is not None: + params["ScalingParameters.DesiredReplicationCount"] = replication_count + return self.get_response(doc_path, 'UpdateScalingParameters', + params, verb='POST') + + def update_service_access_policies(self, domain_name, access_policies): + """ + Updates the policies controlling access to the services in + this search domain. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). Uppercase letters and underscores are not + allowed. + + :type access_policies: string + :param access_policies: An IAM access policy as described in + The Access Policy Language in Using AWS Identity and + Access Management. The maximum size of an access policy + document is 100KB. 
+ + :raises: BaseException, InternalException, LimitExceededException, + ResourceNotFoundException, InvalidTypeException + """ + doc_path = ('update_service_access_policies_response', + 'update_service_access_policies_result', + 'access_policies') + params = {'AccessPolicies': access_policies, + 'DomainName': domain_name} + return self.get_response(doc_path, 'UpdateServiceAccessPolicies', + params, verb='POST') diff --git a/boto/cloudsearch2/layer2.py b/boto/cloudsearch2/layer2.py new file mode 100644 index 0000000000..bd73e52363 --- /dev/null +++ b/boto/cloudsearch2/layer2.py @@ -0,0 +1,77 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
#

from .layer1 import Layer1
from .domain import Domain


class Layer2(object):
    """
    High-level interface to Amazon CloudSearch (2013-01-01 API).

    Wraps a :class:`boto.cloudsearch2.layer1.Layer1` connection and
    returns :class:`boto.cloudsearch2.domain.Domain` objects instead of
    raw response dictionaries.
    """

    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
                 is_secure=True, port=None, proxy=None, proxy_port=None,
                 host=None, debug=0, session_token=None, region=None,
                 validate_certs=True):
        self.layer1 = Layer1(
            aws_access_key_id=aws_access_key_id,
            aws_secret_access_key=aws_secret_access_key,
            is_secure=is_secure,
            port=port,
            proxy=proxy,
            proxy_port=proxy_port,
            host=host,
            debug=debug,
            security_token=session_token,
            region=region,
            validate_certs=validate_certs)

    def list_domains(self, domain_names=None):
        """
        Return a list of objects for each domain defined in the
        current account.
        :rtype: list of :class:`boto.cloudsearch2.domain.Domain`
        """
        descriptions = self.layer1.describe_domains(domain_names)
        return [Domain(self.layer1, description)
                for description in descriptions]

    def create_domain(self, domain_name):
        """
        Create a new CloudSearch domain and return the corresponding object.
        :return: Domain object, or None if the domain isn't found
        :rtype: :class:`boto.cloudsearch2.domain.Domain`
        """
        description = self.layer1.create_domain(domain_name)
        return Domain(self.layer1, description)

    def lookup(self, domain_name):
        """
        Lookup a single domain
        :param domain_name: The name of the domain to look up
        :type domain_name: str

        :return: Domain object, or None if the domain isn't found
        :rtype: :class:`boto.cloudsearch2.domain.Domain`
        """
        matches = self.list_domains(domain_names=[domain_name])
        if matches:
            return matches[0]
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#

import time

# boto.compat normally selects the best available json implementation;
# fall back to the stdlib so this module also works in isolation.
try:
    from boto.compat import json
except ImportError:
    import json


class OptionStatus(dict):
    """
    Presents a combination of status fields (defined below) which are
    accessed as attributes and option values which are stored in the
    native Python dictionary.  In this class, the option values are
    merged from a JSON object that is stored as the Option part of
    the object.

    :ivar domain_name: The name of the domain this option is associated with.
    :ivar create_date: A timestamp for when this option was created.
    :ivar state: The state of processing a change to an option.
        Possible values:

        * RequiresIndexDocuments: the option's latest value will not
          be visible in searches until IndexDocuments has been called
          and indexing is complete.
        * Processing: the option's latest value is not yet visible in
          all searches but is in the process of being activated.
        * Active: the option's latest value is completely visible.

    :ivar update_date: A timestamp for when this option was updated.
    :ivar update_version: A unique integer that indicates when this
        option was last updated.
    """

    def __init__(self, domain, data=None, refresh_fn=None, save_fn=None):
        self.domain = domain
        self.refresh_fn = refresh_fn
        self.save_fn = save_fn
        self.refresh(data)

    def _update_status(self, status):
        self.creation_date = status['creation_date']
        # BUG FIX: only ``self.status`` used to be assigned here, but
        # ``wait_for_state`` (and ``endElement``) use ``self.state``, so
        # polling an object refreshed from JSON raised AttributeError.
        # Keep both names in sync for backward compatibility.
        self.status = status['state']
        self.state = status['state']
        self.update_date = status['update_date']
        self.update_version = int(status['update_version'])

    def _update_options(self, options):
        # ``options`` arrives as a JSON document string; merge its keys
        # into this dict.
        if options:
            self.update(json.loads(options))

    def refresh(self, data=None):
        """
        Refresh the local state of the object.  You can either pass
        new state data in as the parameter ``data`` or, if that parameter
        is omitted, the state data will be retrieved from CloudSearch.
        """
        if not data:
            if self.refresh_fn:
                data = self.refresh_fn(self.domain.name)
        if data:
            self._update_status(data['status'])
            self._update_options(data['options'])

    def to_json(self):
        """
        Return the JSON representation of the options as a string.
        """
        return json.dumps(self)

    def startElement(self, name, attrs, connection):
        return None

    def endElement(self, name, value, connection):
        if name == 'CreationDate':
            self.created = value
        elif name == 'State':
            self.state = value
        elif name == 'UpdateDate':
            self.updated = value
        elif name == 'UpdateVersion':
            self.update_version = int(value)
        elif name == 'Options':
            # BUG FIX: this previously called ``self.update_from_json_doc``,
            # a method that is not defined anywhere in this module (a
            # leftover from the original cloudsearch code) and would raise
            # AttributeError at runtime.
            self._update_options(value)
        else:
            setattr(self, name, value)

    def save(self):
        """
        Write the current state of the local object back to the
        CloudSearch service.
        """
        if self.save_fn:
            data = self.save_fn(self.domain.name, self.to_json())
            self.refresh(data)

    def wait_for_state(self, state):
        """
        Performs polling of CloudSearch to wait for the ``state``
        of this object to change to the provided state.
        """
        while self.state != state:
            time.sleep(5)
            self.refresh()


class IndexFieldStatus(OptionStatus):
    # Options arrive already parsed (not a JSON string) for index fields.

    def _update_options(self, options):
        self.update(options)

    def save(self):
        pass


class AvailabilityOptionsStatus(OptionStatus):
    # The option value is a bare JSON boolean; expose it as ``MultiAZ``.

    def _update_options(self, options):
        self.update(MultiAZ=json.loads(options))

    def save(self):
        pass


class ScalingParametersStatus(IndexFieldStatus):

    pass


class ExpressionStatus(IndexFieldStatus):

    pass


class ServicePoliciesStatus(OptionStatus):

    def new_statement(self, arn, ip):
        """
        Returns a new policy statement that will allow
        access to the service described by ``arn`` by the
        ip specified in ``ip``.

        :type arn: string
        :param arn: The Amazon Resource Notation identifier for the
            service you wish to provide access to.  This would be
            either the search service or the document service.

        :type ip: string
        :param ip: An IP address or CIDR block you wish to grant access
            to.
        """
        return {
            "Effect": "Allow",
            "Action": "*",  # Docs say use GET, but denies unless *
            "Resource": arn,
            "Condition": {
                "IpAddress": {
                    "aws:SourceIp": [ip]
                }
            }
        }

    def _allow_ip(self, arn, ip):
        if 'Statement' not in self:
            s = self.new_statement(arn, ip)
            self['Statement'] = [s]
            self.save()
        else:
            add_statement = True
            for statement in self['Statement']:
                if statement['Resource'] == arn:
                    for condition_name in statement['Condition']:
                        if condition_name == 'IpAddress':
                            add_statement = False
                            condition = statement['Condition'][condition_name]
                            if ip not in condition['aws:SourceIp']:
                                condition['aws:SourceIp'].append(ip)

            if add_statement:
                s = self.new_statement(arn, ip)
                self['Statement'].append(s)
            self.save()

    def allow_search_ip(self, ip):
        """
        Add the provided ip address or CIDR block to the list of
        allowable addresses for the search service.

        :type ip: string
        :param ip: An IP address or CIDR block you wish to grant access
            to.
        """
        arn = self.domain.service_arn
        self._allow_ip(arn, ip)

    def allow_doc_ip(self, ip):
        """
        Add the provided ip address or CIDR block to the list of
        allowable addresses for the document service.

        :type ip: string
        :param ip: An IP address or CIDR block you wish to grant access
            to.
        """
        arn = self.domain.service_arn
        self._allow_ip(arn, ip)

    def _disallow_ip(self, arn, ip):
        if 'Statement' not in self:
            return
        need_update = False
        for statement in self['Statement']:
            if statement['Resource'] == arn:
                for condition_name in statement['Condition']:
                    if condition_name == 'IpAddress':
                        condition = statement['Condition'][condition_name]
                        if ip in condition['aws:SourceIp']:
                            condition['aws:SourceIp'].remove(ip)
                            need_update = True
        if need_update:
            self.save()

    def disallow_search_ip(self, ip):
        """
        Remove the provided ip address or CIDR block from the list of
        allowable addresses for the search service.

        :type ip: string
        :param ip: An IP address or CIDR block whose access you wish to
            revoke.
        """
        arn = self.domain.service_arn
        self._disallow_ip(arn, ip)

    def disallow_doc_ip(self, ip):
        """
        Remove the provided ip address or CIDR block from the list of
        allowable addresses for the document service.

        :type ip: string
        :param ip: An IP address or CIDR block whose access you wish to
            revoke.
        """
        arn = self.domain.service_arn
        self._disallow_ip(arn, ip)
#
# NOTE: this module targets both Python 2.6+ and Python 3, so it avoids
# py2-only constructs (``dict.iteritems``, ``except E, e``).
from math import ceil
import re

# boto.compat normally selects the best available json implementation;
# fall back to the stdlib so query construction works in isolation.
try:
    from boto.compat import json
except ImportError:
    import json

# Query parser names accepted by the 2013-01-01 search API.
SIMPLE = 'simple'
STRUCTURED = 'structured'
LUCENE = 'lucene'
DISMAX = 'dismax'


class SearchServiceException(Exception):
    pass


class CommitMismatchError(Exception):
    pass


class SearchResults(object):
    """
    One page of hits returned by the CloudSearch search service.

    :ivar rid: The request id reported by the service.
    :ivar time_ms: Server-side processing time in milliseconds.
    :ivar hits: Total number of documents matching the query.
    :ivar docs: The hit documents on this page.
    :ivar facets: Mapping of facet name -> {value: count}.
    """

    def __init__(self, **attrs):
        self.rid = attrs['status']['rid']
        self.time_ms = attrs['status']['time-ms']
        self.hits = attrs['hits']['found']
        self.docs = attrs['hits']['hit']
        self.start = attrs['hits']['start']
        self.query = attrs['query']
        self.search_service = attrs['search_service']

        self.facets = {}
        if 'facets' in attrs:
            for facet, values in attrs['facets'].items():
                if 'buckets' in values:
                    self.facets[facet] = dict(
                        (bucket['value'], bucket['count'])
                        for bucket in values['buckets'])

        # BUG FIX: use float division.  Under Python 2, ``hits /
        # real_size`` is integer division, so e.g. 501 hits at 500 per
        # page computed 1 page instead of 2 and the last page of results
        # was silently dropped by the paging helpers.
        self.num_pages_needed = ceil(self.hits / float(self.query.real_size))

    def __len__(self):
        return len(self.docs)

    def __iter__(self):
        return iter(self.docs)

    def next_page(self):
        """Call Cloudsearch to get the next page of search results

        :rtype: :class:`boto.cloudsearch2.search.SearchResults`
        :return: the following page of search results
        """
        if self.query.page <= self.num_pages_needed:
            self.query.start += self.query.real_size
            self.query.page += 1
            return self.search_service(self.query)
        else:
            raise StopIteration


class Query(object):
    """Container for the parameters of a single search request."""

    # The service caps a single page at 500 hits.
    RESULTS_PER_PAGE = 500

    def __init__(self, q=None, parser=None, fq=None, expr=None,
                 return_fields=None, size=10, start=0, sort=None,
                 facet=None, highlight=None, partial=None, options=None):

        self.q = q
        self.parser = parser
        self.fq = fq
        self.expr = expr or {}
        self.sort = sort or []
        self.return_fields = return_fields or []
        self.start = start
        self.facet = facet or {}
        self.highlight = highlight or {}
        self.partial = partial
        self.options = options
        self.page = 0
        self.update_size(size)

    def update_size(self, new_size):
        # ``real_size`` is the per-request page size: capped at
        # RESULTS_PER_PAGE, and 0 means "as many as possible".
        self.size = new_size
        self.real_size = Query.RESULTS_PER_PAGE if (self.size >
            Query.RESULTS_PER_PAGE or self.size == 0) else self.size

    def to_params(self):
        """Transform search parameters from instance properties to a dictionary

        :rtype: dict
        :return: search parameters
        """
        params = {'start': self.start, 'size': self.real_size}

        if self.q:
            params['q'] = self.q

        if self.parser:
            params['q.parser'] = self.parser

        if self.fq:
            params['fq'] = self.fq

        if self.expr:
            for k, v in self.expr.items():
                params['expr.%s' % k] = v

        if self.facet:
            for k, v in self.facet.items():
                params['facet.%s' % k] = v

        if self.highlight:
            for k, v in self.highlight.items():
                params['highlight.%s' % k] = v

        if self.options:
            params['options'] = self.options

        if self.return_fields:
            params['return'] = ','.join(self.return_fields)

        if self.partial is not None:
            params['partial'] = self.partial

        if self.sort:
            params['sort'] = ','.join(self.sort)

        return params


class SearchConnection(object):
    """Connection to a single CloudSearch domain's search endpoint."""

    def __init__(self, domain=None, endpoint=None):
        self.domain = domain
        self.endpoint = endpoint
        if not endpoint:
            self.endpoint = domain.search_service_endpoint

    def build_query(self, q=None, parser=None, fq=None, rank=None,
                    return_fields=None, size=10, start=0, facet=None,
                    highlight=None, sort=None, partial=None, options=None):
        return Query(q=q, parser=parser, fq=fq, expr=rank,
                     return_fields=return_fields, size=size, start=start,
                     facet=facet, highlight=highlight, sort=sort,
                     partial=partial, options=options)

    def search(self, q=None, parser=None, fq=None, rank=None,
               return_fields=None, size=10, start=0, facet=None,
               highlight=None, sort=None, partial=None, options=None):
        """
        Send a query to CloudSearch

        Each search query should use at least the q argument to specify
        the search parameter.  The other options are used to specify the
        criteria of the search.

        :type q: string
        :param q: A string to search the default search fields for.

        :type parser: string
        :param parser: The parser to use. 'simple', 'structured',
            'lucene', 'dismax'

        :type fq: string
        :param fq: The filter query to use.

        :type sort: List of strings
        :param sort: A list of fields or rank expressions used to order
            the search results.  Order is handled by adding 'desc' or
            'asc' after the field name: ``['year desc', 'author asc']``

        :type return_fields: List of strings
        :param return_fields: A list of fields which should be returned
            by the search.  If this field is not specified, only IDs
            will be returned.  ``['headline']``

        :type size: int
        :param size: Number of search results to specify

        :type start: int
        :param start: Offset of the first search result to return (can
            be used for paging)

        :type facet: dict
        :param facet: Dictionary of fields for which facets should be
            returned.  The facet value is a string of JSON options, e.g.
            ``{'year': '{sort:"bucket", size:3}'}``

        :type highlight: dict
        :param highlight: Dictionary of fields for which highlights
            should be returned.  The value is a string of JSON options.

        :type partial: bool
        :param partial: Should partial results from a partitioned
            service be returned if one or more index partitions are
            unreachable.

        :type options: str
        :param options: Options for the query parser specified in
            *parser*, as a string in JSON format, e.g.
            ``{fields: ['title^5', 'description']}``

        :rtype: :class:`boto.cloudsearch2.search.SearchResults`
        :return: Returns the results of this search
        """
        # BUG FIX: ``partial`` was previously not forwarded to
        # build_query, so the caller's value was silently ignored.
        query = self.build_query(q=q, parser=parser, fq=fq, rank=rank,
                                 return_fields=return_fields,
                                 size=size, start=start, facet=facet,
                                 highlight=highlight, sort=sort,
                                 partial=partial, options=options)
        return self(query)

    def __call__(self, query):
        """Make a call to CloudSearch

        :type query: :class:`boto.cloudsearch2.search.Query`
        :param query: A group of search criteria

        :rtype: :class:`boto.cloudsearch2.search.SearchResults`
        :return: search results
        """
        # Imported lazily so that query construction and result parsing
        # work even when the optional ``requests`` dependency is absent.
        import requests

        url = "http://%s/%s/search" % (self.endpoint,
                                       self.domain.layer1.APIVersion)
        params = query.to_params()

        r = requests.get(url, params=params)
        try:
            data = json.loads(r.content)
        except ValueError:
            if r.status_code == 403:
                msg = ''
                g = re.search('<html><body><h1>403 Forbidden</h1>([^<]+)<',
                              r.content)
                try:
                    msg = ': %s' % (g.groups()[0].strip())
                except AttributeError:
                    pass
                raise SearchServiceException(
                    'Authentication error from Amazon%s' % msg)
            raise SearchServiceException(
                "Got non-json response from Amazon. %s" % r.content, query)

        if 'messages' in data and 'error' in data:
            for m in data['messages']:
                if m['severity'] == 'fatal':
                    raise SearchServiceException("Error processing search %s "
                                                 "=> %s" % (params,
                                                            m['message']),
                                                 query)
        elif 'error' in data:
            raise SearchServiceException("Unknown error processing search %s"
                                         % json.dumps(data), query)

        data['query'] = query
        data['search_service'] = self

        return SearchResults(**data)

    def get_all_paged(self, query, per_page):
        """Get a generator to iterate over all pages of search results

        :type query: :class:`boto.cloudsearch2.search.Query`
        :param query: A group of search criteria

        :type per_page: int
        :param per_page: Number of docs in each
            :class:`boto.cloudsearch2.search.SearchResults` object.

        :rtype: generator
        :return: Generator containing
            :class:`boto.cloudsearch2.search.SearchResults`
        """
        query.update_size(per_page)
        page = 0
        num_pages_needed = 0
        while page <= num_pages_needed:
            results = self(query)
            num_pages_needed = results.num_pages_needed
            yield results
            query.start += query.real_size
            page += 1

    def get_all_hits(self, query):
        """Get a generator to iterate over all search results

        Transparently handles the results paging from Cloudsearch
        search results so even if you have many thousands of results
        you can iterate over all results in a reasonably efficient
        manner.

        :type query: :class:`boto.cloudsearch2.search.Query`
        :param query: A group of search criteria

        :rtype: generator
        :return: All docs matching query
        """
        page = 0
        num_pages_needed = 0
        while page <= num_pages_needed:
            results = self(query)
            num_pages_needed = results.num_pages_needed
            for doc in results:
                yield doc
            query.start += query.real_size
            page += 1

    def get_num_hits(self, query):
        """Return the total number of hits for query

        :type query: :class:`boto.cloudsearch2.search.Query`
        :param query: a group of search criteria

        :rtype: int
        :return: Total number of hits for query
        """
        query.update_size(1)
        return self(query).hits
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. diff --git a/tests/integration/cloudsearch2/test_cert_verification.py b/tests/integration/cloudsearch2/test_cert_verification.py new file mode 100644 index 0000000000..a2ab6541d1 --- /dev/null +++ b/tests/integration/cloudsearch2/test_cert_verification.py @@ -0,0 +1,39 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Check that all of the certs on all service endpoints validate. 
+""" +import unittest + +from tests.integration import ServiceCertVerificationTest + +import boto.cloudsearch2 + + +class CloudSearchCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest): + cloudsearch = True + regions = boto.cloudsearch2.regions() + + def sample_service_call(self, conn): + conn.describe_domains() diff --git a/tests/integration/cloudsearch2/test_layers.py b/tests/integration/cloudsearch2/test_layers.py new file mode 100644 index 0000000000..90ceda46b8 --- /dev/null +++ b/tests/integration/cloudsearch2/test_layers.py @@ -0,0 +1,74 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +""" +Tests for Layer1 of Cloudsearch +""" +import time + +from tests.unit import unittest +from boto.cloudsearch2.layer1 import Layer1 +from boto.cloudsearch2.layer2 import Layer2 +from boto.regioninfo import RegionInfo + + +class CloudSearchLayer1Test(unittest.TestCase): + cloudsearch = True + + def setUp(self): + super(CloudSearchLayer1Test, self).setUp() + self.layer1 = Layer1() + self.domain_name = 'test-%d' % int(time.time()) + + def test_create_domain(self): + resp = self.layer1.create_domain(self.domain_name) + self.addCleanup(self.layer1.delete_domain, self.domain_name) + self.assertTrue(resp.get('created', False)) + + +class CloudSearchLayer2Test(unittest.TestCase): + cloudsearch = True + + def setUp(self): + super(CloudSearchLayer2Test, self).setUp() + self.layer2 = Layer2() + self.domain_name = 'test-%d' % int(time.time()) + + def test_create_domain(self): + domain = self.layer2.create_domain(self.domain_name) + self.addCleanup(domain.delete) + self.assertTrue(domain.created, False) + self.assertEqual(domain.domain_name, self.domain_name) + + def test_initialization_regression(self): + us_west_2 = RegionInfo( + name='us-west-2', + endpoint='cloudsearch.us-west-2.amazonaws.com' + ) + self.layer2 = Layer2( + region=us_west_2, + host='cloudsearch.us-west-2.amazonaws.com' + ) + self.assertEqual( + self.layer2.layer1.host, + 'cloudsearch.us-west-2.amazonaws.com' + ) diff --git a/tests/unit/cloudsearch2/__init__.py b/tests/unit/cloudsearch2/__init__.py new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/tests/unit/cloudsearch2/__init__.py @@ -0,0 +1 @@ + diff --git a/tests/unit/cloudsearch2/test_connection.py b/tests/unit/cloudsearch2/test_connection.py new file mode 100644 index 0000000000..6a2a0200b9 --- /dev/null +++ b/tests/unit/cloudsearch2/test_connection.py @@ -0,0 +1,228 @@ +#!/usr/bin env python + +from tests.unit import AWSMockServiceTestCase + +from boto.cloudsearch2.domain import Domain +from boto.cloudsearch2.layer1 import 
Layer1 + +import json + +class TestCloudSearchCreateDomain(AWSMockServiceTestCase): + connection_class = Layer1 + + def default_body(self): + return """ + + + + 0 + + arn:aws:cs:us-east-1:1234567890:domain/demo + search-demo-userdomain.us-east-1.cloudsearch.amazonaws.com + + true + 1234567890/demo + false + 0 + demo + false + false + + arn:aws:cs:us-east-1:1234567890:domain/demo + doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com + + + + + 00000000-0000-0000-0000-000000000000 + + +""" + + def test_create_domain(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.create_domain('demo') + + self.assert_request_parameters({ + 'Action': 'CreateDomain', + 'DomainName': 'demo', + 'Version': '2013-01-01', + }) + + def test_cloudsearch_connect_result_endpoints(self): + """Check that endpoints & ARNs are correctly returned from AWS""" + + self.set_http_response(status_code=200) + api_response = self.service_connection.create_domain('demo') + domain = Domain(self, api_response) + + self.assertEqual( + domain.doc_service_endpoint, + "doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + self.assertEqual(domain.service_arn, + "arn:aws:cs:us-east-1:1234567890:domain/demo") + self.assertEqual( + domain.search_service_endpoint, + "search-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + + def test_cloudsearch_connect_result_statuses(self): + """Check that domain statuses are correctly returned from AWS""" + self.set_http_response(status_code=200) + api_response = self.service_connection.create_domain('demo') + domain = Domain(self, api_response) + + self.assertEqual(domain.created, True) + self.assertEqual(domain.processing, False) + self.assertEqual(domain.requires_index_documents, False) + self.assertEqual(domain.deleted, False) + + def test_cloudsearch_connect_result_details(self): + """Check that the domain information is correctly returned from AWS""" + self.set_http_response(status_code=200) + api_response = 
self.service_connection.create_domain('demo') + domain = Domain(self, api_response) + + self.assertEqual(domain.id, "1234567890/demo") + self.assertEqual(domain.name, "demo") + + def test_cloudsearch_documentservice_creation(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.create_domain('demo') + domain = Domain(self, api_response) + + document = domain.get_document_service() + + self.assertEqual( + document.endpoint, + "doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + + def test_cloudsearch_searchservice_creation(self): + self.set_http_response(status_code=200) + api_response = self.service_connection.create_domain('demo') + domain = Domain(self, api_response) + + search = domain.get_search_service() + + self.assertEqual( + search.endpoint, + "search-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + + +class CloudSearchConnectionDeletionTest(AWSMockServiceTestCase): + connection_class = Layer1 + + def default_body(self): + return """ + + + + 0 + + arn:aws:cs:us-east-1:1234567890:search/demo + search-demo-userdomain.us-east-1.cloudsearch.amazonaws.com + + true + 1234567890/demo + false + 0 + demo + false + false + + arn:aws:cs:us-east-1:1234567890:doc/demo + doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com + + + + + 00000000-0000-0000-0000-000000000000 + + +""" + + def test_cloudsearch_deletion(self): + """ + Check that the correct arguments are sent to AWS when creating a + cloudsearch connection. 
+ """ + self.set_http_response(status_code=200) + api_response = self.service_connection.delete_domain('demo') + + self.assert_request_parameters({ + 'Action': 'DeleteDomain', + 'DomainName': 'demo', + 'Version': '2013-01-01', + }) + + +class CloudSearchConnectionIndexDocumentTest(AWSMockServiceTestCase): + connection_class = Layer1 + + def default_body(self): + return """ + + + + average_score + brand_id + colors + context + context_owner + created_at + creator_id + description + file_size + format + has_logo + has_messaging + height + image_id + ingested_from + is_advertising + is_photo + is_reviewed + modified_at + subject_date + tags + title + width + + + + eb2b2390-6bbd-11e2-ab66-93f3a90dcf2a + + +""" + + def test_cloudsearch_index_documents(self): + """ + Check that the correct arguments are sent to AWS when indexing a + domain. + """ + self.set_http_response(status_code=200) + api_response = self.service_connection.index_documents('demo') + + self.assert_request_parameters({ + 'Action': 'IndexDocuments', + 'DomainName': 'demo', + 'Version': '2013-01-01', + }) + + def test_cloudsearch_index_documents_resp(self): + """ + Check that the AWS response is being parsed correctly when indexing a + domain. 
+ """ + self.set_http_response(status_code=200) + api_response = self.service_connection.index_documents('demo') + + self.assertEqual(api_response, ['average_score', 'brand_id', 'colors', + 'context', 'context_owner', + 'created_at', 'creator_id', + 'description', 'file_size', 'format', + 'has_logo', 'has_messaging', 'height', + 'image_id', 'ingested_from', + 'is_advertising', 'is_photo', + 'is_reviewed', 'modified_at', + 'subject_date', 'tags', 'title', + 'width']) diff --git a/tests/unit/cloudsearch2/test_document.py b/tests/unit/cloudsearch2/test_document.py new file mode 100644 index 0000000000..7d9d011f7c --- /dev/null +++ b/tests/unit/cloudsearch2/test_document.py @@ -0,0 +1,324 @@ +#!/usr/bin env python + +from tests.unit import unittest +from httpretty import HTTPretty +from mock import MagicMock + +import urlparse +import json + +from boto.cloudsearch2.document import DocumentServiceConnection +from boto.cloudsearch2.document import CommitMismatchError, EncodingError, \ + ContentTooLongError, DocumentServiceConnection + +import boto + +class CloudSearchDocumentTest(unittest.TestCase): + def setUp(self): + HTTPretty.enable() + HTTPretty.register_uri( + HTTPretty.POST, + ("http://doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com/" + "2013-01-01/documents/batch"), + body=json.dumps(self.response), + content_type="application/json") + + def tearDown(self): + HTTPretty.disable() + +class CloudSearchDocumentSingleTest(CloudSearchDocumentTest): + + response = { + 'status': 'success', + 'adds': 1, + 'deletes': 0, + } + + def test_cloudsearch_add_basics(self): + """ + Check that a simple add document actually sends an add document request + to AWS. 
+ """ + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + document.add("1234", 10, {"id": "1234", "title": "Title 1", + "category": ["cat_a", "cat_b", "cat_c"]}) + document.commit() + + args = json.loads(HTTPretty.last_request.body)[0] + + self.assertEqual(args['lang'], 'en') + self.assertEqual(args['type'], 'add') + + def test_cloudsearch_add_single_basic(self): + """ + Check that a simple add document sends correct document metadata to + AWS. + """ + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + document.add("1234", 10, {"id": "1234", "title": "Title 1", + "category": ["cat_a", "cat_b", "cat_c"]}) + document.commit() + + args = json.loads(HTTPretty.last_request.body)[0] + + self.assertEqual(args['id'], '1234') + self.assertEqual(args['version'], 10) + self.assertEqual(args['type'], 'add') + + def test_cloudsearch_add_single_fields(self): + """ + Check that a simple add document sends the actual document to AWS. + """ + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + document.add("1234", 10, {"id": "1234", "title": "Title 1", + "category": ["cat_a", "cat_b", "cat_c"]}) + document.commit() + + args = json.loads(HTTPretty.last_request.body)[0] + + self.assertEqual(args['fields']['category'], ['cat_a', 'cat_b', + 'cat_c']) + self.assertEqual(args['fields']['id'], '1234') + self.assertEqual(args['fields']['title'], 'Title 1') + + def test_cloudsearch_add_single_result(self): + """ + Check that the reply from adding a single document is correctly parsed. 
+ """ + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + document.add("1234", 10, {"id": "1234", "title": "Title 1", + "category": ["cat_a", "cat_b", "cat_c"]}) + doc = document.commit() + + self.assertEqual(doc.status, 'success') + self.assertEqual(doc.adds, 1) + self.assertEqual(doc.deletes, 0) + + self.assertEqual(doc.doc_service, document) + + +class CloudSearchDocumentMultipleAddTest(CloudSearchDocumentTest): + + response = { + 'status': 'success', + 'adds': 3, + 'deletes': 0, + } + + objs = { + '1234': { + 'version': 10, 'fields': {"id": "1234", "title": "Title 1", + "category": ["cat_a", "cat_b", + "cat_c"]}}, + '1235': { + 'version': 11, 'fields': {"id": "1235", "title": "Title 2", + "category": ["cat_b", "cat_c", + "cat_d"]}}, + '1236': { + 'version': 12, 'fields': {"id": "1236", "title": "Title 3", + "category": ["cat_e", "cat_f", + "cat_g"]}}, + } + + + def test_cloudsearch_add_basics(self): + """Check that multiple documents are added correctly to AWS""" + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + for (key, obj) in self.objs.items(): + document.add(key, obj['version'], obj['fields']) + document.commit() + + args = json.loads(HTTPretty.last_request.body) + + for arg in args: + self.assertTrue(arg['id'] in self.objs) + self.assertEqual(arg['version'], self.objs[arg['id']]['version']) + self.assertEqual(arg['fields']['id'], + self.objs[arg['id']]['fields']['id']) + self.assertEqual(arg['fields']['title'], + self.objs[arg['id']]['fields']['title']) + self.assertEqual(arg['fields']['category'], + self.objs[arg['id']]['fields']['category']) + + def test_cloudsearch_add_results(self): + """ + Check that the result from adding multiple documents is parsed + correctly. 
+ """ + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + for (key, obj) in self.objs.items(): + document.add(key, obj['version'], obj['fields']) + doc = document.commit() + + self.assertEqual(doc.status, 'success') + self.assertEqual(doc.adds, len(self.objs)) + self.assertEqual(doc.deletes, 0) + + +class CloudSearchDocumentDelete(CloudSearchDocumentTest): + + response = { + 'status': 'success', + 'adds': 0, + 'deletes': 1, + } + + def test_cloudsearch_delete(self): + """ + Test that the request for a single document deletion is done properly. + """ + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + document.delete("5", "10") + document.commit() + args = json.loads(HTTPretty.last_request.body)[0] + + self.assertEqual(args['version'], '10') + self.assertEqual(args['type'], 'delete') + self.assertEqual(args['id'], '5') + + def test_cloudsearch_delete_results(self): + """ + Check that the result of a single document deletion is parsed properly. 
+ """ + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + document.delete("5", "10") + doc = document.commit() + + self.assertEqual(doc.status, 'success') + self.assertEqual(doc.adds, 0) + self.assertEqual(doc.deletes, 1) + + +class CloudSearchDocumentDeleteMultiple(CloudSearchDocumentTest): + response = { + 'status': 'success', + 'adds': 0, + 'deletes': 2, + } + + def test_cloudsearch_delete_multiples(self): + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + document.delete("5", "10") + document.delete("6", "11") + document.commit() + args = json.loads(HTTPretty.last_request.body) + + self.assertEqual(len(args), 2) + for arg in args: + self.assertEqual(arg['type'], 'delete') + + if arg['id'] == '5': + self.assertEqual(arg['version'], '10') + elif arg['id'] == '6': + self.assertEqual(arg['version'], '11') + else: # Unknown result out of AWS that shouldn't be there + self.assertTrue(False) + + +class CloudSearchSDFManipulation(CloudSearchDocumentTest): + response = { + 'status': 'success', + 'adds': 1, + 'deletes': 0, + } + + + def test_cloudsearch_initial_sdf_is_blank(self): + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + + self.assertEqual(document.get_sdf(), '[]') + + def test_cloudsearch_single_document_sdf(self): + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + + document.add("1234", 10, {"id": "1234", "title": "Title 1", + "category": ["cat_a", "cat_b", "cat_c"]}) + + self.assertNotEqual(document.get_sdf(), '[]') + + document.clear_sdf() + + self.assertEqual(document.get_sdf(), '[]') + +class CloudSearchBadSDFTesting(CloudSearchDocumentTest): + response = { + 'status': 'success', + 'adds': 1, + 'deletes': 0, + } + + def test_cloudsearch_erroneous_sdf(self): + original = boto.log.error + boto.log.error = MagicMock() + 
document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + + document.add("1234", 10, {"id": "1234", "title": None, + "category": ["cat_a", "cat_b", "cat_c"]}) + + document.commit() + self.assertNotEqual(len(boto.log.error.call_args_list), 1) + + boto.log.error = original + + +class CloudSearchDocumentErrorBadUnicode(CloudSearchDocumentTest): + response = { + 'status': 'error', + 'adds': 0, + 'deletes': 0, + 'errors': [{'message': 'Illegal Unicode character in document'}] + } + + def test_fake_bad_unicode(self): + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + document.add("1234", 10, {"id": "1234", "title": "Title 1", + "category": ["cat_a", "cat_b", "cat_c"]}) + self.assertRaises(EncodingError, document.commit) + + +class CloudSearchDocumentErrorDocsTooBig(CloudSearchDocumentTest): + response = { + 'status': 'error', + 'adds': 0, + 'deletes': 0, + 'errors': [{'message': 'The Content-Length is too long'}] + } + + def test_fake_docs_too_big(self): + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + document.add("1234", 10, {"id": "1234", "title": "Title 1", + "category": ["cat_a", "cat_b", "cat_c"]}) + + self.assertRaises(ContentTooLongError, document.commit) + + +class CloudSearchDocumentErrorMismatch(CloudSearchDocumentTest): + response = { + 'status': 'error', + 'adds': 0, + 'deletes': 0, + 'errors': [{'message': 'Something went wrong'}] + } + + def test_fake_failure(self): + document = DocumentServiceConnection( + endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + + document.add("1234", 10, {"id": "1234", "title": "Title 1", + "category": ["cat_a", "cat_b", "cat_c"]}) + + self.assertRaises(CommitMismatchError, document.commit) diff --git a/tests/unit/cloudsearch2/test_exceptions.py b/tests/unit/cloudsearch2/test_exceptions.py new file mode 100644 index 
0000000000..6d255cd22d --- /dev/null +++ b/tests/unit/cloudsearch2/test_exceptions.py @@ -0,0 +1,37 @@ +import mock +from boto.compat import json +from tests.unit import unittest + +from .test_search import HOSTNAME, CloudSearchSearchBaseTest +from boto.cloudsearch2.search import SearchConnection, SearchServiceException + + +def fake_loads_value_error(content, *args, **kwargs): + """Callable to generate a fake ValueError""" + raise ValueError("HAHAHA! Totally not simplejson & you gave me bad JSON.") + + +def fake_loads_json_error(content, *args, **kwargs): + """Callable to generate a fake JSONDecodeError""" + raise json.JSONDecodeError('Using simplejson & you gave me bad JSON.', + '', 0) + + +class CloudSearchJSONExceptionTest(CloudSearchSearchBaseTest): + response = '{}' + + def test_no_simplejson_value_error(self): + with mock.patch.object(json, 'loads', fake_loads_value_error): + search = SearchConnection(endpoint=HOSTNAME) + + with self.assertRaisesRegexp(SearchServiceException, 'non-json'): + search.search(q='test') + + @unittest.skipUnless(hasattr(json, 'JSONDecodeError'), + 'requires simplejson') + def test_simplejson_jsondecodeerror(self): + with mock.patch.object(json, 'loads', fake_loads_json_error): + search = SearchConnection(endpoint=HOSTNAME) + + with self.assertRaisesRegexp(SearchServiceException, 'non-json'): + search.search(q='test') diff --git a/tests/unit/cloudsearch2/test_search.py b/tests/unit/cloudsearch2/test_search.py new file mode 100644 index 0000000000..63ac2f4ff6 --- /dev/null +++ b/tests/unit/cloudsearch2/test_search.py @@ -0,0 +1,431 @@ +#!/usr/bin env python + +from tests.unit import unittest +from httpretty import HTTPretty + +import urlparse +import json +import mock +import requests + +from boto.cloudsearch2.search import SearchConnection, SearchServiceException + +HOSTNAME = "search-demo-userdomain.us-east-1.cloudsearch.amazonaws.com" +FULL_URL = 'http://%s/2013-01-01/search' % HOSTNAME + + +class 
CloudSearchSearchBaseTest(unittest.TestCase): + + hits = [ + { + 'id': '12341', + 'title': 'Document 1', + }, + { + 'id': '12342', + 'title': 'Document 2', + }, + { + 'id': '12343', + 'title': 'Document 3', + }, + { + 'id': '12344', + 'title': 'Document 4', + }, + { + 'id': '12345', + 'title': 'Document 5', + }, + { + 'id': '12346', + 'title': 'Document 6', + }, + { + 'id': '12347', + 'title': 'Document 7', + }, + ] + + content_type = "text/xml" + response_status = 200 + + def get_args(self, requestline): + (_, request, _) = requestline.split(" ") + (_, request) = request.split("?", 1) + args = urlparse.parse_qs(request) + return args + + def setUp(self): + HTTPretty.enable() + body = self.response + + if not isinstance(body, basestring): + body = json.dumps(body) + + HTTPretty.register_uri(HTTPretty.GET, FULL_URL, + body=body, + content_type=self.content_type, + status=self.response_status) + + def tearDown(self): + HTTPretty.disable() + +class CloudSearchSearchTest(CloudSearchSearchBaseTest): + response = { + 'rank': '-text_relevance', + 'match-expr':"Test", + 'hits': { + 'found': 30, + 'start': 0, + 'hit':CloudSearchSearchBaseTest.hits + }, + 'info': { + 'rid':'b7c167f6c2da6d93531b9a7b314ad030b3a74803b4b7797edb905ba5a6a08', + 'time-ms': 2, + 'cpu-time-ms': 0 + } + + } + + def test_cloudsearch_qsearch(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search(q='Test') + + args = self.get_args(HTTPretty.last_request.raw_requestline) + + self.assertEqual(args['q'], ["Test"]) + self.assertEqual(args['start'], ["0"]) + self.assertEqual(args['size'], ["10"]) + + def test_cloudsearch_bqsearch(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search(bq="'Test'") + + args = self.get_args(HTTPretty.last_request.raw_requestline) + + self.assertEqual(args['bq'], ["'Test'"]) + + def test_cloudsearch_search_details(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search(q='Test', size=50, start=20) + + args = 
self.get_args(HTTPretty.last_request.raw_requestline) + + self.assertEqual(args['q'], ["Test"]) + self.assertEqual(args['size'], ["50"]) + self.assertEqual(args['start'], ["20"]) + + def test_cloudsearch_facet_single(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search(q='Test', facet=["Author"]) + + args = self.get_args(HTTPretty.last_request.raw_requestline) + + self.assertEqual(args['facet'], ["Author"]) + + def test_cloudsearch_facet_multiple(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search(q='Test', facet=["author", "cat"]) + + args = self.get_args(HTTPretty.last_request.raw_requestline) + + self.assertEqual(args['facet'], ["author,cat"]) + + def test_cloudsearch_facet_constraint_single(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search( + q='Test', + facet_constraints={'author': "'John Smith','Mark Smith'"}) + + args = self.get_args(HTTPretty.last_request.raw_requestline) + + self.assertEqual(args['facet-author-constraints'], + ["'John Smith','Mark Smith'"]) + + def test_cloudsearch_facet_constraint_multiple(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search( + q='Test', + facet_constraints={'author': "'John Smith','Mark Smith'", + 'category': "'News','Reviews'"}) + + args = self.get_args(HTTPretty.last_request.raw_requestline) + + self.assertEqual(args['facet-author-constraints'], + ["'John Smith','Mark Smith'"]) + self.assertEqual(args['facet-category-constraints'], + ["'News','Reviews'"]) + + def test_cloudsearch_facet_sort_single(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search(q='Test', facet_sort={'author': 'alpha'}) + + args = self.get_args(HTTPretty.last_request.raw_requestline) + + self.assertEqual(args['facet-author-sort'], ['alpha']) + + def test_cloudsearch_facet_sort_multiple(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search(q='Test', facet_sort={'author': 'alpha', + 'cat': 'count'}) + + args = 
self.get_args(HTTPretty.last_request.raw_requestline) + + self.assertEqual(args['facet-author-sort'], ['alpha']) + self.assertEqual(args['facet-cat-sort'], ['count']) + + def test_cloudsearch_top_n_single(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search(q='Test', facet_top_n={'author': 5}) + + args = self.get_args(HTTPretty.last_request.raw_requestline) + + self.assertEqual(args['facet-author-top-n'], ['5']) + + def test_cloudsearch_top_n_multiple(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search(q='Test', facet_top_n={'author': 5, 'cat': 10}) + + args = self.get_args(HTTPretty.last_request.raw_requestline) + + self.assertEqual(args['facet-author-top-n'], ['5']) + self.assertEqual(args['facet-cat-top-n'], ['10']) + + def test_cloudsearch_rank_single(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search(q='Test', rank=["date"]) + + args = self.get_args(HTTPretty.last_request.raw_requestline) + + self.assertEqual(args['rank'], ['date']) + + def test_cloudsearch_rank_multiple(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search(q='Test', rank=["date", "score"]) + + args = self.get_args(HTTPretty.last_request.raw_requestline) + + self.assertEqual(args['rank'], ['date,score']) + + def test_cloudsearch_result_fields_single(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search(q='Test', return_fields=['author']) + + args = self.get_args(HTTPretty.last_request.raw_requestline) + + self.assertEqual(args['return-fields'], ['author']) + + def test_cloudsearch_result_fields_multiple(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search(q='Test', return_fields=['author', 'title']) + + args = self.get_args(HTTPretty.last_request.raw_requestline) + + self.assertEqual(args['return-fields'], ['author,title']) + + + def test_cloudsearch_t_field_single(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search(q='Test', t={'year':'2001..2007'}) + + args = 
self.get_args(HTTPretty.last_request.raw_requestline) + + self.assertEqual(args['t-year'], ['2001..2007']) + + def test_cloudsearch_t_field_multiple(self): + search = SearchConnection(endpoint=HOSTNAME) + + search.search(q='Test', t={'year':'2001..2007', 'score':'10..50'}) + + args = self.get_args(HTTPretty.last_request.raw_requestline) + + self.assertEqual(args['t-year'], ['2001..2007']) + self.assertEqual(args['t-score'], ['10..50']) + + + def test_cloudsearch_results_meta(self): + """Check returned metadata is parsed correctly""" + search = SearchConnection(endpoint=HOSTNAME) + + results = search.search(q='Test') + + # These rely on the default response which is fed into HTTPretty + self.assertEqual(results.rank, "-text_relevance") + self.assertEqual(results.match_expression, "Test") + + def test_cloudsearch_results_info(self): + """Check num_pages_needed is calculated correctly""" + search = SearchConnection(endpoint=HOSTNAME) + + results = search.search(q='Test') + + # This relies on the default response which is fed into HTTPretty + self.assertEqual(results.num_pages_needed, 3.0) + + def test_cloudsearch_results_matched(self): + """ + Check that information objects are passed back through the API + correctly. 
+ """ + search = SearchConnection(endpoint=HOSTNAME) + query = search.build_query(q='Test') + + results = search(query) + + self.assertEqual(results.search_service, search) + self.assertEqual(results.query, query) + + def test_cloudsearch_results_hits(self): + """Check that documents are parsed properly from AWS""" + search = SearchConnection(endpoint=HOSTNAME) + + results = search.search(q='Test') + + hits = map(lambda x: x['id'], results.docs) + + # This relies on the default response which is fed into HTTPretty + self.assertEqual( + hits, ["12341", "12342", "12343", "12344", + "12345", "12346", "12347"]) + + def test_cloudsearch_results_iterator(self): + """Check the results iterator""" + search = SearchConnection(endpoint=HOSTNAME) + + results = search.search(q='Test') + results_correct = iter(["12341", "12342", "12343", "12344", + "12345", "12346", "12347"]) + for x in results: + self.assertEqual(x['id'], results_correct.next()) + + + def test_cloudsearch_results_internal_consistancy(self): + """Check the documents length matches the iterator details""" + search = SearchConnection(endpoint=HOSTNAME) + + results = search.search(q='Test') + + self.assertEqual(len(results), len(results.docs)) + + def test_cloudsearch_search_nextpage(self): + """Check next page query is correct""" + search = SearchConnection(endpoint=HOSTNAME) + query1 = search.build_query(q='Test') + query2 = search.build_query(q='Test') + + results = search(query2) + + self.assertEqual(results.next_page().query.start, + query1.start + query1.size) + self.assertEqual(query1.q, query2.q) + +class CloudSearchSearchFacetTest(CloudSearchSearchBaseTest): + response = { + 'rank': '-text_relevance', + 'match-expr':"Test", + 'hits': { + 'found': 30, + 'start': 0, + 'hit':CloudSearchSearchBaseTest.hits + }, + 'info': { + 'rid':'b7c167f6c2da6d93531b9a7b314ad030b3a74803b4b7797edb905ba5a6a08', + 'time-ms': 2, + 'cpu-time-ms': 0 + }, + 'facets': { + 'tags': {}, + 'animals': {'constraints': [{'count': '2', 
'value': 'fish'}, {'count': '1', 'value':'lions'}]}, + } + } + + def test_cloudsearch_search_facets(self): + #self.response['facets'] = {'tags': {}} + + search = SearchConnection(endpoint=HOSTNAME) + + results = search.search(q='Test', facet=['tags']) + + self.assertTrue('tags' not in results.facets) + self.assertEqual(results.facets['animals'], {u'lions': u'1', u'fish': u'2'}) + + +class CloudSearchNonJsonTest(CloudSearchSearchBaseTest): + response = '

500 Internal Server Error

' + response_status = 500 + content_type = 'text/xml' + + def test_response(self): + search = SearchConnection(endpoint=HOSTNAME) + + with self.assertRaises(SearchServiceException): + search.search(q='Test') + + +class CloudSearchUnauthorizedTest(CloudSearchSearchBaseTest): + response = '

403 Forbidden

foo bar baz' + response_status = 403 + content_type = 'text/html' + + def test_response(self): + search = SearchConnection(endpoint=HOSTNAME) + + with self.assertRaisesRegexp(SearchServiceException, 'foo bar baz'): + search.search(q='Test') + + +class FakeResponse(object): + status_code = 405 + content = '' + + +class CloudSearchConnectionTest(unittest.TestCase): + cloudsearch = True + + def setUp(self): + super(CloudSearchConnectionTest, self).setUp() + self.conn = SearchConnection( + endpoint='test-domain.cloudsearch.amazonaws.com' + ) + + def test_expose_additional_error_info(self): + mpo = mock.patch.object + fake = FakeResponse() + fake.content = 'Nopenopenope' + + # First, in the case of a non-JSON, non-403 error. + with mpo(requests, 'get', return_value=fake) as mock_request: + with self.assertRaises(SearchServiceException) as cm: + self.conn.search(q='not_gonna_happen') + + self.assertTrue('non-json response' in str(cm.exception)) + self.assertTrue('Nopenopenope' in str(cm.exception)) + + # Then with JSON & an 'error' key within. + fake.content = json.dumps({ + 'error': "Something went wrong. Oops." + }) + + with mpo(requests, 'get', return_value=fake) as mock_request: + with self.assertRaises(SearchServiceException) as cm: + self.conn.search(q='no_luck_here') + + self.assertTrue('Unknown error' in str(cm.exception)) + self.assertTrue('went wrong. 
Oops' in str(cm.exception)) From 5260282e7e4f38b005f8d53ba63edc877505e385 Mon Sep 17 00:00:00 2001 From: Aron Rosenberg Date: Mon, 14 Apr 2014 13:43:09 -0700 Subject: [PATCH 46/60] Pass partial= param into query layer --- boto/cloudsearch2/search.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/boto/cloudsearch2/search.py b/boto/cloudsearch2/search.py index f95b6a9099..470f19a080 100644 --- a/boto/cloudsearch2/search.py +++ b/boto/cloudsearch2/search.py @@ -261,7 +261,7 @@ def search(self, q=None, parser=None, fq=None, rank=None, return_fields=None, return_fields=return_fields, size=size, start=start, facet=facet, highlight=highlight, sort=sort, - options=options) + partial=partial, options=options) return self(query) def __call__(self, query): From 5ec8c7cb1bd778be3dbd01d2f005775ea910217a Mon Sep 17 00:00:00 2001 From: Aron Rosenberg Date: Mon, 14 Apr 2014 13:50:22 -0700 Subject: [PATCH 47/60] Handle case where self.domain isn't defined and we have to default to some API version number. Use new 2013-01-01 in this case. --- boto/cloudsearch2/document.py | 5 ++++- boto/cloudsearch2/search.py | 5 ++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/boto/cloudsearch2/document.py b/boto/cloudsearch2/document.py index d6220b518e..7ce4b3fbe1 100644 --- a/boto/cloudsearch2/document.py +++ b/boto/cloudsearch2/document.py @@ -189,7 +189,10 @@ def commit(self): index = sdf.index(': null') boto.log.error(sdf[index - 100:index + 100]) - url = "http://%s/%s/documents/batch" % (self.endpoint, self.domain.layer1.APIVersion) + api_version = '2013-01-01' + if self.domain: + api_version = self.domain.layer1.APIVersion + url = "http://%s/%s/documents/batch" % (self.endpoint, api_version) # Keep-alive is automatic in a post-1.0 requests world. 
session = requests.Session() diff --git a/boto/cloudsearch2/search.py b/boto/cloudsearch2/search.py index 470f19a080..0315581614 100644 --- a/boto/cloudsearch2/search.py +++ b/boto/cloudsearch2/search.py @@ -273,7 +273,10 @@ def __call__(self, query): :rtype: :class:`boto.cloudsearch2.search.SearchResults` :return: search results """ - url = "http://%s/%s/search" % (self.endpoint, self.domain.layer1.APIVersion) + api_version = '2013-01-01' + if self.domain: + api_version = self.domain.layer1.APIVersion + url = "http://%s/%s/search" % (self.endpoint, api_version) params = query.to_params() r = requests.get(url, params=params) From 0f93bbad88c0ed66eb945e8bf6b03008ffb2a87b Mon Sep 17 00:00:00 2001 From: "Daniel G. Taylor" Date: Thu, 17 Apr 2014 16:35:08 -0700 Subject: [PATCH 48/60] CloudSearch2 updates Changes the following: * Generated layer1.py from service model * Layer1 -> CloudSearchConnection * Layer1 now uses JSON instead of XML responses * Updated layer2 objects to use JSON and generated layer1 * Updated doc service to remove attributes which are no longer valid * Updated search service to remove attributes which are no longer valid and modify existing attributes which have changed * Updated unit tests and integration tests for all of the above --- boto/cloudsearch2/__init__.py | 9 +- boto/cloudsearch2/document.py | 57 +- boto/cloudsearch2/domain.py | 238 ++- boto/cloudsearch2/exceptions.py | 46 + boto/cloudsearch2/layer1.py | 1440 ++++++++--------- boto/cloudsearch2/layer2.py | 21 +- boto/cloudsearch2/optionstatus.py | 50 +- boto/cloudsearch2/search.py | 8 +- docs/source/ref/cloudsearch.rst | 49 +- docs/source/ref/cloudsearch2.rst | 61 + setup.py | 3 +- tests/integration/cloudsearch2/test_layers.py | 11 +- tests/unit/cloudsearch2/test_connection.py | 240 +-- tests/unit/cloudsearch2/test_document.py | 71 +- tests/unit/cloudsearch2/test_search.py | 161 +- 15 files changed, 1261 insertions(+), 1204 deletions(-) create mode 100644 boto/cloudsearch2/exceptions.py 
create mode 100644 docs/source/ref/cloudsearch2.rst diff --git a/boto/cloudsearch2/__init__.py b/boto/cloudsearch2/__init__.py index bffa50d9a5..d14c917935 100644 --- a/boto/cloudsearch2/__init__.py +++ b/boto/cloudsearch2/__init__.py @@ -1,6 +1,4 @@ -# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ -# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. -# All Rights Reserved +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the @@ -20,8 +18,7 @@ # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. -# -from boto.regioninfo import RegionInfo, get_regions +from boto.regioninfo import get_regions def regions(): @@ -34,7 +31,7 @@ def regions(): import boto.cloudsearch2.layer1 return get_regions( 'cloudsearch', - connection_cls=boto.cloudsearch2.layer1.Layer1 + connection_cls=boto.cloudsearch2.layer1.CloudSearchConnection ) diff --git a/boto/cloudsearch2/document.py b/boto/cloudsearch2/document.py index 7ce4b3fbe1..ed0f6c3f3c 100644 --- a/boto/cloudsearch2/document.py +++ b/boto/cloudsearch2/document.py @@ -1,6 +1,5 @@ # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ -# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. -# All Rights Reserved +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the @@ -62,13 +61,14 @@ class DocumentServiceConnection(object): A CloudSearch document service. The DocumentServiceConection is used to add, remove and update documents in - CloudSearch. Commands are uploaded to CloudSearch in SDF (Search Document Format). + CloudSearch. 
Commands are uploaded to CloudSearch in SDF (Search Document + Format). To generate an appropriate SDF, use :func:`add` to add or update documents, as well as :func:`delete` to remove documents. - Once the set of documents is ready to be index, use :func:`commit` to send the - commands to CloudSearch. + Once the set of documents is ready to be index, use :func:`commit` to send + the commands to CloudSearch. If there are a lot of documents to index, it may be preferable to split the generation of SDF data and the actual uploading into CloudSearch. Retrieve @@ -91,7 +91,7 @@ def __init__(self, domain=None, endpoint=None): self.documents_batch = [] self._sdf = None - def add(self, _id, version, fields, lang='en'): + def add(self, _id, fields): """ Add a document to be processed by the DocumentService @@ -100,39 +100,25 @@ def add(self, _id, version, fields, lang='en'): :type _id: string :param _id: A unique ID used to refer to this document. - :type version: int - :param version: Version of the document being indexed. If a file is - being reindexed, the version should be higher than the existing one - in CloudSearch. - :type fields: dict :param fields: A dictionary of key-value pairs to be uploaded . - - :type lang: string - :param lang: The language code the data is in. Only 'en' is currently - supported """ - d = {'type': 'add', 'id': _id, 'version': version, 'lang': lang, - 'fields': fields} + d = {'type': 'add', 'id': _id, 'fields': fields} self.documents_batch.append(d) - def delete(self, _id, version): + def delete(self, _id): """ Schedule a document to be removed from the CloudSearch service - The document will not actually be scheduled for removal until :func:`commit` is called + The document will not actually be scheduled for removal until + :func:`commit` is called :type _id: string :param _id: The unique ID of this document. - - :type version: int - :param version: Version of the document to remove. 
The delete will only - occur if this version number is higher than the version currently - in the index. """ - d = {'type': 'delete', 'id': _id, 'version': version} + d = {'type': 'delete', 'id': _id} self.documents_batch.append(d) def get_sdf(self): @@ -149,8 +135,8 @@ def clear_sdf(self): """ Clear the working documents from this DocumentServiceConnection - This should be used after :func:`commit` if the connection will be reused - for another set of documents. + This should be used after :func:`commit` if the connection will be + reused for another set of documents. """ self._sdf = None @@ -184,8 +170,8 @@ def commit(self): sdf = self.get_sdf() if ': null' in sdf: - boto.log.error('null value in sdf detected. This will probably raise ' - '500 error.') + boto.log.error('null value in sdf detected. This will probably ' + 'raise 500 error.') index = sdf.index(': null') boto.log.error(sdf[index - 100:index + 100]) @@ -203,7 +189,8 @@ def commit(self): ) session.mount('http://', adapter) session.mount('https://', adapter) - r = session.post(url, data=sdf, headers={'Content-Type': 'application/json'}) + r = session.post(url, data=sdf, + headers={'Content-Type': 'application/json'}) return CommitResponse(r, self, sdf) @@ -231,14 +218,15 @@ def __init__(self, response, doc_service, sdf): try: self.content = json.loads(response.content) except: - boto.log.error('Error indexing documents.\nResponse Content:\n{0}\n\n' - 'SDF:\n{1}'.format(response.content, self.sdf)) + boto.log.error('Error indexing documents.\nResponse Content:\n{0}' + '\n\nSDF:\n{1}'.format(response.content, self.sdf)) raise boto.exception.BotoServerError(self.response.status_code, '', body=response.content) self.status = self.content['status'] if self.status == 'error': - self.errors = [e.get('message') for e in self.content.get('errors', [])] + self.errors = [e.get('message') for e in self.content.get('errors', + [])] for e in self.errors: if "Illegal Unicode character" in e: raise 
EncodingError("Illegal Unicode character in document") @@ -267,6 +255,7 @@ def _check_num_ops(self, type_, response_num): if d['type'] == type_]) if response_num != commit_num: + boto.log.debug(self.response.content) raise CommitMismatchError( - 'Incorrect number of {0}s returned. Commit: {1} Response: {2}'\ + 'Incorrect number of {0}s returned. Commit: {1} Response: {2}' .format(type_, commit_num, response_num)) diff --git a/boto/cloudsearch2/domain.py b/boto/cloudsearch2/domain.py index cca13c1900..0643eaf8fc 100644 --- a/boto/cloudsearch2/domain.py +++ b/boto/cloudsearch2/domain.py @@ -1,6 +1,4 @@ -# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ -# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. -# All Rights Reserved +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the @@ -22,7 +20,6 @@ # IN THE SOFTWARE. # -import boto from .optionstatus import IndexFieldStatus from .optionstatus import ServicePoliciesStatus from .optionstatus import ExpressionStatus @@ -90,18 +87,18 @@ def __init__(self, layer1, data): self.update_from_data(data) def update_from_data(self, data): - self.created = data['created'] - self.deleted = data['deleted'] - self.processing = data['processing'] - self.requires_index_documents = data['requires_index_documents'] - self.domain_id = data['domain_id'] - self.domain_name = data['domain_name'] - self.search_instance_count = data['search_instance_count'] - self.search_instance_type = data.get('search_instance_type', None) - self.search_partition_count = data['search_partition_count'] - self._doc_service = data['doc_service'] - self._service_arn = data['arn'] - self._search_service = data['search_service'] + self.created = data['Created'] + self.deleted = data['Deleted'] + self.processing = data['Processing'] + self.requires_index_documents = 
data['RequiresIndexDocuments'] + self.domain_id = data['DomainId'] + self.domain_name = data['DomainName'] + self.search_instance_count = data['SearchInstanceCount'] + self.search_instance_type = data.get('SearchInstanceType', None) + self.search_partition_count = data['SearchPartitionCount'] + self._doc_service = data['DocService'] + self._service_arn = data['ARN'] + self._search_service = data['SearchService'] @property def service_arn(self): @@ -109,11 +106,11 @@ def service_arn(self): @property def doc_service_endpoint(self): - return self._doc_service['endpoint'] + return self._doc_service['Endpoint'] @property def search_service_endpoint(self): - return self._search_service['endpoint'] + return self._search_service['Endpoint'] @property def created(self): @@ -189,11 +186,15 @@ def get_availability_options(self): object representing the currently defined availability options for the domain. :return: OptionsStatus object - :rtype: :class:`boto.cloudsearch2.option.AvailabilityOptionsStatus` object + :rtype: :class:`boto.cloudsearch2.option.AvailabilityOptionsStatus` + object """ return AvailabilityOptionsStatus( - self, None, self.layer1.describe_availability_options, - self.layer1.update_availability_options) + self, refresh_fn=self.layer1.describe_availability_options, + refresh_key=['DescribeAvailabilityOptionsResponse', + 'DescribeAvailabilityOptionsResult', + 'AvailabilityOptions'], + save_fn=self.layer1.update_availability_options) def get_scaling_options(self): """ @@ -201,12 +202,15 @@ def get_scaling_options(self): object representing the currently defined scaling options for the domain. 
:return: ScalingParametersStatus object - :rtype: :class:`boto.cloudsearch2.option.ScalingParametersStatus` object + :rtype: :class:`boto.cloudsearch2.option.ScalingParametersStatus` + object """ return ScalingParametersStatus( - self, None, - self.layer1.describe_scaling_parameters, - self.layer1.update_scaling_parameters) + self, refresh_fn=self.layer1.describe_scaling_parameters, + refresh_key=['DescribeScalingParametersResponse', + 'DescribeScalingParametersResult', + 'ScalingParameters'], + save_fn=self.layer1.update_scaling_parameters) def get_access_policies(self): """ @@ -216,9 +220,12 @@ def get_access_policies(self): :return: ServicePoliciesStatus object :rtype: :class:`boto.cloudsearch2.option.ServicePoliciesStatus` object """ - return ServicePoliciesStatus(self, None, - self.layer1.describe_service_access_policies, - self.layer1.update_service_access_policies) + return ServicePoliciesStatus( + self, refresh_fn=self.layer1.describe_service_access_policies, + refresh_key=['DescribeServiceAccessPoliciesResponse', + 'DescribeServiceAccessPoliciesResult', + 'AccessPolicies'], + save_fn=self.layer1.update_service_access_policies) def index_documents(self): """ @@ -234,9 +241,15 @@ def get_index_fields(self, field_names=None): """ Return a list of index fields defined for this domain. 
:return: list of IndexFieldStatus objects - :rtype: list of :class:`boto.cloudsearch2.option.IndexFieldStatus` object + :rtype: list of :class:`boto.cloudsearch2.option.IndexFieldStatus` + object """ data = self.layer1.describe_index_fields(self.name, field_names) + + data = (data['DescribeIndexFieldsResponse'] + ['DescribeIndexFieldsResult'] + ['IndexFields']) + return [IndexFieldStatus(self, d) for d in data] def create_index_field(self, field_name, field_type, @@ -308,14 +321,141 @@ def create_index_field(self, field_name, field_type, :raises: BaseException, InternalException, LimitExceededException, InvalidTypeException, ResourceNotFoundException """ - data = self.layer1.define_index_field(self.name, field_name, field_type, - default=default, facet=facet, - returnable=returnable, - searchable=searchable, - sortable=sortable, - highlight=highlight, - source_field=source_field, - analysis_scheme=analysis_scheme) + index = { + 'IndexFieldName': field_name, + 'IndexFieldType': field_type + } + if field_type == 'literal': + index['LiteralOptions'] = { + 'FacetEnabled': facet, + 'ReturnEnabled': returnable, + 'SearchEnabled': searchable, + 'SortEnabled': sortable + } + if default: + index['LiteralOptions']['DefaultValue'] = default + if source_field: + index['LiteralOptions']['SourceField'] = source_field + elif field_type == 'literal-array': + index['LiteralArrayOptions'] = { + 'FacetEnabled': facet, + 'ReturnEnabled': returnable, + 'SearchEnabled': searchable + } + if default: + index['LiteralArrayOptions']['DefaultValue'] = default + if source_field: + index['LiteralArrayOptions']['SourceFields'] = \ + ','.join(source_field) + elif field_type == 'int': + index['IntOptions'] = { + 'DefaultValue': default, + 'FacetEnabled': facet, + 'ReturnEnabled': returnable, + 'SearchEnabled': searchable, + 'SortEnabled': sortable + } + if default: + index['IntOptions']['DefaultValue'] = default + if source_field: + index['IntOptions']['SourceField'] = source_field + elif 
field_type == 'int-array': + index['IntArrayOptions'] = { + 'FacetEnabled': facet, + 'ReturnEnabled': returnable, + 'SearchEnabled': searchable + } + if default: + index['IntArrayOptions']['DefaultValue'] = default + if source_field: + index['IntArrayOptions']['SourceFields'] = \ + ','.join(source_field) + elif field_type == 'date': + index['DateOptions'] = { + 'FacetEnabled': facet, + 'ReturnEnabled': returnable, + 'SearchEnabled': searchable, + 'SortEnabled': sortable + } + if default: + index['DateOptions']['DefaultValue'] = default + if source_field: + index['DateOptions']['SourceField'] = source_field + elif field_type == 'date-array': + index['DateArrayOptions'] = { + 'FacetEnabled': facet, + 'ReturnEnabled': returnable, + 'SearchEnabled': searchable + } + if default: + index['DateArrayOptions']['DefaultValue'] = default + if source_field: + index['DateArrayOptions']['SourceFields'] = \ + ','.join(source_field) + elif field_type == 'double': + index['DoubleOptions'] = { + 'FacetEnabled': facet, + 'ReturnEnabled': returnable, + 'SearchEnabled': searchable, + 'SortEnabled': sortable + } + if default: + index['DoubleOptions']['DefaultValue'] = default + if source_field: + index['DoubleOptions']['SourceField'] = source_field + elif field_type == 'double-array': + index['DoubleArrayOptions'] = { + 'FacetEnabled': facet, + 'ReturnEnabled': returnable, + 'SearchEnabled': searchable + } + if default: + index['DoubleArrayOptions']['DefaultValue'] = default + if source_field: + index['DoubleArrayOptions']['SourceFields'] = \ + ','.join(source_field) + elif field_type == 'text': + index['TextOptions'] = { + 'ReturnEnabled': returnable, + 'HighlightEnabled': highlight, + 'SortEnabled': sortable + } + if default: + index['TextOptions']['DefaultValue'] = default + if source_field: + index['TextOptions']['SourceField'] = source_field + if analysis_scheme: + index['TextOptions']['AnalysisScheme'] = analysis_scheme + elif field_type == 'text-array': + 
index['TextArrayOptions'] = { + 'ReturnEnabled': returnable, + 'HighlightEnabled': highlight + } + if default: + index['TextArrayOptions']['DefaultValue'] = default + if source_field: + index['TextArrayOptions']['SourceFields'] = \ + ','.join(source_field) + if analysis_scheme: + index['TextArrayOptions']['AnalysisScheme'] = analysis_scheme + elif field_type == 'latlon': + index['LatLonOptions'] = { + 'FacetEnabled': facet, + 'ReturnEnabled': returnable, + 'SearchEnabled': searchable, + 'SortEnabled': sortable + } + if default: + index['LatLonOptions']['DefaultValue'] = default + if source_field: + index['LatLonOptions']['SourceField'] = source_field + + data = self.layer1.define_index_field(self.name, index) + + data = (data['DefineIndexFieldResponse'] + ['DefineIndexFieldResult'] + ['IndexField']) + return IndexFieldStatus(self, data, self.layer1.describe_index_fields) @@ -323,10 +463,16 @@ def get_expressions(self, names=None): """ Return a list of rank expressions defined for this domain. 
:return: list of ExpressionStatus objects - :rtype: list of :class:`boto.cloudsearch2.option.ExpressionStatus` object + :rtype: list of :class:`boto.cloudsearch2.option.ExpressionStatus` + object """ fn = self.layer1.describe_expressions data = fn(self.name, names) + + data = (data['DescribeExpressionsResponse'] + ['DescribeExpressionsResult'] + ['Expressions']) + return [ExpressionStatus(self, d, fn) for d in data] def create_expression(self, name, value): @@ -345,7 +491,8 @@ def create_expression(self, name, value): * Single value, sort enabled numeric fields (int, double, date) * Other expressions - * The _score variable, which references a document's relevance score + * The _score variable, which references a document's relevance + score * The _time variable, which references the current epoch time * Integer, floating point, hex, and octal literals * Arithmetic operators: + - * / % @@ -361,10 +508,10 @@ def create_expression(self, name, value): Expressions always return an integer value from 0 to the maximum 64-bit signed integer value (2^63 - 1). Intermediate results are calculated as double-precision floating point values and the return - value is rounded to the nearest integer. If the expression is invalid - or evaluates to a negative value, it returns 0. If the expression - evaluates to a value greater than the maximum, it returns the maximum - value. + value is rounded to the nearest integer. If the expression is + invalid or evaluates to a negative value, it returns 0. If the + expression evaluates to a value greater than the maximum, it + returns the maximum value. 
The source data for an Expression can be the name of an IndexField of type int or double, another Expression or the @@ -386,6 +533,11 @@ def create_expression(self, name, value): InvalidTypeException, ResourceNotFoundException """ data = self.layer1.define_expression(self.name, name, value) + + data = (data['DefineExpressionResponse'] + ['DefineExpressionResult'] + ['Expression']) + return ExpressionStatus(self, data, self.layer1.describe_expressions) diff --git a/boto/cloudsearch2/exceptions.py b/boto/cloudsearch2/exceptions.py new file mode 100644 index 0000000000..c114113963 --- /dev/null +++ b/boto/cloudsearch2/exceptions.py @@ -0,0 +1,46 @@ +""" +Exceptions that are specific to the cloudsearch2 module. +""" +from boto.exception import BotoServerError + + +class InvalidTypeException(BotoServerError): + """ + Raised when an invalid record type is passed to CloudSearch. + """ + pass + + +class LimitExceededException(BotoServerError): + """ + Raised when a limit has been exceeded. + """ + pass + + +class InternalException(BotoServerError): + """ + A generic server-side error. + """ + pass + + +class DisabledOperationException(BotoServerError): + """ + Raised when an operation has been disabled. + """ + pass + + +class ResourceNotFoundException(BotoServerError): + """ + Raised when a requested resource does not exist. + """ + pass + + +class BaseException(BotoServerError): + """ + A generic server-side error. + """ + pass diff --git a/boto/cloudsearch2/layer1.py b/boto/cloudsearch2/layer1.py index 08c52e8558..2604ee97ec 100644 --- a/boto/cloudsearch2/layer1.py +++ b/boto/cloudsearch2/layer1.py @@ -1,6 +1,4 @@ -# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ -# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. -# All Rights Reserved +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. 
All Rights Reserved # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the @@ -20,897 +18,767 @@ # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. +# + +try: + import json +except ImportError: + import simplejson as json import boto -import boto.jsonresponse -from boto.compat import json from boto.connection import AWSQueryConnection from boto.regioninfo import RegionInfo - -#boto.set_stream_logger('cloudsearch') - - -def do_bool(val): - return 'true' if val in [True, 1, '1', 'true'] else 'false' - - -class Layer1(AWSQueryConnection): - - APIVersion = '2013-01-01' - #AuthServiceName = 'sqs' - DefaultRegionName = boto.config.get('Boto', 'cs_region_name', 'us-east-1') - DefaultRegionEndpoint = boto.config.get('Boto', 'cs_region_endpoint', - 'cloudsearch.us-east-1.amazonaws.com') - - def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, - is_secure=True, host=None, port=None, - proxy=None, proxy_port=None, - proxy_user=None, proxy_pass=None, debug=0, - https_connection_factory=None, region=None, path='/', - api_version=None, security_token=None, - validate_certs=True, profile_name=None): +from boto.exception import JSONResponseError +from boto.cloudsearch2 import exceptions + + +class CloudSearchConnection(AWSQueryConnection): + """ + Amazon CloudSearch Configuration Service + You use the Amazon CloudSearch configuration service to create, + configure, and manage search domains. Configuration service + requests are submitted using the AWS Query protocol. AWS Query + requests are HTTP or HTTPS requests submitted via HTTP GET or POST + with a query parameter named Action. + + The endpoint for configuration service requests is region- + specific: cloudsearch. region .amazonaws.com. For example, + cloudsearch.us-east-1.amazonaws.com. 
For a current list of + supported regions and endpoints, see `Regions and Endpoints`_. + """ + APIVersion = "2013-01-01" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "cloudsearch.us-east-1.amazonaws.com" + ResponseError = JSONResponseError + + _faults = { + "InvalidTypeException": exceptions.InvalidTypeException, + "LimitExceededException": exceptions.LimitExceededException, + "InternalException": exceptions.InternalException, + "DisabledOperationException": exceptions.DisabledOperationException, + "ResourceNotFoundException": exceptions.ResourceNotFoundException, + "BaseException": exceptions.BaseException, + } + + + def __init__(self, **kwargs): + region = kwargs.pop('region', None) if not region: region = RegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint) + + if 'host' not in kwargs or kwargs['host'] is None: + kwargs['host'] = region.endpoint + + super(CloudSearchConnection, self).__init__(**kwargs) self.region = region - AWSQueryConnection.__init__( - self, - host=self.region.endpoint, - aws_access_key_id=aws_access_key_id, - aws_secret_access_key=aws_secret_access_key, - is_secure=is_secure, - port=port, - proxy=proxy, - proxy_port=proxy_port, - proxy_user=proxy_user, - proxy_pass=proxy_pass, - debug=debug, - https_connection_factory=https_connection_factory, - path=path, - security_token=security_token, - validate_certs=validate_certs, - profile_name=profile_name) def _required_auth_capability(self): return ['hmac-v4'] - def get_response(self, doc_path, action, params, path='/', - parent=None, verb='GET', list_marker=None): - if not parent: - parent = self - response = self.make_request(action, params, path, verb) - body = response.read() - boto.log.debug(body) - if response.status == 200: - e = boto.jsonresponse.Element( - list_marker=list_marker if list_marker else 'Set', - pythonize_name=True) - h = boto.jsonresponse.XmlHandler(e, parent) - h.parse(body) - inner = e - for p in doc_path: - inner = inner.get(p) - if not 
inner: - return None if list_marker is None else [] - if isinstance(inner, list): - return inner - else: - return dict(**inner) - else: - raise self.ResponseError(response.status, response.reason, body) + def build_suggesters(self, domain_name): + """ + Indexes the search suggestions. + + :type domain_name: string + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). + + """ + params = {'DomainName': domain_name, } + return self._make_request( + action='BuildSuggesters', + verb='POST', + path='/', params=params) def create_domain(self, domain_name): """ - Create a new search domain. + Creates a new search domain. For more information, see + `Creating a Search Domain`_ in the Amazon CloudSearch + Developer Guide . :type domain_name: string - :param domain_name: A string that represents the name of a - domain. Domain names must be unique across the domains - owned by an account within an AWS region. Domain names - must start with a letter or number and can contain the - following characters: a-z (lowercase), 0-9, and - - (hyphen). Uppercase letters and underscores are not - allowed. + :param domain_name: A name for the domain you are creating. Allowed + characters are a-z (lower-case letters), 0-9, and hyphen (-). + Domain names must start with a letter or number and be at least 3 + and no more than 28 characters long. 
- :raises: BaseException, InternalException, LimitExceededException """ - doc_path = ('create_domain_response', - 'create_domain_result', - 'domain_status') - params = {'DomainName': domain_name} - return self.get_response(doc_path, 'CreateDomain', - params, verb='POST') + params = {'DomainName': domain_name, } + return self._make_request( + action='CreateDomain', + verb='POST', + path='/', params=params) - def define_analysis_scheme(self, domain_name, name, language, - algorithmic_stemming="none", stemming_dictionary=None, - stopwords=None, synonyms=None): + def define_analysis_scheme(self, domain_name, analysis_scheme): """ - Updates stemming options used by indexing for the search domain. + Configures an analysis scheme for a domain. An analysis scheme + defines language-specific text processing options for a `text` + field. For more information, see `Configuring Analysis + Schemes`_ in the Amazon CloudSearch Developer Guide . :type domain_name: string - :param domain_name: A string that represents the name of a - domain. Domain names must be unique across the domains - owned by an account within an AWS region. Domain names - must start with a letter or number and can contain the - following characters: a-z (lowercase), 0-9, and - - (hyphen). Uppercase letters and underscores are not - allowed. - - :type name: str - :param name: Name of the analysis scheme - - :type language: str - :param language: IETF RFC 4646 lang code or 'mul' for multiple - languages. - - :type algorithmic_stemming: str - :param algorithmic_stemming: Which type of stemming to use. 
- one of ``none | minimal | light | full`` - - :type stemming_dictionary: dict - :param stemming_dictionary: dict of stemming words - ``{"running": "run", "jumping": "jump"}`` - - :type stopwords: list of strings - :param stopwords: list of stopwords - - :type synonyms: dict - :param synonyms: dict of Array of words to use as synonyms - ``{"aliases": {"running": ["run", "ran"], "jumping": ["jump", "jumped"]}, - "groups": [["sit", "sitting", "sat"], ["hit", "hitting"]]}`` - - :raises: BaseException, InternalException, InvalidTypeException, - LimitExceededException, ResourceNotFoundException - """ - doc_path = ('define_analysis_scheme_response', - 'define_analysis_scheme_result', - 'analysis_scheme') - params = {'DomainName': domain_name, 'AnalysisScheme.AnalysisSchemeName': name, - 'AnalysisScheme.AnalysisSchemeLanguage': language, - 'AnalysisScheme.AnalysisOptions.AlgorithmicStemming': algorithmic_stemming, - 'AnalysisScheme.AnalysisOptions.StemmingDictionary': - json.dumps(stemming_dictionary) if stemming_dictionary else dict(), - 'AnalysisScheme.AnalysisOptions.Stopwords': - json.dumps(stopwords) if stopwords else list(), - 'AnalysisScheme.AnalysisOptions.Synonyms': - json.dumps(synonyms) if synonyms else dict(), - } - - return self.get_response(doc_path, 'DefineAnalysisScheme', - params, verb='POST') - - def define_expression(self, domain_name, name, value): - """ - Defines an Expression, either replacing an existing - definition or creating a new one. + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). + + :type analysis_scheme: dict + :param analysis_scheme: Configuration information for an analysis + scheme. Each analysis scheme has a unique name and specifies the + language of the text to be processed. 
The following options can be + configured for an analysis scheme: `Synonyms`, `Stopwords`, + `StemmingDictionary`, and `AlgorithmicStemming`. + + """ + params = {'DomainName': domain_name, } + self.build_complex_param(params, 'AnalysisScheme', + analysis_scheme) + return self._make_request( + action='DefineAnalysisScheme', + verb='POST', + path='/', params=params) + + def define_expression(self, domain_name, expression): + """ + Configures an `Expression` for the search domain. Used to + create new expressions and modify existing ones. If the + expression exists, the new configuration replaces the old one. + For more information, see `Configuring Expressions`_ in the + Amazon CloudSearch Developer Guide . :type domain_name: string - :param domain_name: A string that represents the name of a - domain. Domain names must be unique across the domains - owned by an account within an AWS region. Domain names - must start with a letter or number and can contain the - following characters: a-z (lowercase), 0-9, and - - (hyphen). Uppercase letters and underscores are not - allowed. - - :type name: string - :param name: The name of an expression. - - :type value: string - :param value: The expression to evaluate for ranking or - thresholding while processing a search request. The - Expression syntax is based on JavaScript and supports: - - * Single value, sort enabled numeric fields (int, double, date) - * Other expressions - * The _score variable, which references a document's relevance score - * The _time variable, which references the current epoch time - * Integer, floating point, hex, and octal literals - * Arithmetic operators: + - * / % - * Bitwise operators: | & ^ ~ << >> >>> - * Boolean operators (including the ternary operator): && || ! 
?: - * Comparison operators: < <= == >= > - * Mathematical functions: abs ceil exp floor ln log2 log10 logn - max min pow sqrt pow - * Trigonometric functions: acos acosh asin asinh atan atan2 atanh - cos cosh sin sinh tanh tan - * The haversin distance function - - Expressions always return an integer value from 0 to the maximum - 64-bit signed integer value (2^63 - 1). Intermediate results are - calculated as double-precision floating point values and the return - value is rounded to the nearest integer. If the expression is invalid - or evaluates to a negative value, it returns 0. If the expression - evaluates to a value greater than the maximum, it returns the maximum - value. - - The source data for an Expression can be the name of an - IndexField of type int or double, another Expression or the - reserved name _score, or the functions above. The _score source is - defined to return as a double with a floor of 0 to - indicate how relevant a document is to the search request, - taking into account repetition of search terms in the - document and proximity of search terms to each other in - each matching IndexField in the document. - - For more information about using expressions to customize results, - see the Amazon CloudSearch Developer Guide. - - :raises: BaseException, InternalException, LimitExceededException, - InvalidTypeException, ResourceNotFoundException - """ - doc_path = ('define_expression_response', - 'define_expression_result', - 'expression') - params = {'DomainName': domain_name, - 'Expression.ExpressionValue': value, - 'Expression.ExpressionName': name} - return self.get_response(doc_path, 'DefineExpression', - params, verb='POST') - - def define_index_field(self, domain_name, field_name, field_type, - default=None, facet=False, returnable=False, - searchable=False, sortable=False, - highlight=False, source_field=None, - analysis_scheme=None): - """ - Defines an ``IndexField``, either replacing an existing - definition or creating a new one. 
+ :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). + + :type expression: dict + :param expression: A named expression that can be evaluated at search + time. Can be used for sorting and filtering search results and + constructing other expressions. + + """ + params = {'DomainName': domain_name, } + self.build_complex_param(params, 'Expression', + expression) + return self._make_request( + action='DefineExpression', + verb='POST', + path='/', params=params) + + def define_index_field(self, domain_name, index_field): + """ + Configures an `IndexField` for the search domain. Used to + create new fields and modify existing ones. You must specify + the name of the domain you are configuring and an index field + configuration. The index field configuration specifies a + unique name, the index field type, and the options you want to + configure for the field. The options you can specify depend on + the `IndexFieldType`. If the field exists, the new + configuration replaces the old one. For more information, see + `Configuring Index Fields`_ in the Amazon CloudSearch + Developer Guide . :type domain_name: string - :param domain_name: A string that represents the name of a - domain. Domain names must be unique across the domains - owned by an account within an AWS region. Domain names - must start with a letter or number and can contain the - following characters: a-z (lowercase), 0-9, and - - (hyphen). Uppercase letters and underscores are not - allowed. - - :type field_name: string - :param field_name: The name of a field in the search index. - - :type field_type: string - :param field_type: The type of field. 
Valid values are - int | double | literal | text | date | latlon | - int-array | double-array | literal-array | text-array | date-array - - :type default: string or int - :param default: The default value for the field. If the - field is of type ``int`` this should be an integer value. - Otherwise, it's a string. - - :type facet: bool - :param facet: A boolean to indicate whether facets - are enabled for this field or not. Does not apply to - fields of type ``int, int-array, text, text-array``. - - :type returnable: bool - :param returnable: A boolean to indicate whether values - of this field can be returned in search results or - used in ranking. - - :type searchable: bool - :param searchable: A boolean to indicate whether search - is enabled for this field or not. - - :type sortable: bool - :param sortable: A boolean to indicate whether sorting - is enabled for this field or not. Does not apply to - fields of array types. - - :type highlight: bool - :param highlight: A boolean to indicate whether highlighting - is enabled for this field or not. Does not apply to - fields of type ``double, int, date, latlon`` - - :type source_field: list of strings or string - :param source_field: For array types, this is the list of fields - to treat as the source. For singular types, pass a string only. - - :type analysis_scheme: string - :param analysis_scheme: The analysis scheme to use for this field. 
- Only applies to ``text | text-array`` field types - - :raises: BaseException, InternalException, LimitExceededException, - InvalidTypeException, ResourceNotFoundException - """ - doc_path = ('define_index_field_response', - 'define_index_field_result', - 'index_field') - params = {'DomainName': domain_name, - 'IndexField.IndexFieldName': field_name, - 'IndexField.IndexFieldType': field_type} - if field_type == 'literal': - if default: - params['IndexField.LiteralOptions.DefaultValue'] = default - params['IndexField.LiteralOptions.FacetEnabled'] = do_bool(facet) - params['IndexField.LiteralOptions.ReturnEnabled'] = do_bool(returnable) - params['IndexField.LiteralOptions.SearchEnabled'] = do_bool(searchable) - params['IndexField.LiteralOptions.SortEnabled'] = do_bool(sortable) - if source_field: - params['IndexField.LiteralOptions.SourceField'] = source_field - elif field_type == 'literal-array': - if default: - params['IndexField.LiteralArrayOptions.DefaultValue'] = default - params['IndexField.LiteralArrayOptions.FacetEnabled'] = do_bool(facet) - params['IndexField.LiteralArrayOptions.ReturnEnabled'] = do_bool(returnable) - params['IndexField.LiteralArrayOptions.SearchEnabled'] = do_bool(searchable) - if source_field: - params['IndexField.LiteralArrayOptions.SourceFields'] = ','.join(source_field) - elif field_type == 'int': - if default: - params['IndexField.IntOptions.DefaultValue'] = default - params['IndexField.IntOptions.FacetEnabled'] = do_bool(facet) - params['IndexField.IntOptions.ReturnEnabled'] = do_bool(returnable) - params['IndexField.IntOptions.SearchEnabled'] = do_bool(searchable) - params['IndexField.IntOptions.SortEnabled'] = do_bool(sortable) - if source_field: - params['IndexField.IntOptions.SourceField'] = source_field - elif field_type == 'int-array': - if default: - params['IndexField.IntArrayOptions.DefaultValue'] = default - params['IndexField.IntArrayOptions.FacetEnabled'] = do_bool(facet) - 
params['IndexField.IntArrayOptions.ReturnEnabled'] = do_bool(returnable) - params['IndexField.IntArrayOptions.SearchEnabled'] = do_bool(searchable) - if source_field: - params['IndexField.IntArrayOptions.SourceFields'] = ','.join(source_field) - elif field_type == 'date': - if default: - params['IndexField.DateOptions.DefaultValue'] = default - params['IndexField.DateOptions.FacetEnabled'] = do_bool(facet) - params['IndexField.DateOptions.ReturnEnabled'] = do_bool(returnable) - params['IndexField.DateOptions.SearchEnabled'] = do_bool(searchable) - params['IndexField.DateOptions.SortEnabled'] = do_bool(sortable) - if source_field: - params['IndexField.DateOptions.SourceField'] = source_field - elif field_type == 'date-array': - if default: - params['IndexField.DateArrayOptions.DefaultValue'] = default - params['IndexField.DateArrayOptions.FacetEnabled'] = do_bool(facet) - params['IndexField.DateArrayOptions.ReturnEnabled'] = do_bool(returnable) - params['IndexField.DateArrayOptions.SearchEnabled'] = do_bool(searchable) - if source_field: - params['IndexField.DateArrayOptions.SourceFields'] = ','.join(source_field) - elif field_type == 'double': - if default: - params['IndexField.DoubleOptions.DefaultValue'] = default - params['IndexField.DoubleOptions.FacetEnabled'] = do_bool(facet) - params['IndexField.DoubleOptions.ReturnEnabled'] = do_bool(returnable) - params['IndexField.DoubleOptions.SearchEnabled'] = do_bool(searchable) - params['IndexField.DoubleOptions.SortEnabled'] = do_bool(sortable) - if source_field: - params['IndexField.DoubleOptions.SourceField'] = source_field - elif field_type == 'double-array': - if default: - params['IndexField.DoubleArrayOptions.DefaultValue'] = default - params['IndexField.DoubleArrayOptions.FacetEnabled'] = do_bool(facet) - params['IndexField.DoubleArrayOptions.ReturnEnabled'] = do_bool(returnable) - params['IndexField.DoubleArrayOptions.SearchEnabled'] = do_bool(searchable) - if source_field: - 
params['IndexField.DoubleArrayOptions.SourceFields'] = ','.join(source_field) - elif field_type == 'text': - if default: - params['IndexField.TextOptions.DefaultValue'] = default - params['IndexField.TextOptions.ReturnEnabled'] = do_bool(returnable) - params['IndexField.TextOptions.HighlightEnabled'] = do_bool(highlight) - params['IndexField.TextOptions.SortEnabled'] = do_bool(sortable) - if source_field: - params['IndexField.TextOptions.SourceField'] = source_field - if analysis_scheme: - params['IndexField.TextOptions.AnalysisScheme'] = analysis_scheme - elif field_type == 'text-array': - if default: - params['IndexField.TextArrayOptions.DefaultValue'] = default - params['IndexField.TextArrayOptions.ReturnEnabled'] = do_bool(returnable) - params['IndexField.TextArrayOptions.HighlightEnabled'] = do_bool(highlight) - if source_field: - params['IndexField.TextArrayOptions.SourceFields'] = ','.join(source_field) - if analysis_scheme: - params['IndexField.TextArrayOptions.AnalysisScheme'] = analysis_scheme - elif field_type == 'latlon': - if default: - params['IndexField.LatLonOptions.DefaultValue'] = default - params['IndexField.LatLonOptions.FacetEnabled'] = do_bool(facet) - params['IndexField.LatLonOptions.ReturnEnabled'] = do_bool(returnable) - params['IndexField.LatLonOptions.SearchEnabled'] = do_bool(searchable) - params['IndexField.LatLonOptions.SortEnabled'] = do_bool(sortable) - if source_field: - params['IndexField.LatLonOptions.SourceField'] = source_field - - return self.get_response(doc_path, 'DefineIndexField', - params, verb='POST') - - def define_suggester(self, domain_name, name, source_field, - fuzzy_matching=None, sort_expression=None): - """ - Defines an Expression, either replacing an existing - definition or creating a new one. + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. 
Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). + + :type index_field: dict + :param index_field: The index field and field options you want to + configure. + + """ + params = {'DomainName': domain_name, } + self.build_complex_param(params, 'IndexField', + index_field) + return self._make_request( + action='DefineIndexField', + verb='POST', + path='/', params=params) + + def define_suggester(self, domain_name, suggester): + """ + Configures a suggester for a domain. A suggester enables you + to display possible matches before users finish typing their + queries. When you configure a suggester, you must specify the + name of the text field you want to search for possible matches + and a unique name for the suggester. For more information, see + `Getting Search Suggestions`_ in the Amazon CloudSearch + Developer Guide . :type domain_name: string - :param domain_name: A string that represents the name of a - domain. Domain names must be unique across the domains - owned by an account within an AWS region. Domain names - must start with a letter or number and can contain the - following characters: a-z (lowercase), 0-9, and - - (hyphen). Uppercase letters and underscores are not - allowed. - - :type name: string - :param name: The name of an suggester to use. + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). - :type source_field: string - :param source_field: The source field name to use for the ``Suggester`` + :type suggester: dict + :param suggester: Configuration information for a search suggester. + Each suggester has a unique name and specifies the text field you + want to use for suggestions. 
The following options can be + configured for a suggester: `FuzzyMatching`, `SortExpression`. - :type fuzzy_matching: string or None - :param fuzzy_matching: The optional type of fuzzy matching to use. One of - none | low | high - - :type sort_expression: string or None - :param sort_expression: The optional sort expression to use - - :raises: BaseException, InternalException, LimitExceededException, - InvalidTypeException, ResourceNotFoundException """ - doc_path = ('define_expression_response', - 'define_expression_result', - 'expression') - params = {'DomainName': domain_name, - 'Suggester.SuggesterName': name, - 'Suggester.DocumentSuggesterOptions.SourceField': source_field} - if fuzzy_matching is not None: - params['Suggester.DocumentSuggesterOptions.FuzzyMatching'] = fuzzy_matching - if sort_expression is not None: - params['Suggester.DocumentSuggesterOptions.SortExpression'] = sort_expression + params = {'DomainName': domain_name, } + self.build_complex_param(params, 'Suggester', + suggester) + return self._make_request( + action='DefineSuggester', + verb='POST', + path='/', params=params) - return self.get_response(doc_path, 'DefineExpression', params, - verb='POST') - - def delete_analysis_scheme(self, domain_name, scheme_name): + def delete_analysis_scheme(self, domain_name, analysis_scheme_name): """ - Deletes an existing ``AnalysisScheme`` from the search domain. + Deletes an analysis scheme. For more information, see + `Configuring Analysis Schemes`_ in the Amazon CloudSearch + Developer Guide . :type domain_name: string - :param domain_name: A string that represents the name of a - domain. Domain names must be unique across the domains - owned by an account within an AWS region. Domain names - must start with a letter or number and can contain the - following characters: a-z (lowercase), 0-9, and - - (hyphen). Uppercase letters and underscores are not - allowed. 
- - :type scheme_name: string - :param scheme_name: The analysis scheme name to delete - - :raises: BaseException, InternalException, ResourceNotFoundException - """ - doc_path = ('delete_analysis_scheme_response', - 'delete_analysis_scheme_result', - 'analysis_scheme') - params = {'DomainName': domain_name, - 'AnalysisSchemeName': scheme_name} - return self.get_response(doc_path, 'DeleteAnalysisScheme', - params, verb='POST') + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). + + :type analysis_scheme_name: string + :param analysis_scheme_name: The name of the analysis scheme you want + to delete. + + """ + params = { + 'DomainName': domain_name, + 'AnalysisSchemeName': analysis_scheme_name, + } + return self._make_request( + action='DeleteAnalysisScheme', + verb='POST', + path='/', params=params) def delete_domain(self, domain_name): """ - Delete a search domain. + Permanently deletes a search domain and all of its data. Once + a domain has been deleted, it cannot be recovered. For more + information, see `Deleting a Search Domain`_ in the Amazon + CloudSearch Developer Guide . :type domain_name: string - :param domain_name: A string that represents the name of a - domain. Domain names must be unique across the domains - owned by an account within an AWS region. Domain names - must start with a letter or number and can contain the - following characters: a-z (lowercase), 0-9, and - - (hyphen). Uppercase letters and underscores are not - allowed. + :param domain_name: The name of the domain you want to permanently + delete. 
- :raises: BaseException, InternalException """ - doc_path = ('delete_domain_response', - 'delete_domain_result', - 'domain_status') - params = {'DomainName': domain_name} - return self.get_response(doc_path, 'DeleteDomain', - params, verb='POST') + params = {'DomainName': domain_name, } + return self._make_request( + action='DeleteDomain', + verb='POST', + path='/', params=params) - def delete_index_field(self, domain_name, field_name): + def delete_expression(self, domain_name, expression_name): """ - Deletes an existing ``IndexField`` from the search domain. + Removes an `Expression` from the search domain. For more + information, see `Configuring Expressions`_ in the Amazon + CloudSearch Developer Guide . :type domain_name: string - :param domain_name: A string that represents the name of a - domain. Domain names must be unique across the domains - owned by an account within an AWS region. Domain names - must start with a letter or number and can contain the - following characters: a-z (lowercase), 0-9, and - - (hyphen). Uppercase letters and underscores are not - allowed. - - :type field_name: string - :param field_name: A string that represents the name of - an index field. Field names must begin with a letter and - can contain the following characters: a-z (lowercase), - 0-9, and _ (underscore). Uppercase letters and hyphens are - not allowed. The names "body", "docid", and - "text_relevance" are reserved and cannot be specified as - field or rank expression names. - - :raises: BaseException, InternalException, ResourceNotFoundException - """ - doc_path = ('delete_index_field_response', - 'delete_index_field_result', - 'index_field') - params = {'DomainName': domain_name, - 'IndexFieldName': field_name} - return self.get_response(doc_path, 'DeleteIndexField', - params, verb='POST') - - def delete_expression(self, domain_name, name): - """ - Deletes an existing ``Expression`` from the search domain. 
+ :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). - :type domain_name: string - :param domain_name: A string that represents the name of a - domain. Domain names must be unique across the domains - owned by an account within an AWS region. Domain names - must start with a letter or number and can contain the - following characters: a-z (lowercase), 0-9, and - - (hyphen). Uppercase letters and underscores are not - allowed. + :type expression_name: string + :param expression_name: The name of the `Expression` to delete. - :type name: string - :param name: Name of the ``Expression`` to delete. - - :raises: BaseException, InternalException, ResourceNotFoundException """ - doc_path = ('delete_expression_response', - 'delete_expression_result', - 'expression') - params = {'DomainName': domain_name, 'ExpressionName': name} - return self.get_response(doc_path, 'DeleteExpression', - params, verb='POST') + params = { + 'DomainName': domain_name, + 'ExpressionName': expression_name, + } + return self._make_request( + action='DeleteExpression', + verb='POST', + path='/', params=params) - def delete_suggester(self, domain_name, name): + def delete_index_field(self, domain_name, index_field_name): """ - Deletes an existing ``Suggester`` from the search domain. + Removes an `IndexField` from the search domain. For more + information, see `Configuring Index Fields`_ in the Amazon + CloudSearch Developer Guide . :type domain_name: string - :param domain_name: A string that represents the name of a - domain. Domain names must be unique across the domains - owned by an account within an AWS region. Domain names - must start with a letter or number and can contain the - following characters: a-z (lowercase), 0-9, and - - (hyphen). 
Uppercase letters and underscores are not - allowed. + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). - :type name: string - :param name: Name of the ``Suggester`` to delete. + :type index_field_name: string + :param index_field_name: The name of the index field you want to + remove from the domain's indexing options. - :raises: BaseException, InternalException, ResourceNotFoundException """ - doc_path = ('delete_suggester_response', - 'delete_suggester_result', - 'suggester') - params = {'DomainName': domain_name, 'SuggesterName': name} - return self.get_response(doc_path, 'DeleteSuggester', - params, verb='POST') + params = { + 'DomainName': domain_name, + 'IndexFieldName': index_field_name, + } + return self._make_request( + action='DeleteIndexField', + verb='POST', + path='/', params=params) - def describe_analysis_schemes(self, domain_name): + def delete_suggester(self, domain_name, suggester_name): """ - Describes analysis schemes used by indexing for the search domain. + Deletes a suggester. For more information, see `Getting Search + Suggestions`_ in the Amazon CloudSearch Developer Guide . :type domain_name: string - :param domain_name: A string that represents the name of a - domain. Domain names must be unique across the domains - owned by an account within an AWS region. Domain names - must start with a letter or number and can contain the - following characters: a-z (lowercase), 0-9, and - - (hyphen). 
- - :raises: BaseException, InternalException, ResourceNotFoundException - """ - doc_path = ('describe_analysis_schemes_response', - 'describe_analysis_schemes_result', - 'analysis_schemes') - params = {'DomainName': domain_name} - return self.get_response(doc_path, 'DescribeAnalysisSchemes', - params, verb='POST') + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). + + :type suggester_name: string + :param suggester_name: Specifies the name of the suggester you want to + delete. + + """ + params = { + 'DomainName': domain_name, + 'SuggesterName': suggester_name, + } + return self._make_request( + action='DeleteSuggester', + verb='POST', + path='/', params=params) + + def describe_analysis_schemes(self, domain_name, + analysis_scheme_names=None, deployed=None): + """ + Gets the analysis schemes configured for a domain. An analysis + scheme defines language-specific text processing options for a + `text` field. Can be limited to specific analysis schemes by + name. By default, shows all analysis schemes and includes any + pending changes to the configuration. Set the `Deployed` + option to `True` to show the active configuration and exclude + pending changes. For more information, see `Configuring + Analysis Schemes`_ in the Amazon CloudSearch Developer Guide . - def describe_availability_options(self, domain_name): - """ - Describes the availability options for the search domain. + :type domain_name: string + :param domain_name: The name of the domain you want to describe. + + :type analysis_scheme_names: list + :param analysis_scheme_names: The analysis schemes you want to + describe. + + :type deployed: boolean + :param deployed: Whether to display the deployed configuration ( + `True`) or include any pending changes ( `False`). 
Defaults to + `False`. + + """ + params = {'DomainName': domain_name, } + if analysis_scheme_names is not None: + self.build_list_params(params, + analysis_scheme_names, + 'AnalysisSchemeNames.member') + if deployed is not None: + params['Deployed'] = str( + deployed).lower() + return self._make_request( + action='DescribeAnalysisSchemes', + verb='POST', + path='/', params=params) + + def describe_availability_options(self, domain_name, deployed=None): + """ + Gets the availability options configured for a domain. By + default, shows the configuration with any pending changes. Set + the `Deployed` option to `True` to show the active + configuration and exclude pending changes. For more + information, see `Configuring Availability Options`_ in the + Amazon CloudSearch Developer Guide . :type domain_name: string - :param domain_name: A string that represents the name of a - domain. Domain names must be unique across the domains - owned by an account within an AWS region. Domain names - must start with a letter or number and can contain the - following characters: a-z (lowercase), 0-9, and - - (hyphen). Uppercase letters and underscores are not - allowed. - - :raises: BaseException, InternalException, ResourceNotFoundException - """ - doc_path = ('describe_availability_options_response', - 'describe_availability_options_result', - 'availability_options') - params = {'DomainName': domain_name} - return self.get_response(doc_path, 'DescribeAvailabilityOptions', - params, verb='POST') + :param domain_name: The name of the domain you want to describe. + + :type deployed: boolean + :param deployed: Whether to display the deployed configuration ( + `True`) or include any pending changes ( `False`). Defaults to + `False`. 
+ + """ + params = {'DomainName': domain_name, } + if deployed is not None: + params['Deployed'] = str( + deployed).lower() + return self._make_request( + action='DescribeAvailabilityOptions', + verb='POST', + path='/', params=params) def describe_domains(self, domain_names=None): """ - Describes the domains (optionally limited to one or more - domains by name) owned by this account. + Gets information about the search domains owned by this + account. Can be limited to specific domains. Shows all domains + by default. For more information, see `Getting Information + about a Search Domain`_ in the Amazon CloudSearch Developer + Guide . :type domain_names: list - :param domain_names: Limits the response to the specified domains. + :param domain_names: The names of the domains you want to include in + the response. - :raises: BaseException, InternalException """ - doc_path = ('describe_domains_response', - 'describe_domains_result', - 'domain_status_list') params = {} - if domain_names: - for i, domain_name in enumerate(domain_names, 1): - params['DomainNames.member.%d' % i] = domain_name - return self.get_response(doc_path, 'DescribeDomains', - params, verb='POST', - list_marker='DomainStatusList') - - def describe_expressions(self, domain_name, names=None): - """ - Describes RankExpressions in the search domain, optionally - limited to a single expression. + if domain_names is not None: + self.build_list_params(params, + domain_names, + 'DomainNames.member') + return self._make_request( + action='DescribeDomains', + verb='POST', + path='/', params=params) + + def describe_expressions(self, domain_name, expression_names=None, + deployed=None): + """ + Gets the expressions configured for the search domain. Can be + limited to specific expressions by name. By default, shows all + expressions and includes any pending changes to the + configuration. Set the `Deployed` option to `True` to show the + active configuration and exclude pending changes. 
For more + information, see `Configuring Expressions`_ in the Amazon + CloudSearch Developer Guide . :type domain_name: string - :param domain_name: A string that represents the name of a - domain. Domain names must be unique across the domains - owned by an account within an AWS region. Domain names - must start with a letter or number and can contain the - following characters: a-z (lowercase), 0-9, and - - (hyphen). Uppercase letters and underscores are not - allowed. - - :type names: list - :param names: Limit response to the specified names. - - :raises: BaseException, InternalException, ResourceNotFoundException - """ - doc_path = ('describe_expressions_response', - 'describe_expressions_result', - 'expressions') - params = {'DomainName': domain_name} - if names: - for i, expr_name in enumerate(names, 1): - params['ExpressionNames.member.%d' % i] = expr_name - return self.get_response(doc_path, 'DescribeExpressions', - params, verb='POST', - list_marker='Expressions') - - def describe_index_fields(self, domain_name, field_names=None): - """ - Describes index fields in the search domain, optionally - limited to a single ``IndexField``. + :param domain_name: The name of the domain you want to describe. + + :type expression_names: list + :param expression_names: Limits the `DescribeExpressions` response to + the specified expressions. If not specified, all expressions are + shown. + + :type deployed: boolean + :param deployed: Whether to display the deployed configuration ( + `True`) or include any pending changes ( `False`). Defaults to + `False`. 
+ + """ + params = {'DomainName': domain_name, } + if expression_names is not None: + self.build_list_params(params, + expression_names, + 'ExpressionNames.member') + if deployed is not None: + params['Deployed'] = str( + deployed).lower() + return self._make_request( + action='DescribeExpressions', + verb='POST', + path='/', params=params) + + def describe_index_fields(self, domain_name, field_names=None, + deployed=None): + """ + Gets information about the index fields configured for the + search domain. Can be limited to specific fields by name. By + default, shows all fields and includes any pending changes to + the configuration. Set the `Deployed` option to `True` to show + the active configuration and exclude pending changes. For more + information, see `Getting Domain Information`_ in the Amazon + CloudSearch Developer Guide . :type domain_name: string - :param domain_name: A string that represents the name of a - domain. Domain names must be unique across the domains - owned by an account within an AWS region. Domain names - must start with a letter or number and can contain the - following characters: a-z (lowercase), 0-9, and - - (hyphen). Uppercase letters and underscores are not - allowed. + :param domain_name: The name of the domain you want to describe. :type field_names: list - :param field_names: Limits the response to the specified fields. - - :raises: BaseException, InternalException, ResourceNotFoundException - """ - doc_path = ('describe_index_fields_response', - 'describe_index_fields_result', - 'index_fields') - params = {'DomainName': domain_name} - if field_names: - for i, field_name in enumerate(field_names, 1): - params['FieldNames.member.%d' % i] = field_name - return self.get_response(doc_path, 'DescribeIndexFields', - params, verb='POST', - list_marker='IndexFields') + :param field_names: A list of the index fields you want to describe. If + not specified, information is returned for all configured index + fields. 
+ + :type deployed: boolean + :param deployed: Whether to display the deployed configuration ( + `True`) or include any pending changes ( `False`). Defaults to + `False`. + + """ + params = {'DomainName': domain_name, } + if field_names is not None: + self.build_list_params(params, + field_names, + 'FieldNames.member') + if deployed is not None: + params['Deployed'] = str( + deployed).lower() + return self._make_request( + action='DescribeIndexFields', + verb='POST', + path='/', params=params) def describe_scaling_parameters(self, domain_name): """ - Describes the scaling parameters for the search domain. + Gets the scaling parameters configured for a domain. A + domain's scaling parameters specify the desired search + instance type and replication count. For more information, see + `Configuring Scaling Options`_ in the Amazon CloudSearch + Developer Guide . :type domain_name: string - :param domain_name: A string that represents the name of a - domain. Domain names must be unique across the domains - owned by an account within an AWS region. Domain names - must start with a letter or number and can contain the - following characters: a-z (lowercase), 0-9, and - - (hyphen). Uppercase letters and underscores are not - allowed. + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). 
- :raises: BaseException, InternalException, ResourceNotFoundException """ - doc_path = ('describe_scaling_parameters_response', - 'describe_scaling_parameters_result', - 'scaling_parameters') - params = {'DomainName': domain_name} - return self.get_response(doc_path, 'DescribeScalingParameters', - params, verb='POST') + params = {'DomainName': domain_name, } + return self._make_request( + action='DescribeScalingParameters', + verb='POST', + path='/', params=params) - def describe_service_access_policies(self, domain_name): + def describe_service_access_policies(self, domain_name, deployed=None): """ - Describes the resource-based policies controlling access to - the services in this search domain. + Gets information about the access policies that control access + to the domain's document and search endpoints. By default, + shows the configuration with any pending changes. Set the + `Deployed` option to `True` to show the active configuration + and exclude pending changes. For more information, see + `Configuring Access for a Search Domain`_ in the Amazon + CloudSearch Developer Guide . :type domain_name: string - :param domain_name: A string that represents the name of a - domain. Domain names must be unique across the domains - owned by an account within an AWS region. Domain names - must start with a letter or number and can contain the - following characters: a-z (lowercase), 0-9, and - - (hyphen). Uppercase letters and underscores are not - allowed. + :param domain_name: The name of the domain you want to describe. + + :type deployed: boolean + :param deployed: Whether to display the deployed configuration ( + `True`) or include any pending changes ( `False`). Defaults to + `False`. 
+ + """ + params = {'DomainName': domain_name, } + if deployed is not None: + params['Deployed'] = str( + deployed).lower() + return self._make_request( + action='DescribeServiceAccessPolicies', + verb='POST', + path='/', params=params) + + def describe_suggesters(self, domain_name, suggester_names=None, + deployed=None): + """ + Gets the suggesters configured for a domain. A suggester + enables you to display possible matches before users finish + typing their queries. Can be limited to specific suggesters by + name. By default, shows all suggesters and includes any + pending changes to the configuration. Set the `Deployed` + option to `True` to show the active configuration and exclude + pending changes. For more information, see `Getting Search + Suggestions`_ in the Amazon CloudSearch Developer Guide . - :raises: BaseException, InternalException, ResourceNotFoundException - """ - doc_path = ('describe_service_access_policies_response', - 'describe_service_access_policies_result', - 'access_policies') - params = {'DomainName': domain_name} - return self.get_response(doc_path, 'DescribeServiceAccessPolicies', - params, verb='POST') + :type domain_name: string + :param domain_name: The name of the domain you want to describe. + + :type suggester_names: list + :param suggester_names: The suggesters you want to describe. + + :type deployed: boolean + :param deployed: Whether to display the deployed configuration ( + `True`) or include any pending changes ( `False`). Defaults to + `False`. 
+ + """ + params = {'DomainName': domain_name, } + if suggester_names is not None: + self.build_list_params(params, + suggester_names, + 'SuggesterNames.member') + if deployed is not None: + params['Deployed'] = str( + deployed).lower() + return self._make_request( + action='DescribeSuggesters', + verb='POST', + path='/', params=params) - def describe_suggesters(self, domain_name, names=None): + def index_documents(self, domain_name): """ - Describes the suggesters for the search domain. + Tells the search domain to start indexing its documents using + the latest indexing options. This operation must be invoked to + activate options whose OptionStatus is + `RequiresIndexDocuments`. :type domain_name: string - :param domain_name: A string that represents the name of a - domain. Domain names must be unique across the domains - owned by an account within an AWS region. Domain names - must start with a letter or number and can contain the - following characters: a-z (lowercase), 0-9, and - - (hyphen). Uppercase letters and underscores are not - allowed. - - :type names: list - :param names: Limit response to the specified names. - - :raises: BaseException, InternalException, ResourceNotFoundException - """ - doc_path = ('describe_suggesters_response', - 'describe_suggesters_result', - 'suggesters') - params = {'DomainName': domain_name} - if names: - for i, suggester_name in enumerate(names, 1): - params['SuggesterNames.member.%d' % i] = suggester_name - - return self.get_response(doc_path, 'DescribeSuggesters', - params, verb='POST', list_marker="Suggesters") + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). 
- def index_documents(self, domain_name): """ - Tells the search domain to start scanning its documents using - the latest text processing options and ``IndexFields``. This - operation must be invoked to make visible in searches any - options whose OptionStatus has ``OptionState`` of - ``RequiresIndexDocuments``. + params = {'DomainName': domain_name, } + return self._make_request( + action='IndexDocuments', + verb='POST', + path='/', params=params) - :type domain_name: string - :param domain_name: A string that represents the name of a - domain. Domain names must be unique across the domains - owned by an account within an AWS region. Domain names - must start with a letter or number and can contain the - following characters: a-z (lowercase), 0-9, and - - (hyphen). Uppercase letters and underscores are not - allowed. - - :raises: BaseException, InternalException, ResourceNotFoundException - """ - doc_path = ('index_documents_response', - 'index_documents_result', - 'field_names') - params = {'DomainName': domain_name} - return self.get_response(doc_path, 'IndexDocuments', params, - verb='POST', list_marker='FieldNames') + def list_domain_names(self): + """ + Lists all search domains owned by an account. + + + """ + params = {} + return self._make_request( + action='ListDomainNames', + verb='POST', + path='/', params=params) def update_availability_options(self, domain_name, multi_az): """ - Updates availability options for the search domain. + Configures the availability options for a domain. Enabling the + Multi-AZ option expands an Amazon CloudSearch domain to an + additional Availability Zone in the same Region to increase + fault tolerance in the event of a service disruption. Changes + to the Multi-AZ option can take about half an hour to become + active. For more information, see `Configuring Availability + Options`_ in the Amazon CloudSearch Developer Guide . :type domain_name: string - :param domain_name: A string that represents the name of a - domain. 
Domain names must be unique across the domains - owned by an account within an AWS region. Domain names - must start with a letter or number and can contain the - following characters: a-z (lowercase), 0-9, and - - (hyphen). Uppercase letters and underscores are not - allowed. - - :type multi_az: bool - :param multi_az: Should the domain be setup in multiple - Availability Zones - - :raises: BaseException, InternalException, InvalidTypeException, - LimitExceededException, ResourceNotFoundException - """ - doc_path = ('update_availability_options_response', - 'update_availability_options_result', - 'availability_options') - params = {'DomainName': domain_name, - 'MultiAZ': do_bool(multi_az)} - return self.get_response(doc_path, 'UpdateAvailabilityOptions', - params, verb='POST') - - def update_scaling_parameters(self, domain_name, instance_type=None, - replication_count=0): - """ - Updates scaling parameters for the search domain. + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). + + :type multi_az: boolean + :param multi_az: You expand an existing search domain to a second + Availability Zone by setting the Multi-AZ option to true. + Similarly, you can turn off the Multi-AZ option to downgrade the + domain to a single Availability Zone by setting the Multi-AZ option + to `False`. + + """ + params = {'DomainName': domain_name, 'MultiAZ': multi_az, } + return self._make_request( + action='UpdateAvailabilityOptions', + verb='POST', + path='/', params=params) + + def update_scaling_parameters(self, domain_name, scaling_parameters): + """ + Configures scaling parameters for a domain. A domain's scaling + parameters specify the desired search instance type and + replication count. 
Amazon CloudSearch will still automatically + scale your domain based on the volume of data and traffic, but + not below the desired instance type and replication count. If + the Multi-AZ option is enabled, these values control the + resources used per Availability Zone. For more information, + see `Configuring Scaling Options`_ in the Amazon CloudSearch + Developer Guide . :type domain_name: string - :param domain_name: A string that represents the name of a - domain. Domain names must be unique across the domains - owned by an account within an AWS region. Domain names - must start with a letter or number and can contain the - following characters: a-z (lowercase), 0-9, and - - (hyphen). Uppercase letters and underscores are not - allowed. - - :type instance_type: str or None - :param instance_type: The type of instance to use. One of - None | search.m1.small | search.m1.large | search.m2.xlarge | search.m2.2xlarge - - :type replication_count: int - :param replication_count: The desired number of replicas. A - value of 0 will reset to the default. - - :raises: BaseException, InternalException, InvalidTypeException, - LimitExceededException, ResourceNotFoundException - """ - doc_path = ('update_scaling_parameters_response', - 'update_scaling_parameters_result', - 'scaling_parameters') - params = {'DomainName': domain_name} - if instance_type is not None: - params["ScalingParameters.DesiredInstanceType"] = instance_type - if replication_count is not None: - params["ScalingParameters.DesiredReplicationCount"] = replication_count - return self.get_response(doc_path, 'UpdateScalingParameters', - params, verb='POST') + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). 
+ + :type scaling_parameters: dict + :param scaling_parameters: The desired instance type and desired number + of replicas of each index partition. + + """ + params = {'DomainName': domain_name, } + self.build_complex_param(params, 'ScalingParameters', + scaling_parameters) + return self._make_request( + action='UpdateScalingParameters', + verb='POST', + path='/', params=params) def update_service_access_policies(self, domain_name, access_policies): """ - Updates the policies controlling access to the services in - this search domain. + Configures the access rules that control access to the + domain's document and search endpoints. For more information, + see ` Configuring Access for an Amazon CloudSearch Domain`_. :type domain_name: string - :param domain_name: A string that represents the name of a - domain. Domain names must be unique across the domains - owned by an account within an AWS region. Domain names - must start with a letter or number and can contain the - following characters: a-z (lowercase), 0-9, and - - (hyphen). Uppercase letters and underscores are not - allowed. + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). :type access_policies: string - :param access_policies: An IAM access policy as described in - The Access Policy Language in Using AWS Identity and - Access Management. The maximum size of an access policy - document is 100KB. 
- - :raises: BaseException, InternalException, LimitExceededException, - ResourceNotFoundException, InvalidTypeException - """ - doc_path = ('update_service_access_policies_response', - 'update_service_access_policies_result', - 'access_policies') - params = {'AccessPolicies': access_policies, - 'DomainName': domain_name} - return self.get_response(doc_path, 'UpdateServiceAccessPolicies', - params, verb='POST') + :param access_policies: The access rules you want to configure. These + rules replace any existing rules. + + """ + params = { + 'DomainName': domain_name, + 'AccessPolicies': access_policies, + } + return self._make_request( + action='UpdateServiceAccessPolicies', + verb='POST', + path='/', params=params) + + def build_complex_param(self, params, label, value): + """Serialize a structure. + + For example:: + + param_type = 'structure' + label = 'IndexField' + value = {'IndexFieldName': 'a', 'IntOptions': {'DefaultValue': 5}} + + would result in the params dict being updated with these params:: + + IndexField.IndexFieldName = a + IndexField.IntOptions.DefaultValue = 5 + + :type params: dict + :param params: The params dict. The complex list params + will be added to this dict. + + :type label: str + :param label: String label for param key + + :type value: any + :param value: The value to serialize + """ + for k, v in value.items(): + if type(v) in [dict]: + for k2, v2 in v.items(): + self.build_complex_param(params, label + '.' 
+ k, v) + elif type(v) in [bool]: + params['%s.%s' % (label, k)] = v and 'true' or 'false' + else: + params['%s.%s' % (label, k)] = v + + def _make_request(self, action, verb, path, params): + params['ContentType'] = 'JSON' + response = self.make_request(action=action, verb='POST', + path='/', params=params) + body = response.read() + boto.log.debug(body) + if response.status == 200: + return json.loads(body) + else: + json_body = json.loads(body) + fault_name = json_body.get('Error', {}).get('Code', None) + exception_class = self._faults.get(fault_name, self.ResponseError) + raise exception_class(response.status, response.reason, + body=json_body) diff --git a/boto/cloudsearch2/layer2.py b/boto/cloudsearch2/layer2.py index bd73e52363..d76c25e809 100644 --- a/boto/cloudsearch2/layer2.py +++ b/boto/cloudsearch2/layer2.py @@ -22,7 +22,7 @@ # IN THE SOFTWARE. # -from .layer1 import Layer1 +from .layer1 import CloudSearchConnection from .domain import Domain @@ -32,7 +32,15 @@ def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, is_secure=True, port=None, proxy=None, proxy_port=None, host=None, debug=0, session_token=None, region=None, validate_certs=True): - self.layer1 = Layer1( + + if type(region) in [str, unicode]: + import boto.cloudsearch2 + for region_info in boto.cloudsearch2.regions(): + if region_info.name == region: + region = region_info + break + + self.layer1 = CloudSearchConnection( aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, is_secure=is_secure, @@ -52,6 +60,11 @@ def list_domains(self, domain_names=None): :rtype: list of :class:`boto.cloudsearch2.domain.Domain` """ domain_data = self.layer1.describe_domains(domain_names) + + domain_data = (domain_data['DescribeDomainsResponse'] + ['DescribeDomainsResult'] + ['DomainStatusList']) + return [Domain(self.layer1, data) for data in domain_data] def create_domain(self, domain_name): @@ -61,7 +74,9 @@ def create_domain(self, domain_name): :rtype: 
:class:`boto.cloudsearch2.domain.Domain` """ data = self.layer1.create_domain(domain_name) - return Domain(self.layer1, data) + return Domain(self.layer1, data['CreateDomainResponse'] + ['CreateDomainResult'] + ['DomainStatus']) def lookup(self, domain_name): """ diff --git a/boto/cloudsearch2/optionstatus.py b/boto/cloudsearch2/optionstatus.py index a633eba001..9531ca859b 100644 --- a/boto/cloudsearch2/optionstatus.py +++ b/boto/cloudsearch2/optionstatus.py @@ -51,21 +51,23 @@ class OptionStatus(dict): option was last updated. """ - def __init__(self, domain, data=None, refresh_fn=None, save_fn=None): + def __init__(self, domain, data=None, refresh_fn=None, refresh_key=None, + save_fn=None): self.domain = domain self.refresh_fn = refresh_fn + self.refresh_key = refresh_key self.save_fn = save_fn self.refresh(data) def _update_status(self, status): - self.creation_date = status['creation_date'] - self.status = status['state'] - self.update_date = status['update_date'] - self.update_version = int(status['update_version']) + self.creation_date = status['CreationDate'] + self.status = status['State'] + self.update_date = status['UpdateDate'] + self.update_version = int(status['UpdateVersion']) def _update_options(self, options): if options: - self.update(json.loads(options)) + self.update(options) def refresh(self, data=None): """ @@ -76,9 +78,14 @@ def refresh(self, data=None): if not data: if self.refresh_fn: data = self.refresh_fn(self.domain.name) + + if data and self.refresh_key: + # Attempt to pull out the right nested bag of data + for key in self.refresh_key: + data = data[key] if data: - self._update_status(data['status']) - self._update_options(data['options']) + self._update_status(data['Status']) + self._update_options(data['Options']) def to_json(self): """ @@ -86,23 +93,6 @@ def to_json(self): """ return json.dumps(self) - def startElement(self, name, attrs, connection): - return None - - def endElement(self, name, value, connection): - if name == 
'CreationDate': - self.created = value - elif name == 'State': - self.state = value - elif name == 'UpdateDate': - self.updated = value - elif name == 'UpdateVersion': - self.update_version = int(value) - elif name == 'Options': - self.update_from_json_doc(value) - else: - setattr(self, name, value) - def save(self): """ Write the current state of the local object back to the @@ -123,30 +113,20 @@ def wait_for_state(self, state): class IndexFieldStatus(OptionStatus): - - def _update_options(self, options): - self.update(options) - def save(self): pass class AvailabilityOptionsStatus(OptionStatus): - - def _update_options(self, options): - self.update(MultiAZ=json.loads(options)) - def save(self): pass class ScalingParametersStatus(IndexFieldStatus): - pass class ExpressionStatus(IndexFieldStatus): - pass diff --git a/boto/cloudsearch2/search.py b/boto/cloudsearch2/search.py index 0315581614..bfeca5a94d 100644 --- a/boto/cloudsearch2/search.py +++ b/boto/cloudsearch2/search.py @@ -21,6 +21,7 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
# +import json from math import ceil import boto from boto.compat import json @@ -43,13 +44,10 @@ class CommitMismatchError(Exception): class SearchResults(object): def __init__(self, **attrs): self.rid = attrs['status']['rid'] - # self.doc_coverage_pct = attrs['info']['doc-coverage-pct'] self.time_ms = attrs['status']['time-ms'] self.hits = attrs['hits']['found'] self.docs = attrs['hits']['hit'] self.start = attrs['hits']['start'] - #self.rank = attrs['rank'] - #self.match_expression = attrs['match-expr'] self.query = attrs['query'] self.search_service = attrs['search_service'] @@ -57,7 +55,7 @@ def __init__(self, **attrs): if 'facets' in attrs: for (facet, values) in attrs['facets'].iteritems(): if 'buckets' in values: - self.facets[facet] = dict((k, v) for (k, v) in map(lambda x: (x['value'], x['count']), values['buckets'])) + self.facets[facet] = dict((k, v) for (k, v) in map(lambda x: (x['value'], x['count']), values.get('buckets', []))) self.num_pages_needed = ceil(self.hits / self.query.real_size) @@ -131,6 +129,8 @@ def to_params(self): if self.facet: for k, v in self.facet.iteritems(): + if type(v) not in [str, unicode]: + v = json.dumps(v) params['facet.%s' % k] = v if self.highlight: diff --git a/docs/source/ref/cloudsearch.rst b/docs/source/ref/cloudsearch.rst index 1610200a54..98d4a22403 100644 --- a/docs/source/ref/cloudsearch.rst +++ b/docs/source/ref/cloudsearch.rst @@ -4,56 +4,51 @@ Cloudsearch =========== -boto.cloudsearch ----------------- +boto.cloudsearch2 +----------------- -.. automodule:: boto.cloudsearch - :members: +.. automodule:: boto.cloudsearch2 + :members: :undoc-members: -boto.cloudsearch.domain ------------------------ +boto.cloudsearch2.domain +------------------------ -.. automodule:: boto.cloudsearch.domain +.. automodule:: boto.cloudsearch2.domain :members: :undoc-members: -boto.cloudsearch.layer1 ------------------------ +boto.cloudsearch2.layer1 +------------------------ -.. automodule:: boto.cloudsearch.layer1 +.. 
automodule:: boto.cloudsearch2.layer1 :members: :undoc-members: -boto.cloudsearch.layer2 ------------------------ +boto.cloudsearch2.layer2 +------------------------ -.. automodule:: boto.cloudsearch.layer2 +.. automodule:: boto.cloudsearch2.layer2 :members: :undoc-members: -boto.cloudsearch.optionstatus ------------------------------ +boto.cloudsearch2.optionstatus +------------------------------ -.. automodule:: boto.cloudsearch.optionstatus +.. automodule:: boto.cloudsearch2.optionstatus :members: :undoc-members: -boto.cloudsearch.search ------------------------ +boto.cloudsearch2.search +------------------------ -.. automodule:: boto.cloudsearch.search +.. automodule:: boto.cloudsearch2.search :members: :undoc-members: -boto.cloudsearch.document -------------------------- +boto.cloudsearch2.document +-------------------------- -.. automodule:: boto.cloudsearch.document +.. automodule:: boto.cloudsearch2.document :members: :undoc-members: - - - - - diff --git a/docs/source/ref/cloudsearch2.rst b/docs/source/ref/cloudsearch2.rst new file mode 100644 index 0000000000..bac2d86663 --- /dev/null +++ b/docs/source/ref/cloudsearch2.rst @@ -0,0 +1,61 @@ +.. ref-cloudsearch + +=========== +Cloudsearch +=========== + +boto.cloudsearch +---------------- + +.. automodule:: boto.cloudsearch + :members: + :undoc-members: + +boto.cloudsearch.domain +----------------------- + +.. automodule:: boto.cloudsearch.domain + :members: + :undoc-members: + +boto.cloudsearch.exceptions +----------------------- + +.. automodule:: boto.cloudsearch.exceptions + :members: + :undoc-members: + +boto.cloudsearch.layer1 +----------------------- + +.. automodule:: boto.cloudsearch.layer1 + :members: + :undoc-members: + +boto.cloudsearch.layer2 +----------------------- + +.. automodule:: boto.cloudsearch.layer2 + :members: + :undoc-members: + +boto.cloudsearch.optionstatus +----------------------------- + +.. 
automodule:: boto.cloudsearch.optionstatus + :members: + :undoc-members: + +boto.cloudsearch.search +----------------------- + +.. automodule:: boto.cloudsearch.search + :members: + :undoc-members: + +boto.cloudsearch.document +------------------------- + +.. automodule:: boto.cloudsearch.document + :members: + :undoc-members: diff --git a/setup.py b/setup.py index 7e39e93ac3..c14b88bb1b 100644 --- a/setup.py +++ b/setup.py @@ -75,7 +75,8 @@ def readme(): "boto.beanstalk", "boto.datapipeline", "boto.elasticache", "boto.elastictranscoder", "boto.opsworks", "boto.redshift", "boto.dynamodb2", "boto.support", "boto.cloudtrail", - "boto.directconnect", "boto.kinesis", "boto.rds2"], + "boto.directconnect", "boto.kinesis", "boto.rds2", + "boto.cloudsearch2"], package_data = { "boto.cacerts": ["cacerts.txt"], "boto": ["endpoints.json"], diff --git a/tests/integration/cloudsearch2/test_layers.py b/tests/integration/cloudsearch2/test_layers.py index 90ceda46b8..d2b1375647 100644 --- a/tests/integration/cloudsearch2/test_layers.py +++ b/tests/integration/cloudsearch2/test_layers.py @@ -26,7 +26,7 @@ import time from tests.unit import unittest -from boto.cloudsearch2.layer1 import Layer1 +from boto.cloudsearch2.layer1 import CloudSearchConnection from boto.cloudsearch2.layer2 import Layer2 from boto.regioninfo import RegionInfo @@ -36,13 +36,18 @@ class CloudSearchLayer1Test(unittest.TestCase): def setUp(self): super(CloudSearchLayer1Test, self).setUp() - self.layer1 = Layer1() + self.layer1 = CloudSearchConnection() self.domain_name = 'test-%d' % int(time.time()) def test_create_domain(self): resp = self.layer1.create_domain(self.domain_name) + + resp = (resp['CreateDomainResponse'] + ['CreateDomainResult'] + ['DomainStatus']) + self.addCleanup(self.layer1.delete_domain, self.domain_name) - self.assertTrue(resp.get('created', False)) + self.assertTrue(resp.get('Created', False)) class CloudSearchLayer2Test(unittest.TestCase): diff --git 
a/tests/unit/cloudsearch2/test_connection.py b/tests/unit/cloudsearch2/test_connection.py index 6a2a0200b9..e31d7190e2 100644 --- a/tests/unit/cloudsearch2/test_connection.py +++ b/tests/unit/cloudsearch2/test_connection.py @@ -3,48 +3,50 @@ from tests.unit import AWSMockServiceTestCase from boto.cloudsearch2.domain import Domain -from boto.cloudsearch2.layer1 import Layer1 +from boto.cloudsearch2.layer1 import CloudSearchConnection -import json class TestCloudSearchCreateDomain(AWSMockServiceTestCase): - connection_class = Layer1 + connection_class = CloudSearchConnection def default_body(self): return """ - - - - 0 - - arn:aws:cs:us-east-1:1234567890:domain/demo - search-demo-userdomain.us-east-1.cloudsearch.amazonaws.com - - true - 1234567890/demo - false - 0 - demo - false - false - - arn:aws:cs:us-east-1:1234567890:domain/demo - doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com - - - - - 00000000-0000-0000-0000-000000000000 - - +{ + "CreateDomainResponse": { + "CreateDomainResult": { + "DomainStatus": { + "SearchInstanceType": null, + "DomainId": "1234567890/demo", + "DomainName": "demo", + "Deleted": false, + "SearchInstanceCount": 0, + "Created": true, + "SearchService": { + "Endpoint": "search-demo.us-east-1.cloudsearch.amazonaws.com" + }, + "RequiresIndexDocuments": false, + "Processing": false, + "DocService": { + "Endpoint": "doc-demo.us-east-1.cloudsearch.amazonaws.com" + }, + "ARN": "arn:aws:cs:us-east-1:1234567890:domain/demo", + "SearchPartitionCount": 0 + } + }, + "ResponseMetadata": { + "RequestId": "00000000-0000-0000-0000-000000000000" + } + } +} """ def test_create_domain(self): self.set_http_response(status_code=200) - api_response = self.service_connection.create_domain('demo') + self.service_connection.create_domain('demo') self.assert_request_parameters({ 'Action': 'CreateDomain', + 'ContentType': 'JSON', 'DomainName': 'demo', 'Version': '2013-01-01', }) @@ -54,22 +56,26 @@ def test_cloudsearch_connect_result_endpoints(self): 
self.set_http_response(status_code=200) api_response = self.service_connection.create_domain('demo') - domain = Domain(self, api_response) + domain = Domain(self, api_response['CreateDomainResponse'] + ['CreateDomainResult'] + ['DomainStatus']) self.assertEqual( domain.doc_service_endpoint, - "doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + "doc-demo.us-east-1.cloudsearch.amazonaws.com") self.assertEqual(domain.service_arn, "arn:aws:cs:us-east-1:1234567890:domain/demo") self.assertEqual( domain.search_service_endpoint, - "search-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + "search-demo.us-east-1.cloudsearch.amazonaws.com") def test_cloudsearch_connect_result_statuses(self): """Check that domain statuses are correctly returned from AWS""" self.set_http_response(status_code=200) api_response = self.service_connection.create_domain('demo') - domain = Domain(self, api_response) + domain = Domain(self, api_response['CreateDomainResponse'] + ['CreateDomainResult'] + ['DomainStatus']) self.assertEqual(domain.created, True) self.assertEqual(domain.processing, False) @@ -80,7 +86,9 @@ def test_cloudsearch_connect_result_details(self): """Check that the domain information is correctly returned from AWS""" self.set_http_response(status_code=200) api_response = self.service_connection.create_domain('demo') - domain = Domain(self, api_response) + domain = Domain(self, api_response['CreateDomainResponse'] + ['CreateDomainResult'] + ['DomainStatus']) self.assertEqual(domain.id, "1234567890/demo") self.assertEqual(domain.name, "demo") @@ -88,56 +96,62 @@ def test_cloudsearch_connect_result_details(self): def test_cloudsearch_documentservice_creation(self): self.set_http_response(status_code=200) api_response = self.service_connection.create_domain('demo') - domain = Domain(self, api_response) + domain = Domain(self, api_response['CreateDomainResponse'] + ['CreateDomainResult'] + ['DomainStatus']) document = domain.get_document_service() self.assertEqual( 
document.endpoint, - "doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + "doc-demo.us-east-1.cloudsearch.amazonaws.com") def test_cloudsearch_searchservice_creation(self): self.set_http_response(status_code=200) api_response = self.service_connection.create_domain('demo') - domain = Domain(self, api_response) + domain = Domain(self, api_response['CreateDomainResponse'] + ['CreateDomainResult'] + ['DomainStatus']) search = domain.get_search_service() self.assertEqual( search.endpoint, - "search-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") + "search-demo.us-east-1.cloudsearch.amazonaws.com") class CloudSearchConnectionDeletionTest(AWSMockServiceTestCase): - connection_class = Layer1 + connection_class = CloudSearchConnection def default_body(self): return """ - - - - 0 - - arn:aws:cs:us-east-1:1234567890:search/demo - search-demo-userdomain.us-east-1.cloudsearch.amazonaws.com - - true - 1234567890/demo - false - 0 - demo - false - false - - arn:aws:cs:us-east-1:1234567890:doc/demo - doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com - - - - - 00000000-0000-0000-0000-000000000000 - - +{ + "DeleteDomainResponse": { + "DeleteDomainResult": { + "DomainStatus": { + "SearchInstanceType": null, + "DomainId": "1234567890/demo", + "DomainName": "test", + "Deleted": true, + "SearchInstanceCount": 0, + "Created": true, + "SearchService": { + "Endpoint": null + }, + "RequiresIndexDocuments": false, + "Processing": false, + "DocService": { + "Endpoint": null + }, + "ARN": "arn:aws:cs:us-east-1:1234567890:domain/demo", + "SearchPartitionCount": 0 + } + }, + "ResponseMetadata": { + "RequestId": "00000000-0000-0000-0000-000000000000" + } + } +} """ def test_cloudsearch_deletion(self): @@ -146,52 +160,55 @@ def test_cloudsearch_deletion(self): cloudsearch connection. 
""" self.set_http_response(status_code=200) - api_response = self.service_connection.delete_domain('demo') + self.service_connection.delete_domain('demo') self.assert_request_parameters({ 'Action': 'DeleteDomain', + 'ContentType': 'JSON', 'DomainName': 'demo', 'Version': '2013-01-01', }) class CloudSearchConnectionIndexDocumentTest(AWSMockServiceTestCase): - connection_class = Layer1 + connection_class = CloudSearchConnection def default_body(self): return """ - - - - average_score - brand_id - colors - context - context_owner - created_at - creator_id - description - file_size - format - has_logo - has_messaging - height - image_id - ingested_from - is_advertising - is_photo - is_reviewed - modified_at - subject_date - tags - title - width - - - - eb2b2390-6bbd-11e2-ab66-93f3a90dcf2a - - +{ + "IndexDocumentsResponse": { + "IndexDocumentsResult": { + "FieldNames": [ + "average_score", + "brand_id", + "colors", + "context", + "context_owner", + "created_at", + "creator_id", + "description", + "file_size", + "format", + "has_logo", + "has_messaging", + "height", + "image_id", + "ingested_from", + "is_advertising", + "is_photo", + "is_reviewed", + "modified_at", + "subject_date", + "tags", + "title", + "width" + ] + }, + "ResponseMetadata": { + "RequestId": "42e618d9-c4d9-11e3-8242-c32da3041159" + } + } +} """ def test_cloudsearch_index_documents(self): @@ -200,10 +217,11 @@ def test_cloudsearch_index_documents(self): domain. 
""" self.set_http_response(status_code=200) - api_response = self.service_connection.index_documents('demo') + self.service_connection.index_documents('demo') self.assert_request_parameters({ 'Action': 'IndexDocuments', + 'ContentType': 'JSON', 'DomainName': 'demo', 'Version': '2013-01-01', }) @@ -216,13 +234,17 @@ def test_cloudsearch_index_documents_resp(self): self.set_http_response(status_code=200) api_response = self.service_connection.index_documents('demo') - self.assertEqual(api_response, ['average_score', 'brand_id', 'colors', - 'context', 'context_owner', - 'created_at', 'creator_id', - 'description', 'file_size', 'format', - 'has_logo', 'has_messaging', 'height', - 'image_id', 'ingested_from', - 'is_advertising', 'is_photo', - 'is_reviewed', 'modified_at', - 'subject_date', 'tags', 'title', - 'width']) + fields = (api_response['IndexDocumentsResponse'] + ['IndexDocumentsResult'] + ['FieldNames']) + + self.assertEqual(fields, ['average_score', 'brand_id', 'colors', + 'context', 'context_owner', + 'created_at', 'creator_id', + 'description', 'file_size', 'format', + 'has_logo', 'has_messaging', 'height', + 'image_id', 'ingested_from', + 'is_advertising', 'is_photo', + 'is_reviewed', 'modified_at', + 'subject_date', 'tags', 'title', + 'width']) diff --git a/tests/unit/cloudsearch2/test_document.py b/tests/unit/cloudsearch2/test_document.py index 7d9d011f7c..fc42403c71 100644 --- a/tests/unit/cloudsearch2/test_document.py +++ b/tests/unit/cloudsearch2/test_document.py @@ -41,13 +41,12 @@ def test_cloudsearch_add_basics(self): """ document = DocumentServiceConnection( endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") - document.add("1234", 10, {"id": "1234", "title": "Title 1", - "category": ["cat_a", "cat_b", "cat_c"]}) + document.add("1234", {"id": "1234", "title": "Title 1", + "category": ["cat_a", "cat_b", "cat_c"]}) document.commit() args = json.loads(HTTPretty.last_request.body)[0] - self.assertEqual(args['lang'], 'en') 
self.assertEqual(args['type'], 'add') def test_cloudsearch_add_single_basic(self): @@ -57,14 +56,13 @@ def test_cloudsearch_add_single_basic(self): """ document = DocumentServiceConnection( endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") - document.add("1234", 10, {"id": "1234", "title": "Title 1", - "category": ["cat_a", "cat_b", "cat_c"]}) + document.add("1234", {"id": "1234", "title": "Title 1", + "category": ["cat_a", "cat_b", "cat_c"]}) document.commit() args = json.loads(HTTPretty.last_request.body)[0] self.assertEqual(args['id'], '1234') - self.assertEqual(args['version'], 10) self.assertEqual(args['type'], 'add') def test_cloudsearch_add_single_fields(self): @@ -73,8 +71,8 @@ def test_cloudsearch_add_single_fields(self): """ document = DocumentServiceConnection( endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") - document.add("1234", 10, {"id": "1234", "title": "Title 1", - "category": ["cat_a", "cat_b", "cat_c"]}) + document.add("1234", {"id": "1234", "title": "Title 1", + "category": ["cat_a", "cat_b", "cat_c"]}) document.commit() args = json.loads(HTTPretty.last_request.body)[0] @@ -90,8 +88,8 @@ def test_cloudsearch_add_single_result(self): """ document = DocumentServiceConnection( endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") - document.add("1234", 10, {"id": "1234", "title": "Title 1", - "category": ["cat_a", "cat_b", "cat_c"]}) + document.add("1234", {"id": "1234", "title": "Title 1", + "category": ["cat_a", "cat_b", "cat_c"]}) doc = document.commit() self.assertEqual(doc.status, 'success') @@ -111,17 +109,15 @@ class CloudSearchDocumentMultipleAddTest(CloudSearchDocumentTest): objs = { '1234': { - 'version': 10, 'fields': {"id": "1234", "title": "Title 1", - "category": ["cat_a", "cat_b", - "cat_c"]}}, + 'fields': {"id": "1234", "title": "Title 1", + "category": ["cat_a", "cat_b", "cat_c"]}}, '1235': { - 'version': 11, 'fields': {"id": "1235", "title": "Title 2", + 'fields': {"id": "1235", 
"title": "Title 2", "category": ["cat_b", "cat_c", "cat_d"]}}, '1236': { - 'version': 12, 'fields': {"id": "1236", "title": "Title 3", - "category": ["cat_e", "cat_f", - "cat_g"]}}, + 'fields': {"id": "1236", "title": "Title 3", + "category": ["cat_e", "cat_f", "cat_g"]}}, } @@ -130,14 +126,13 @@ def test_cloudsearch_add_basics(self): document = DocumentServiceConnection( endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") for (key, obj) in self.objs.items(): - document.add(key, obj['version'], obj['fields']) + document.add(key, obj['fields']) document.commit() args = json.loads(HTTPretty.last_request.body) for arg in args: self.assertTrue(arg['id'] in self.objs) - self.assertEqual(arg['version'], self.objs[arg['id']]['version']) self.assertEqual(arg['fields']['id'], self.objs[arg['id']]['fields']['id']) self.assertEqual(arg['fields']['title'], @@ -153,7 +148,7 @@ def test_cloudsearch_add_results(self): document = DocumentServiceConnection( endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") for (key, obj) in self.objs.items(): - document.add(key, obj['version'], obj['fields']) + document.add(key, obj['fields']) doc = document.commit() self.assertEqual(doc.status, 'success') @@ -175,11 +170,10 @@ def test_cloudsearch_delete(self): """ document = DocumentServiceConnection( endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") - document.delete("5", "10") + document.delete("5") document.commit() args = json.loads(HTTPretty.last_request.body)[0] - self.assertEqual(args['version'], '10') self.assertEqual(args['type'], 'delete') self.assertEqual(args['id'], '5') @@ -189,7 +183,7 @@ def test_cloudsearch_delete_results(self): """ document = DocumentServiceConnection( endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") - document.delete("5", "10") + document.delete("5") doc = document.commit() self.assertEqual(doc.status, 'success') @@ -207,8 +201,8 @@ class 
CloudSearchDocumentDeleteMultiple(CloudSearchDocumentTest): def test_cloudsearch_delete_multiples(self): document = DocumentServiceConnection( endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") - document.delete("5", "10") - document.delete("6", "11") + document.delete("5") + document.delete("6") document.commit() args = json.loads(HTTPretty.last_request.body) @@ -216,13 +210,6 @@ def test_cloudsearch_delete_multiples(self): for arg in args: self.assertEqual(arg['type'], 'delete') - if arg['id'] == '5': - self.assertEqual(arg['version'], '10') - elif arg['id'] == '6': - self.assertEqual(arg['version'], '11') - else: # Unknown result out of AWS that shouldn't be there - self.assertTrue(False) - class CloudSearchSDFManipulation(CloudSearchDocumentTest): response = { @@ -242,8 +229,8 @@ def test_cloudsearch_single_document_sdf(self): document = DocumentServiceConnection( endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") - document.add("1234", 10, {"id": "1234", "title": "Title 1", - "category": ["cat_a", "cat_b", "cat_c"]}) + document.add("1234", {"id": "1234", "title": "Title 1", + "category": ["cat_a", "cat_b", "cat_c"]}) self.assertNotEqual(document.get_sdf(), '[]') @@ -264,8 +251,8 @@ def test_cloudsearch_erroneous_sdf(self): document = DocumentServiceConnection( endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") - document.add("1234", 10, {"id": "1234", "title": None, - "category": ["cat_a", "cat_b", "cat_c"]}) + document.add("1234", {"id": "1234", "title": None, + "category": ["cat_a", "cat_b", "cat_c"]}) document.commit() self.assertNotEqual(len(boto.log.error.call_args_list), 1) @@ -284,8 +271,8 @@ class CloudSearchDocumentErrorBadUnicode(CloudSearchDocumentTest): def test_fake_bad_unicode(self): document = DocumentServiceConnection( endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") - document.add("1234", 10, {"id": "1234", "title": "Title 1", - "category": ["cat_a", "cat_b", "cat_c"]}) + 
document.add("1234", {"id": "1234", "title": "Title 1", + "category": ["cat_a", "cat_b", "cat_c"]}) self.assertRaises(EncodingError, document.commit) @@ -300,8 +287,8 @@ class CloudSearchDocumentErrorDocsTooBig(CloudSearchDocumentTest): def test_fake_docs_too_big(self): document = DocumentServiceConnection( endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") - document.add("1234", 10, {"id": "1234", "title": "Title 1", - "category": ["cat_a", "cat_b", "cat_c"]}) + document.add("1234", {"id": "1234", "title": "Title 1", + "category": ["cat_a", "cat_b", "cat_c"]}) self.assertRaises(ContentTooLongError, document.commit) @@ -318,7 +305,7 @@ def test_fake_failure(self): document = DocumentServiceConnection( endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com") - document.add("1234", 10, {"id": "1234", "title": "Title 1", - "category": ["cat_a", "cat_b", "cat_c"]}) + document.add("1234", {"id": "1234", "title": "Title 1", + "category": ["cat_a", "cat_b", "cat_c"]}) self.assertRaises(CommitMismatchError, document.commit) diff --git a/tests/unit/cloudsearch2/test_search.py b/tests/unit/cloudsearch2/test_search.py index 63ac2f4ff6..e40dc3965c 100644 --- a/tests/unit/cloudsearch2/test_search.py +++ b/tests/unit/cloudsearch2/test_search.py @@ -19,31 +19,52 @@ class CloudSearchSearchBaseTest(unittest.TestCase): hits = [ { 'id': '12341', - 'title': 'Document 1', + 'fields': { + 'title': 'Document 1', + 'rank': 1 + } }, { 'id': '12342', - 'title': 'Document 2', + 'fields': { + 'title': 'Document 2', + 'rank': 2 + } }, { 'id': '12343', - 'title': 'Document 3', + 'fields': { + 'title': 'Document 3', + 'rank': 3 + } }, { 'id': '12344', - 'title': 'Document 4', + 'fields': { + 'title': 'Document 4', + 'rank': 4 + } }, { 'id': '12345', - 'title': 'Document 5', + 'fields': { + 'title': 'Document 5', + 'rank': 5 + } }, { 'id': '12346', - 'title': 'Document 6', + 'fields': { + 'title': 'Document 6', + 'rank': 6 + } }, { 'id': '12347', - 'title': 'Document 
7', + 'fields': { + 'title': 'Document 7', + 'rank': 7 + } }, ] @@ -80,7 +101,7 @@ class CloudSearchSearchTest(CloudSearchSearchBaseTest): 'start': 0, 'hit':CloudSearchSearchBaseTest.hits }, - 'info': { + 'status': { 'rid':'b7c167f6c2da6d93531b9a7b314ad030b3a74803b4b7797edb905ba5a6a08', 'time-ms': 2, 'cpu-time-ms': 0 @@ -99,14 +120,6 @@ def test_cloudsearch_qsearch(self): self.assertEqual(args['start'], ["0"]) self.assertEqual(args['size'], ["10"]) - def test_cloudsearch_bqsearch(self): - search = SearchConnection(endpoint=HOSTNAME) - - search.search(bq="'Test'") - - args = self.get_args(HTTPretty.last_request.raw_requestline) - - self.assertEqual(args['bq'], ["'Test'"]) def test_cloudsearch_search_details(self): search = SearchConnection(endpoint=HOSTNAME) @@ -119,34 +132,16 @@ def test_cloudsearch_search_details(self): self.assertEqual(args['size'], ["50"]) self.assertEqual(args['start'], ["20"]) - def test_cloudsearch_facet_single(self): - search = SearchConnection(endpoint=HOSTNAME) - - search.search(q='Test', facet=["Author"]) - - args = self.get_args(HTTPretty.last_request.raw_requestline) - - self.assertEqual(args['facet'], ["Author"]) - - def test_cloudsearch_facet_multiple(self): - search = SearchConnection(endpoint=HOSTNAME) - - search.search(q='Test', facet=["author", "cat"]) - - args = self.get_args(HTTPretty.last_request.raw_requestline) - - self.assertEqual(args['facet'], ["author,cat"]) - def test_cloudsearch_facet_constraint_single(self): search = SearchConnection(endpoint=HOSTNAME) search.search( q='Test', - facet_constraints={'author': "'John Smith','Mark Smith'"}) + facet={'author': "'John Smith','Mark Smith'"}) args = self.get_args(HTTPretty.last_request.raw_requestline) - self.assertEqual(args['facet-author-constraints'], + self.assertEqual(args['facet.author'], ["'John Smith','Mark Smith'"]) def test_cloudsearch_facet_constraint_multiple(self): @@ -154,72 +149,37 @@ def test_cloudsearch_facet_constraint_multiple(self): search.search( q='Test', 
- facet_constraints={'author': "'John Smith','Mark Smith'", - 'category': "'News','Reviews'"}) + facet={'author': "'John Smith','Mark Smith'", + 'category': "'News','Reviews'"}) args = self.get_args(HTTPretty.last_request.raw_requestline) - self.assertEqual(args['facet-author-constraints'], + self.assertEqual(args['facet.author'], ["'John Smith','Mark Smith'"]) - self.assertEqual(args['facet-category-constraints'], + self.assertEqual(args['facet.category'], ["'News','Reviews'"]) def test_cloudsearch_facet_sort_single(self): search = SearchConnection(endpoint=HOSTNAME) - search.search(q='Test', facet_sort={'author': 'alpha'}) - - args = self.get_args(HTTPretty.last_request.raw_requestline) - - self.assertEqual(args['facet-author-sort'], ['alpha']) - - def test_cloudsearch_facet_sort_multiple(self): - search = SearchConnection(endpoint=HOSTNAME) - - search.search(q='Test', facet_sort={'author': 'alpha', - 'cat': 'count'}) - - args = self.get_args(HTTPretty.last_request.raw_requestline) - - self.assertEqual(args['facet-author-sort'], ['alpha']) - self.assertEqual(args['facet-cat-sort'], ['count']) - - def test_cloudsearch_top_n_single(self): - search = SearchConnection(endpoint=HOSTNAME) - - search.search(q='Test', facet_top_n={'author': 5}) - - args = self.get_args(HTTPretty.last_request.raw_requestline) - - self.assertEqual(args['facet-author-top-n'], ['5']) - - def test_cloudsearch_top_n_multiple(self): - search = SearchConnection(endpoint=HOSTNAME) - - search.search(q='Test', facet_top_n={'author': 5, 'cat': 10}) + search.search(q='Test', facet={'author': {'sort':'alpha'}}) args = self.get_args(HTTPretty.last_request.raw_requestline) - self.assertEqual(args['facet-author-top-n'], ['5']) - self.assertEqual(args['facet-cat-top-n'], ['10']) - - def test_cloudsearch_rank_single(self): - search = SearchConnection(endpoint=HOSTNAME) + print args - search.search(q='Test', rank=["date"]) + self.assertEqual(args['facet.author'], ['{"sort": "alpha"}']) - args = 
self.get_args(HTTPretty.last_request.raw_requestline) - - self.assertEqual(args['rank'], ['date']) - - def test_cloudsearch_rank_multiple(self): + def test_cloudsearch_facet_sort_multiple(self): search = SearchConnection(endpoint=HOSTNAME) - search.search(q='Test', rank=["date", "score"]) + search.search(q='Test', facet={'author': {'sort': 'alpha'}, + 'cat': {'sort': 'count'}}) args = self.get_args(HTTPretty.last_request.raw_requestline) - self.assertEqual(args['rank'], ['date,score']) + self.assertEqual(args['facet.author'], ['{"sort": "alpha"}']) + self.assertEqual(args['facet.cat'], ['{"sort": "count"}']) def test_cloudsearch_result_fields_single(self): search = SearchConnection(endpoint=HOSTNAME) @@ -228,7 +188,7 @@ def test_cloudsearch_result_fields_single(self): args = self.get_args(HTTPretty.last_request.raw_requestline) - self.assertEqual(args['return-fields'], ['author']) + self.assertEqual(args['return'], ['author']) def test_cloudsearch_result_fields_multiple(self): search = SearchConnection(endpoint=HOSTNAME) @@ -237,28 +197,7 @@ def test_cloudsearch_result_fields_multiple(self): args = self.get_args(HTTPretty.last_request.raw_requestline) - self.assertEqual(args['return-fields'], ['author,title']) - - - def test_cloudsearch_t_field_single(self): - search = SearchConnection(endpoint=HOSTNAME) - - search.search(q='Test', t={'year':'2001..2007'}) - - args = self.get_args(HTTPretty.last_request.raw_requestline) - - self.assertEqual(args['t-year'], ['2001..2007']) - - def test_cloudsearch_t_field_multiple(self): - search = SearchConnection(endpoint=HOSTNAME) - - search.search(q='Test', t={'year':'2001..2007', 'score':'10..50'}) - - args = self.get_args(HTTPretty.last_request.raw_requestline) - - self.assertEqual(args['t-year'], ['2001..2007']) - self.assertEqual(args['t-score'], ['10..50']) - + self.assertEqual(args['return'], ['author,title']) def test_cloudsearch_results_meta(self): """Check returned metadata is parsed correctly""" @@ -267,8 +206,8 @@ def 
test_cloudsearch_results_meta(self): results = search.search(q='Test') # These rely on the default response which is fed into HTTPretty - self.assertEqual(results.rank, "-text_relevance") - self.assertEqual(results.match_expression, "Test") + self.assertEqual(results.hits, 30) + self.assertEqual(results.docs[0]['fields']['rank'], 1) def test_cloudsearch_results_info(self): """Check num_pages_needed is calculated correctly""" @@ -345,14 +284,14 @@ class CloudSearchSearchFacetTest(CloudSearchSearchBaseTest): 'start': 0, 'hit':CloudSearchSearchBaseTest.hits }, - 'info': { + 'status': { 'rid':'b7c167f6c2da6d93531b9a7b314ad030b3a74803b4b7797edb905ba5a6a08', 'time-ms': 2, 'cpu-time-ms': 0 }, 'facets': { 'tags': {}, - 'animals': {'constraints': [{'count': '2', 'value': 'fish'}, {'count': '1', 'value':'lions'}]}, + 'animals': {'buckets': [{'count': '2', 'value': 'fish'}, {'count': '1', 'value':'lions'}]}, } } @@ -361,7 +300,7 @@ def test_cloudsearch_search_facets(self): search = SearchConnection(endpoint=HOSTNAME) - results = search.search(q='Test', facet=['tags']) + results = search.search(q='Test', facet={'tags': {}}) self.assertTrue('tags' not in results.facets) self.assertEqual(results.facets['animals'], {u'lions': u'1', u'fish': u'2'}) From 4a8379b65275d24d357b33058129e7c5d97e458f Mon Sep 17 00:00:00 2001 From: "Daniel G. 
Taylor" Date: Fri, 18 Apr 2014 10:19:12 -0700 Subject: [PATCH 49/60] Documentation Updates * Fix class/module names in the docs files for cloudsearch and cloudsearch2 * Remove param doc comments that no longer exist * Normalize the parameter help text for domain_name --- boto/cloudsearch2/domain.py | 11 +------ docs/source/ref/cloudsearch.rst | 49 ++++++++++++++++++-------------- docs/source/ref/cloudsearch2.rst | 49 ++++++++++++++------------------ 3 files changed, 50 insertions(+), 59 deletions(-) diff --git a/boto/cloudsearch2/domain.py b/boto/cloudsearch2/domain.py index 0643eaf8fc..13aff2b642 100644 --- a/boto/cloudsearch2/domain.py +++ b/boto/cloudsearch2/domain.py @@ -232,7 +232,7 @@ def index_documents(self): Tells the search domain to start indexing its documents using the latest text processing options and IndexFields. This operation must be invoked to make options whose OptionStatus - has OptioState of RequiresIndexDocuments visible in search + has OptionState of RequiresIndexDocuments visible in search results. """ self.layer1.index_documents(self.name) @@ -261,15 +261,6 @@ def create_index_field(self, field_name, field_type, Defines an ``IndexField``, either replacing an existing definition or creating a new one. - :type domain_name: string - :param domain_name: A string that represents the name of a - domain. Domain names must be unique across the domains - owned by an account within an AWS region. Domain names - must start with a letter or number and can contain the - following characters: a-z (lowercase), 0-9, and - - (hyphen). Uppercase letters and underscores are not - allowed. - :type field_name: string :param field_name: The name of a field in the search index. 
diff --git a/docs/source/ref/cloudsearch.rst b/docs/source/ref/cloudsearch.rst index 98d4a22403..bac2d86663 100644 --- a/docs/source/ref/cloudsearch.rst +++ b/docs/source/ref/cloudsearch.rst @@ -4,51 +4,58 @@ Cloudsearch =========== -boto.cloudsearch2 ------------------ +boto.cloudsearch +---------------- -.. automodule:: boto.cloudsearch2 +.. automodule:: boto.cloudsearch :members: :undoc-members: -boto.cloudsearch2.domain ------------------------- +boto.cloudsearch.domain +----------------------- -.. automodule:: boto.cloudsearch2.domain +.. automodule:: boto.cloudsearch.domain :members: :undoc-members: -boto.cloudsearch2.layer1 ------------------------- +boto.cloudsearch.exceptions +----------------------- -.. automodule:: boto.cloudsearch2.layer1 +.. automodule:: boto.cloudsearch.exceptions :members: :undoc-members: -boto.cloudsearch2.layer2 ------------------------- +boto.cloudsearch.layer1 +----------------------- -.. automodule:: boto.cloudsearch2.layer2 +.. automodule:: boto.cloudsearch.layer1 :members: :undoc-members: -boto.cloudsearch2.optionstatus ------------------------------- +boto.cloudsearch.layer2 +----------------------- -.. automodule:: boto.cloudsearch2.optionstatus +.. automodule:: boto.cloudsearch.layer2 :members: :undoc-members: -boto.cloudsearch2.search ------------------------- +boto.cloudsearch.optionstatus +----------------------------- -.. automodule:: boto.cloudsearch2.search +.. automodule:: boto.cloudsearch.optionstatus :members: :undoc-members: -boto.cloudsearch2.document --------------------------- +boto.cloudsearch.search +----------------------- -.. automodule:: boto.cloudsearch2.document +.. automodule:: boto.cloudsearch.search + :members: + :undoc-members: + +boto.cloudsearch.document +------------------------- + +.. 
automodule:: boto.cloudsearch.document :members: :undoc-members: diff --git a/docs/source/ref/cloudsearch2.rst b/docs/source/ref/cloudsearch2.rst index bac2d86663..98d4a22403 100644 --- a/docs/source/ref/cloudsearch2.rst +++ b/docs/source/ref/cloudsearch2.rst @@ -4,58 +4,51 @@ Cloudsearch =========== -boto.cloudsearch ----------------- +boto.cloudsearch2 +----------------- -.. automodule:: boto.cloudsearch +.. automodule:: boto.cloudsearch2 :members: :undoc-members: -boto.cloudsearch.domain ------------------------ +boto.cloudsearch2.domain +------------------------ -.. automodule:: boto.cloudsearch.domain +.. automodule:: boto.cloudsearch2.domain :members: :undoc-members: -boto.cloudsearch.exceptions ------------------------ +boto.cloudsearch2.layer1 +------------------------ -.. automodule:: boto.cloudsearch.exceptions +.. automodule:: boto.cloudsearch2.layer1 :members: :undoc-members: -boto.cloudsearch.layer1 ------------------------ +boto.cloudsearch2.layer2 +------------------------ -.. automodule:: boto.cloudsearch.layer1 +.. automodule:: boto.cloudsearch2.layer2 :members: :undoc-members: -boto.cloudsearch.layer2 ------------------------ +boto.cloudsearch2.optionstatus +------------------------------ -.. automodule:: boto.cloudsearch.layer2 +.. automodule:: boto.cloudsearch2.optionstatus :members: :undoc-members: -boto.cloudsearch.optionstatus ------------------------------ +boto.cloudsearch2.search +------------------------ -.. automodule:: boto.cloudsearch.optionstatus +.. automodule:: boto.cloudsearch2.search :members: :undoc-members: -boto.cloudsearch.search ------------------------ +boto.cloudsearch2.document +-------------------------- -.. automodule:: boto.cloudsearch.search - :members: - :undoc-members: - -boto.cloudsearch.document -------------------------- - -.. automodule:: boto.cloudsearch.document +.. 
automodule:: boto.cloudsearch2.document :members: :undoc-members: From a8f775ae9ecb7d9a9c6fd7a13db1040996f10773 Mon Sep 17 00:00:00 2001 From: Aron Rosenberg Date: Fri, 18 Apr 2014 15:49:26 -0700 Subject: [PATCH 50/60] Fix copyright year --- boto/cloudsearch2/search.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/boto/cloudsearch2/search.py b/boto/cloudsearch2/search.py index bfeca5a94d..52dbfca5ff 100644 --- a/boto/cloudsearch2/search.py +++ b/boto/cloudsearch2/search.py @@ -1,5 +1,4 @@ -# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ -# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. # All Rights Reserved # # Permission is hereby granted, free of charge, to any person obtaining a From f9150eef3a56cc2f07dda4d2c33da73f4a5e6028 Mon Sep 17 00:00:00 2001 From: Aron Rosenberg Date: Fri, 18 Apr 2014 15:49:53 -0700 Subject: [PATCH 51/60] Use isinstance instead of type() --- boto/cloudsearch2/layer1.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/boto/cloudsearch2/layer1.py b/boto/cloudsearch2/layer1.py index 2604ee97ec..6c9b203df6 100644 --- a/boto/cloudsearch2/layer1.py +++ b/boto/cloudsearch2/layer1.py @@ -760,10 +760,10 @@ def build_complex_param(self, params, label, value): :param value: The value to serialize """ for k, v in value.items(): - if type(v) in [dict]: + if isinstance(v, dict): for k2, v2 in v.items(): self.build_complex_param(params, label + '.' 
+ k, v) - elif type(v) in [bool]: + elif isinstance(v, bool): params['%s.%s' % (label, k)] = v and 'true' or 'false' else: params['%s.%s' % (label, k)] = v From 3f646f41192dc06e5f8d62133c2c5b4d7c60f204 Mon Sep 17 00:00:00 2001 From: Aron Rosenberg Date: Fri, 18 Apr 2014 15:52:43 -0700 Subject: [PATCH 52/60] Remove unused wait_for_state function --- boto/cloudsearch2/optionstatus.py | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/boto/cloudsearch2/optionstatus.py b/boto/cloudsearch2/optionstatus.py index 9531ca859b..eca1c9d552 100644 --- a/boto/cloudsearch2/optionstatus.py +++ b/boto/cloudsearch2/optionstatus.py @@ -22,7 +22,6 @@ # IN THE SOFTWARE. # -import time from boto.compat import json @@ -102,15 +101,6 @@ def save(self): data = self.save_fn(self.domain.name, self.to_json()) self.refresh(data) - def wait_for_state(self, state): - """ - Performs polling of CloudSearch to wait for the ``state`` - of this object to change to the provided state. - """ - while self.state != state: - time.sleep(5) - self.refresh() - class IndexFieldStatus(OptionStatus): def save(self): From ff129c2873a4b1013b0506d861e256d187dc39ab Mon Sep 17 00:00:00 2001 From: Lee-Ming Zen Date: Wed, 26 Mar 2014 00:06:25 -0600 Subject: [PATCH 53/60] Add support for Route 53 failover record sets and clean up the associated test. --- boto/route53/record.py | 25 +++++++++++++++++++++---- tests/unit/route53/test_connection.py | 22 +++++++++++++++++++++- 2 files changed, 42 insertions(+), 5 deletions(-) diff --git a/boto/route53/record.py b/boto/route53/record.py index 87359b167b..a2dbe08956 100644 --- a/boto/route53/record.py +++ b/boto/route53/record.py @@ -67,7 +67,7 @@ def __repr__(self): def add_change(self, action, name, type, ttl=600, alias_hosted_zone_id=None, alias_dns_name=None, identifier=None, weight=None, region=None, alias_evaluate_target_health=None, - health_check=None): + health_check=None, failover=None): """ Add a change request to the set. 
@@ -121,20 +121,24 @@ def add_change(self, action, name, type, ttl=600, for the latency-based routing :type alias_evaluate_target_health: Boolean - :param region: *Required for alias resource record sets* Indicates + :param alias_evaluate_target_health: *Required for alias resource record sets* Indicates whether this Resource Record Set should respect the health status of any health checks associated with the ALIAS target record which it is linked to. :type health_check: str :param health_check: Health check to associate with this record + + :type failover: str + :param failover: *Failover resource record sets only* Whether this is the + primary or secondary resource record set. """ change = Record(name, type, ttl, alias_hosted_zone_id=alias_hosted_zone_id, alias_dns_name=alias_dns_name, identifier=identifier, weight=weight, region=region, alias_evaluate_target_health=alias_evaluate_target_health, - health_check=health_check) + health_check=health_check, failover=failover) self.changes.append([action, change]) return change @@ -209,6 +213,11 @@ class Record(object): %(identifier)s %(region)s """ + + FailoverBody = """ + %(identifier)s + %(failover)s + """ ResourceRecordsBody = """ %(ttl)s @@ -232,7 +241,7 @@ class Record(object): def __init__(self, name=None, type=None, ttl=600, resource_records=None, alias_hosted_zone_id=None, alias_dns_name=None, identifier=None, weight=None, region=None, alias_evaluate_target_health=None, - health_check=None): + health_check=None, failover=None): self.name = name self.type = type self.ttl = ttl @@ -246,6 +255,7 @@ def __init__(self, name=None, type=None, ttl=600, resource_records=None, self.region = region self.alias_evaluate_target_health = alias_evaluate_target_health self.health_check = health_check + self.failover = failover def __repr__(self): return '' % (self.name, self.type, self.to_print()) @@ -293,6 +303,9 @@ def to_xml(self): elif self.identifier is not None and self.region is not None: weight = self.RRRBody % 
{"identifier": self.identifier, "region": self.region} + elif self.identifier is not None and self.failover is not None: + weight = self.FailoverBody % {"identifier": self.identifier, "failover": + self.failover} health_check = "" if self.health_check is not None: @@ -322,6 +335,8 @@ def to_print(self): rr += ' (WRR id=%s, w=%s)' % (self.identifier, self.weight) elif self.identifier is not None and self.region is not None: rr += ' (LBR id=%s, region=%s)' % (self.identifier, self.region) + elif self.identifier is not None and self.failover is not None: + rr += ' (FAILOVER id=%s, failover=%s)' % (self.identifier, self.failover) return rr @@ -346,6 +361,8 @@ def endElement(self, name, value, connection): self.weight = value elif name == 'Region': self.region = value + elif name == 'Failover': + self.failover = value def startElement(self, name, attrs, connection): return None diff --git a/tests/unit/route53/test_connection.py b/tests/unit/route53/test_connection.py index 5859be43ae..e8957e7faa 100644 --- a/tests/unit/route53/test_connection.py +++ b/tests/unit/route53/test_connection.py @@ -240,7 +240,7 @@ def setUp(self): def default_body(self): return """ - + test.example.com. @@ -284,6 +284,18 @@ def default_body(self): example-123456-no-evaluate-health.us-west-2.elb.amazonaws.com. + + failover.example.com. 
+ A + failover-primary + PRIMARY + 60 + + + 10.0.0.4 + + + false 100 @@ -326,3 +338,11 @@ def test_get_all_rr_sets(self): self.assertEqual(no_evaluate_record.alias_dns_name, 'example-123456-no-evaluate-health.us-west-2.elb.amazonaws.com.') no_evaluate_xml = no_evaluate_record.to_xml() self.assertTrue('false' in no_evaluate_xml) + + failover_record = response[4] + self.assertEqual(failover_record.name, 'failover.example.com.') + self.assertEqual(failover_record.type, 'A') + self.assertEqual(failover_record.identifier, 'failover-primary') + self.assertEqual(failover_record.failover, 'PRIMARY') + self.assertEqual(failover_record.ttl, '60') + From 9d90646ce721594728ee4c5806076db2398fc2ee Mon Sep 17 00:00:00 2001 From: "Daniel G. Taylor" Date: Wed, 30 Apr 2014 11:24:42 -0700 Subject: [PATCH 54/60] Update DynamoDB to support query filters --- boto/dynamodb2/layer1.py | 1603 ++++++++++++----- boto/dynamodb2/table.py | 78 +- tests/integration/dynamodb2/test_highlevel.py | 12 + tests/unit/dynamodb2/test_table.py | 18 +- 4 files changed, 1203 insertions(+), 508 deletions(-) diff --git a/boto/dynamodb2/layer1.py b/boto/dynamodb2/layer1.py index 44dadecb2b..9a1c4adfba 100644 --- a/boto/dynamodb2/layer1.py +++ b/boto/dynamodb2/layer1.py @@ -1,4 +1,4 @@ -# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the @@ -37,7 +37,112 @@ class DynamoDBConnection(AWSQueryConnection): """ Amazon DynamoDB **Overview** This is the Amazon DynamoDB API Reference. This guide provides - descriptions and samples of the Amazon DynamoDB API. + descriptions and samples of the low-level DynamoDB API. For + information about DynamoDB application development, go to the + `Amazon DynamoDB Developer Guide`_. 
+ + Instead of making the requests to the low-level DynamoDB API + directly from your application, we recommend that you use the AWS + Software Development Kits (SDKs). The easy-to-use libraries in the + AWS SDKs make it unnecessary to call the low-level DynamoDB API + directly from your application. The libraries take care of request + authentication, serialization, and connection management. For more + information, go to `Using the AWS SDKs with DynamoDB`_ in the + Amazon DynamoDB Developer Guide . + + If you decide to code against the low-level DynamoDB API directly, + you will need to write the necessary code to authenticate your + requests. For more information on signing your requests, go to + `Using the DynamoDB API`_ in the Amazon DynamoDB Developer Guide . + + The following are short descriptions of each low-level API action, + organized by function. + + **Managing Tables** + + + + + CreateTable - Creates a table with user-specified provisioned + throughput settings. You must designate one attribute as the hash + primary key for the table; you can optionally designate a second + attribute as the range primary key. DynamoDB creates indexes on + these key attributes for fast data access. Optionally, you can + create one or more secondary indexes, which provide fast data + access using non-key attributes. + + DescribeTable - Returns metadata for a table, such as table + size, status, and index information. + + UpdateTable - Modifies the provisioned throughput settings for a + table. Optionally, you can modify the provisioned throughput + settings for global secondary indexes on the table. + + ListTables - Returns a list of all tables associated with the + current AWS account and endpoint. + + DeleteTable - Deletes a table and all of its indexes. + + + + For conceptual information about managing tables, go to `Working + with Tables`_ in the Amazon DynamoDB Developer Guide . 
+ + **Reading Data** + + + + + GetItem - Returns a set of attributes for the item that has a + given primary key. By default, GetItem performs an eventually + consistent read; however, applications can specify a strongly + consistent read instead. + + BatchGetItem - Performs multiple GetItem requests for data items + using their primary keys, from one table or multiple tables. The + response from BatchGetItem has a size limit of 1 MB and returns a + maximum of 100 items. Both eventually consistent and strongly + consistent reads can be used. + + Query - Returns one or more items from a table or a secondary + index. You must provide a specific hash key value. You can narrow + the scope of the query using comparison operators against a range + key value, or on the index key. Query supports either eventual or + strong consistency. A single response has a size limit of 1 MB. + + Scan - Reads every item in a table; the result set is eventually + consistent. You can limit the number of items returned by + filtering the data attributes, using conditional expressions. Scan + can be used to enable ad-hoc querying of a table against non-key + attributes; however, since this is a full table scan without using + an index, Scan should not be used for any application query use + case that requires predictable performance. + + + + For conceptual information about reading data, go to `Working with + Items`_ and `Query and Scan Operations`_ in the Amazon DynamoDB + Developer Guide . + + **Modifying Data** + + + + + PutItem - Creates a new item, or replaces an existing item with + a new item (including all the attributes). By default, if an item + in the table already exists with the same primary key, the new + item completely replaces the existing item. You can use + conditional operators to replace an item only if its attribute + values match certain conditions, or to insert a new item only if + that item doesn't already exist. 
+ + UpdateItem - Modifies the attributes of an existing item. You + can also use conditional operators to perform an update only if + the item's attribute values match certain conditions. + + DeleteItem - Deletes an item in a table by primary key. You can + use conditional operators to delete an item only if the + item's attribute values match certain conditions. + + BatchWriteItem - Performs multiple PutItem and DeleteItem + requests across multiple tables in a single request. A failure of + any request(s) in the batch will not cause the entire + BatchWriteItem operation to fail. Supports batches of up to 25 + items to put or delete, with a maximum total request size of 1 MB. + + + + For conceptual information about modifying data, go to `Working + with Items`_ and `Query and Scan Operations`_ in the Amazon + DynamoDB Developer Guide . """ APIVersion = "2012-08-10" DefaultRegionName = "us-east-1" @@ -91,7 +196,7 @@ def batch_get_item(self, request_items, return_consumed_capacity=None): items by primary key. A single operation can retrieve up to 1 MB of data, which can - comprise as many as 100 items. BatchGetItem will return a + contain as many as 100 items. BatchGetItem will return a partial result if the response size limit is exceeded, the table's provisioned throughput is exceeded, or an internal processing failure occurs. If a partial result is returned, @@ -106,24 +211,38 @@ def batch_get_item(self, request_items, return_consumed_capacity=None): include its own logic to assemble the pages of results into one dataset. - If no items can be processed because of insufficient - provisioned throughput on each of the tables involved in the - request, BatchGetItem throws - ProvisionedThroughputExceededException . + If none of the items can be processed due to insufficient + provisioned throughput on all of the tables in the request, + then BatchGetItem will throw a + ProvisionedThroughputExceededException .
If at least one of + the items is successfully processed, then BatchGetItem + completes successfully, while returning the keys of the unread + items in UnprocessedKeys . + + If DynamoDB returns any unprocessed items, you should retry + the batch operation on those items. However, we strongly + recommend that you use an exponential backoff algorithm . If + you retry the batch operation immediately, the underlying read + or write requests can still fail due to throttling on the + individual tables. If you delay the batch operation using + exponential backoff, the individual requests in the batch are + much more likely to succeed. + + For more information, go to `Batch Operations and Error + Handling`_ in the Amazon DynamoDB Developer Guide. By default, BatchGetItem performs eventually consistent reads on every table in the request. If you want strongly consistent reads instead, you can set ConsistentRead to `True` for any or all tables. - In order to minimize response latency, BatchGetItem fetches + In order to minimize response latency, BatchGetItem retrieves items in parallel. - When designing your application, keep in mind that Amazon - DynamoDB does not return attributes in any particular order. - To help parse the response by item, include the primary key - values for the items in your request in the AttributesToGet - parameter. + When designing your application, keep in mind that DynamoDB + does not return attributes in any particular order. To help + parse the response by item, include the primary key values for + the items in your request in the AttributesToGet parameter. If a requested item does not exist, it is not returned in the result. Requests for nonexistent items consume the minimum @@ -141,17 +260,27 @@ def batch_get_item(self, request_items, return_consumed_capacity=None): + Keys - An array of primary key attribute values that define specific - items in the table. + items in the table. 
For each primary key, you must provide all of + the key attributes. For example, with a hash type primary key, you + only need to specify the hash attribute. For a hash-and-range type + primary key, you must specify both the hash attribute and the range + attribute. + AttributesToGet - One or more attributes to be retrieved from the - table or index. By default, all attributes are returned. If a - specified attribute is not found, it does not appear in the result. + table. By default, all attributes are returned. If a specified + attribute is not found, it does not appear in the result. Note that + AttributesToGet has no effect on provisioned throughput + consumption. DynamoDB determines capacity units consumed based on + item size, not on the amount of data that is returned to an + application. + ConsistentRead - If `True`, a strongly consistent read is used; if `False` (the default), an eventually consistent read is used. :type return_consumed_capacity: string - :param return_consumed_capacity: If set to `TOTAL`, ConsumedCapacity is - included in the response; if set to `NONE` (the default), - ConsumedCapacity is not included. + :param return_consumed_capacity: If set to `TOTAL`, the response + includes ConsumedCapacity data for tables and indexes. If set to + `INDEXES`, the response includes ConsumedCapacity for indexes. If + set to `NONE` (the default), ConsumedCapacity is not included in + the response. """ params = {'RequestItems': request_items, } @@ -183,27 +312,39 @@ def batch_write_item(self, request_items, return_consumed_capacity=None, unprocessed items and submit a new BatchWriteItem request with those unprocessed items until all items have been processed. - To write one item, you can use the PutItem operation; to - delete one item, you can use the DeleteItem operation. 
+ Note that if none of the items can be processed due to + insufficient provisioned throughput on all of the tables in + the request, then BatchWriteItem will throw a + ProvisionedThroughputExceededException . + + If DynamoDB returns any unprocessed items, you should retry + the batch operation on those items. However, we strongly + recommend that you use an exponential backoff algorithm . If + you retry the batch operation immediately, the underlying read + or write requests can still fail due to throttling on the + individual tables. If you delay the batch operation using + exponential backoff, the individual requests in the batch are + much more likely to succeed. + + For more information, go to `Batch Operations and Error + Handling`_ in the Amazon DynamoDB Developer Guide. With BatchWriteItem , you can efficiently write or delete large amounts of data, such as from Amazon Elastic MapReduce - (EMR), or copy data from another database into Amazon - DynamoDB. In order to improve performance with these large- - scale operations, BatchWriteItem does not behave in the same - way as individual PutItem and DeleteItem calls would For - example, you cannot specify conditions on individual put and - delete requests, and BatchWriteItem does not return deleted - items in the response. + (EMR), or copy data from another database into DynamoDB. In + order to improve performance with these large-scale + operations, BatchWriteItem does not behave in the same way as + individual PutItem and DeleteItem calls would. For example, you + cannot specify conditions on individual put and delete + requests, and BatchWriteItem does not return deleted items in + the response. If you use a programming language that supports concurrency, such as Java, you can use threads to write items in parallel. Your application must include the necessary logic to manage
- - With languages that don't support threading, such as PHP, - BatchWriteItem will write or delete the specified items one at - a time. In both situations, BatchWriteItem provides an + the threads. With languages that don't support threading, such + as PHP, you must update or delete the specified items one at a + time. In both situations, BatchWriteItem provides an alternative where the API performs the specified put and delete operations in parallel, giving you the power of the thread pool approach without having to introduce complexity @@ -215,8 +356,8 @@ def batch_write_item(self, request_items, return_consumed_capacity=None, operations on nonexistent items consume one write capacity unit. - If one or more of the following is true, Amazon DynamoDB - rejects the entire batch write operation: + If one or more of the following is true, DynamoDB rejects the + entire batch write operation: + One or more tables specified in the BatchWriteItem request @@ -241,8 +382,12 @@ def batch_write_item(self, request_items, return_consumed_capacity=None, The item to be deleted is identified by a Key subelement: + Key - A map of primary key attribute values that uniquely identify - the item. Each entry in this map consists of an attribute name and - an attribute value. + the item. Each entry in this map consists of an attribute name + and an attribute value. For each primary key, you must provide all + of the key attributes. For example, with a hash type primary key, + you only need to specify the hash attribute. For a hash-and-range + type primary key, you must specify both the hash attribute and the + range attribute. + PutRequest - Perform a PutItem operation on the specified item. The item to be put is identified by an Item subelement: @@ -257,15 +402,17 @@ def batch_write_item(self, request_items, return_consumed_capacity=None, match those of the schema in the table's attribute definition.
:type return_consumed_capacity: string - :param return_consumed_capacity: If set to `TOTAL`, ConsumedCapacity is - included in the response; if set to `NONE` (the default), - ConsumedCapacity is not included. + :param return_consumed_capacity: If set to `TOTAL`, the response + includes ConsumedCapacity data for tables and indexes. If set to + `INDEXES`, the response includes ConsumedCapacity for indexes. If + set to `NONE` (the default), ConsumedCapacity is not included in + the response. :type return_item_collection_metrics: string :param return_item_collection_metrics: If set to `SIZE`, statistics about item collections, if any, that were modified during the operation are returned in the response. If set to `NONE` (the - default), no statistics are returned.. + default), no statistics are returned. """ params = {'RequestItems': request_items, } @@ -286,16 +433,15 @@ def create_table(self, attribute_definitions, table_name, key_schema, the tables in different regions. CreateTable is an asynchronous operation. Upon receiving a - CreateTable request, Amazon DynamoDB immediately returns a - response with a TableStatus of `CREATING`. After the table is - created, Amazon DynamoDB sets the TableStatus to `ACTIVE`. You - can perform read and write operations only on an `ACTIVE` - table. + CreateTable request, DynamoDB immediately returns a response + with a TableStatus of `CREATING`. After the table is created, + DynamoDB sets the TableStatus to `ACTIVE`. You can perform + read and write operations only on an `ACTIVE` table. - If you want to create multiple tables with local secondary - indexes on them, you must create them sequentially. Only one - table with local secondary indexes can be in the `CREATING` - state at any given time. + If you want to create multiple tables with secondary indexes + on them, you must create them sequentially. Only one table + with secondary indexes can be in the `CREATING` state at any + given time. 
You can use the DescribeTable API to check the table status. @@ -308,9 +454,9 @@ def create_table(self, attribute_definitions, table_name, key_schema, :type key_schema: list :param key_schema: Specifies the attributes that make up the primary - key for the table. The attributes in KeySchema must also be defined - in the AttributeDefinitions array. For more information, see `Data - Model`_ in the Amazon DynamoDB Developer Guide. + key for a table or an index. The attributes in KeySchema must also + be defined in the AttributeDefinitions array. For more information, + see `Data Model`_ in the Amazon DynamoDB Developer Guide. Each KeySchemaElement in the array is composed of: @@ -331,18 +477,19 @@ def create_table(self, attribute_definitions, table_name, key_schema, :type local_secondary_indexes: list :param local_secondary_indexes: - One or more secondary indexes (the maximum is five) to be created on - the table. Each index is scoped to a given hash key value. There is - a 10 gigabyte size limit per hash key; otherwise, the size of a - local secondary index is unconstrained. + One or more local secondary indexes (the maximum is five) to be created + on the table. Each index is scoped to a given hash key value. There + is a 10 GB size limit per hash key; otherwise, the size of a local + secondary index is unconstrained. - Each secondary index in the array includes the following: + Each local secondary index in the array includes the following: - + IndexName - The name of the secondary index. Must be unique only for - this table. - + KeySchema - Specifies the key schema for the index. The key schema - must begin with the same hash key attribute as the table. + + IndexName - The name of the local secondary index. Must be unique + only for this table. + + KeySchema - Specifies the key schema for the local secondary index. + The key schema must begin with the same hash key attribute as the + table. 
+ Projection - Specifies attributes that are copied (projected) from the table into the index. These are in addition to the primary key attributes and index key attributes, which are automatically @@ -358,19 +505,51 @@ def create_table(self, attribute_definitions, table_name, key_schema, + `ALL` - All of the table attributes are projected into the index. + NonKeyAttributes - A list of one or more non-key attribute names that - are projected into the index. The total count of attributes - specified in NonKeyAttributes , summed across all of the local + are projected into the secondary index. The total count of + attributes specified in NonKeyAttributes , summed across all of the secondary indexes, must not exceed 20. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total. :type global_secondary_indexes: list :param global_secondary_indexes: + One or more global secondary indexes (the maximum is five) to be + created on the table. Each global secondary index in the array + includes the following: + + + + IndexName - The name of the global secondary index. Must be unique + only for this table. + + KeySchema - Specifies the key schema for the global secondary index. + + Projection - Specifies attributes that are copied (projected) from + the table into the index. These are in addition to the primary key + attributes and index key attributes, which are automatically + projected. Each attribute specification is composed of: + + + ProjectionType - One of the following: + + + `KEYS_ONLY` - Only the index and primary keys are projected into the + index. + + `INCLUDE` - Only the specified table attributes are projected into + the index. The list of projected attributes are in NonKeyAttributes + . + + `ALL` - All of the table attributes are projected into the index. + + + NonKeyAttributes - A list of one or more non-key attribute names that + are projected into the secondary index. 
The total count of + attributes specified in NonKeyAttributes , summed across all of the + secondary indexes, must not exceed 20. If you project the same + attribute into two different indexes, this counts as two distinct + attributes when determining the total. + + + ProvisionedThroughput - The provisioned throughput settings for the + global secondary index, consisting of read and write capacity + units. :type provisioned_throughput: dict - :param provisioned_throughput: The provisioned throughput settings for - the specified table. The settings can be modified using the - UpdateTable operation. + :param provisioned_throughput: Represents the provisioned throughput + settings for a specified table or index. The settings can be + modified using the UpdateTable operation. For current minimum and maximum provisioned throughput values, see `Limits`_ in the Amazon DynamoDB Developer Guide. @@ -388,7 +567,8 @@ def create_table(self, attribute_definitions, table_name, key_schema, return self.make_request(action='CreateTable', body=json.dumps(params)) - def delete_item(self, table_name, key, expected=None, return_values=None, + def delete_item(self, table_name, key, expected=None, + conditional_operator=None, return_values=None, return_consumed_capacity=None, return_item_collection_metrics=None): """ @@ -406,8 +586,8 @@ def delete_item(self, table_name, key, expected=None, return_values=None, Conditional deletes are useful for only deleting items if specific conditions are met. If those conditions are met, - Amazon DynamoDB performs the delete. Otherwise, the item is - not deleted. + DynamoDB performs the delete. Otherwise, the item is not + deleted. :type table_name: string :param table_name: The name of the table from which to delete the item. @@ -415,50 +595,181 @@ def delete_item(self, table_name, key, expected=None, return_values=None, :type key: map :param key: A map of attribute names to AttributeValue objects, representing the primary key of the item to delete. 
+ For the primary key, you must provide all of the attributes. For + example, with a hash type primary key, you only need to specify the + hash attribute. For a hash-and-range type primary key, you must + specify both the hash attribute and the range attribute. :type expected: map - :param expected: A map of attribute/condition pairs. This is the - conditional block for the DeleteItem operation. All the conditions - must be met for the operation to succeed. - Expected allows you to provide an attribute name, and whether or not - Amazon DynamoDB should check to see if the attribute value already - exists; or if the attribute value exists and has a particular value - before changing it. - - Each item in Expected represents an attribute name for Amazon DynamoDB - to check, along with the following: - - - + Value - The attribute value for Amazon DynamoDB to check. - + Exists - Causes Amazon DynamoDB to evaluate the value before - attempting a conditional operation: - - + If Exists is `True`, Amazon DynamoDB will check to see if that - attribute value already exists in the table. If it is found, then - the operation succeeds. If it is not found, the operation fails - with a ConditionalCheckFailedException . - + If Exists is `False`, Amazon DynamoDB assumes that the attribute - value does not exist in the table. If in fact the value does not - exist, then the assumption is valid and the operation succeeds. If - the value is found, despite the assumption that it does not exist, - the operation fails with a ConditionalCheckFailedException . - The default setting for Exists is `True`. If you supply a Value all by - itself, Amazon DynamoDB assumes the attribute exists: You don't - have to set Exists to `True`, because it is implied. Amazon - DynamoDB returns a ValidationException if: - - + Exists is `True` but there is no Value to check. (You expect a value - to exist, but don't specify what that value is.) - + Exists is `False` but you also specify a Value . 
(You cannot expect - an attribute to have a value, while also expecting it not to - exist.) - - - - If you specify more than one condition for Exists , then all of the - conditions must evaluate to true. (In other words, the conditions - are ANDed together.) Otherwise, the conditional operation will - fail. + :param expected: + A map of attribute/condition pairs. This is the conditional block for + the DeleteItem operation. + + Each element of Expected consists of an attribute name, a comparison + operator, and one or more values. DynamoDB compares the attribute + with the value(s) you supplied, using the comparison operator. For + each Expected element, the result of the evaluation is either true + or false. + + If you specify more than one element in the Expected map, then by + default all of the conditions must evaluate to true. In other + words, the conditions are ANDed together. (You can use the + ConditionalOperator parameter to OR the conditions instead. If you + do this, then at least one of the conditions must evaluate to true, + rather than all of them.) + + If the Expected map evaluates to true, then the conditional operation + succeeds; otherwise, it fails. + + Each item in Expected represents an attribute name for DynamoDB to + check, along with an AttributeValueList and a ComparisonOperator : + + + + AttributeValueList - One or more values to evaluate against the + supplied attribute. The number of values in the list depends on the + ComparisonOperator being used. For type Number, value comparisons + are numeric. String value comparisons for greater than, equals, or + less than are based on ASCII character code values. For example, + `a` is greater than `A`, and `aa` is greater than `B`. For a list + of code values, see + `http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_. + For Binary, DynamoDB treats each byte of the binary data as + unsigned when it compares binary values, for example when + evaluating query expressions. 
+ + ComparisonOperator - A comparator for evaluating attributes in the + AttributeValueList . When performing the comparison, DynamoDB uses + strongly consistent reads. The following comparison operators are + available: `EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | + CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN` The following + are descriptions of each comparison operator. + + + `EQ` : Equal. AttributeValueList can contain only one AttributeValue + of type String, Number, Binary, String Set, Number Set, or Binary + Set. If an item contains an AttributeValue of a different type than + the one specified in the request, the value does not match. For + example, `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` + does not equal `{"NS":["6", "2", "1"]}`. >
  • + + `NE` : Not equal. AttributeValueList can contain only one + AttributeValue of type String, Number, Binary, String Set, Number + Set, or Binary Set. If an item contains an AttributeValue of a + different type than the one specified in the request, the value + does not match. For example, `{"S":"6"}` does not equal + `{"N":"6"}`. Also, `{"N":"6"}` does not equal `{"NS":["6", "2", + "1"]}`. >
  • + + `LE` : Less than or equal. AttributeValueList can contain only one + AttributeValue of type String, Number, or Binary (not a set). If an + item contains an AttributeValue of a different type than the one + specified in the request, the value does not match. For example, + `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not + compare to `{"NS":["6", "2", "1"]}`. >
  • + + `LT` : Less than. AttributeValueList can contain only one + AttributeValue of type String, Number, or Binary (not a set). If an + item contains an AttributeValue of a different type than the one + specified in the request, the value does not match. For example, + `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not + compare to `{"NS":["6", "2", "1"]}`. >
  • + + `GE` : Greater than or equal. AttributeValueList can contain only one + AttributeValue of type String, Number, or Binary (not a set). If an + item contains an AttributeValue of a different type than the one + specified in the request, the value does not match. For example, + `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not + compare to `{"NS":["6", "2", "1"]}`. >
  • + + `GT` : Greater than. AttributeValueList can contain only one + AttributeValue of type String, Number, or Binary (not a set). If an + item contains an AttributeValue of a different type than the one + specified in the request, the value does not match. For example, + `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not + compare to `{"NS":["6", "2", "1"]}`. >
  • + + `NOT_NULL` : The attribute exists. + + `NULL` : The attribute does not exist. + + `CONTAINS` : checks for a subsequence, or value in a set. + AttributeValueList can contain only one AttributeValue of type + String, Number, or Binary (not a set). If the target attribute of + the comparison is a String, then the operation checks for a + substring match. If the target attribute of the comparison is + Binary, then the operation looks for a subsequence of the target + that matches the input. If the target attribute of the comparison + is a set ("SS", "NS", or "BS"), then the operation checks for a + member of the set (not as a substring). + + `NOT_CONTAINS` : checks for absence of a subsequence, or absence of a + value in a set. AttributeValueList can contain only one + AttributeValue of type String, Number, or Binary (not a set). If + the target attribute of the comparison is a String, then the + operation checks for the absence of a substring match. If the + target attribute of the comparison is Binary, then the operation + checks for the absence of a subsequence of the target that matches + the input. If the target attribute of the comparison is a set + ("SS", "NS", or "BS"), then the operation checks for the absence of + a member of the set (not as a substring). + + `BEGINS_WITH` : checks for a prefix. AttributeValueList can contain + only one AttributeValue of type String or Binary (not a Number or a + set). The target attribute of the comparison must be a String or + Binary (not a Number or a set). >
  •
+
+ + `IN` : checks for exact matches. AttributeValueList can contain more
+ than one AttributeValue of type String, Number, or Binary (not a
+ set). The target attribute of the comparison must be of the same
+ type and exact value to match. A String never matches a String set.
+ + `BETWEEN` : Greater than or equal to the first value, and less than
+ or equal to the second value. AttributeValueList must contain two
+ AttributeValue elements of the same type, either String, Number, or
+ Binary (not a set). A target attribute matches if the target value
+ is greater than, or equal to, the first element and less than, or
+ equal to, the second element. If an item contains an AttributeValue
+ of a different type than the one specified in the request, the
+ value does not match. For example, `{"S":"6"}` does not compare to
+ `{"N":"6"}`. Also, `{"N":"6"}` does not compare to `{"NS":["6",
+ "2", "1"]}`
+
+
+
+ For usage examples of AttributeValueList and ComparisonOperator , see
+ `Conditional Expressions`_ in the Amazon DynamoDB Developer Guide.
+
+
+ For backward compatibility with previous DynamoDB releases, the
+ following parameters can be used instead of AttributeValueList and
+ ComparisonOperator :
+
+
+ + Value - A value for DynamoDB to compare with an attribute.
+ + Exists - Causes DynamoDB to evaluate the value before attempting the
+ conditional operation:
+
+ + If Exists is `True`, DynamoDB will check to see if that attribute
+ value already exists in the table. If it is found, then the
+ condition evaluates to true; otherwise the condition evaluates to
+ false.
+ + If Exists is `False`, DynamoDB assumes that the attribute value does
+ not exist in the table. If in fact the value does not exist, then
+ the assumption is valid and the condition evaluates to true. If the
+ value is found, despite the assumption that it does not exist, the
+ condition evaluates to false.
+
+
+
+ Even though DynamoDB continues to accept the Value and Exists
+ parameters, they are now deprecated. We recommend that you use
+ AttributeValueList and ComparisonOperator instead, since they allow
+ you to construct a much wider range of conditions.
+
+ The Value and Exists parameters are incompatible with
+ AttributeValueList and ComparisonOperator . If you attempt to use
+ both sets of parameters at once, DynamoDB will throw a
+ ValidationException .
+
+ :type conditional_operator: string
+ :param conditional_operator: A logical operator to apply to the
+ conditions in the Expected map:
+
+
+ + `AND` - If all of the conditions evaluate to true, then the entire
+ map evaluates to true.
+ + `OR` - If at least one of the conditions evaluates to true, then the
+ entire map evaluates to true.
+
+
+ If you omit ConditionalOperator , then `AND` is the default.
+
+ The operation will succeed only if the entire map evaluates to true.

 :type return_values: string
 :param return_values:
@@ -472,20 +783,24 @@ def delete_item(self, table_name, key, expected=None, return_values=None,
 + `ALL_OLD` - The content of the old item is returned.

 :type return_consumed_capacity: string
- :param return_consumed_capacity: If set to `TOTAL`, ConsumedCapacity is
- included in the response; if set to `NONE` (the default),
- ConsumedCapacity is not included.
+ :param return_consumed_capacity: If set to `TOTAL`, the response
+ includes ConsumedCapacity data for tables and indexes. If set to
+ `INDEXES`, the response includes ConsumedCapacity for indexes. If
+ set to `NONE` (the default), ConsumedCapacity is not included in
+ the response.

 :type return_item_collection_metrics: string
 :param return_item_collection_metrics: If set to `SIZE`, statistics
 about item collections, if any, that were modified during the
 operation are returned in the response. If set to `NONE` (the
- default), no statistics are returned..
+ default), no statistics are returned.
""" params = {'TableName': table_name, 'Key': key, } if expected is not None: params['Expected'] = expected + if conditional_operator is not None: + params['ConditionalOperator'] = conditional_operator if return_values is not None: params['ReturnValues'] = return_values if return_consumed_capacity is not None: @@ -499,20 +814,20 @@ def delete_table(self, table_name): """ The DeleteTable operation deletes a table and all of its items. After a DeleteTable request, the specified table is in - the `DELETING` state until Amazon DynamoDB completes the - deletion. If the table is in the `ACTIVE` state, you can - delete it. If a table is in `CREATING` or `UPDATING` states, - then Amazon DynamoDB returns a ResourceInUseException . If the - specified table does not exist, Amazon DynamoDB returns a - ResourceNotFoundException . If table is already in the - `DELETING` state, no error is returned. - - Amazon DynamoDB might continue to accept data read and write + the `DELETING` state until DynamoDB completes the deletion. If + the table is in the `ACTIVE` state, you can delete it. If a + table is in `CREATING` or `UPDATING` states, then DynamoDB + returns a ResourceInUseException . If the specified table does + not exist, DynamoDB returns a ResourceNotFoundException . If + table is already in the `DELETING` state, no error is + returned. + + DynamoDB might continue to accept data read and write operations, such as GetItem and PutItem , on a table in the `DELETING` state until the table deletion is complete. - When you delete a table, any local secondary indexes on that - table are also deleted. + When you delete a table, any indexes on that table are also + deleted. Use the DescribeTable API to check the status of the table. @@ -557,12 +872,20 @@ def get_item(self, table_name, key, attributes_to_get=None, :type key: map :param key: A map of attribute names to AttributeValue objects, representing the primary key of the item to retrieve. 
+ For the primary key, you must provide all of the attributes. For + example, with a hash type primary key, you only need to specify the + hash attribute. For a hash-and-range type primary key, you must + specify both the hash attribute and the range attribute. :type attributes_to_get: list :param attributes_to_get: The names of one or more attributes to retrieve. If no attribute names are specified, then all attributes will be returned. If any of the requested attributes are not found, they will not appear in the result. + Note that AttributesToGet has no effect on provisioned throughput + consumption. DynamoDB determines capacity units consumed based on + item size, not on the amount of data that is returned to an + application. :type consistent_read: boolean :param consistent_read: If set to `True`, then the operation uses @@ -570,9 +893,11 @@ def get_item(self, table_name, key, attributes_to_get=None, are used. :type return_consumed_capacity: string - :param return_consumed_capacity: If set to `TOTAL`, ConsumedCapacity is - included in the response; if set to `NONE` (the default), - ConsumedCapacity is not included. + :param return_consumed_capacity: If set to `TOTAL`, the response + includes ConsumedCapacity data for tables and indexes. If set to + `INDEXES`, the response includes ConsumedCapacity for indexes. If + set to `NONE` (the default), ConsumedCapacity is not included in + the response. """ params = {'TableName': table_name, 'Key': key, } @@ -587,17 +912,19 @@ def get_item(self, table_name, key, attributes_to_get=None, def list_tables(self, exclusive_start_table_name=None, limit=None): """ - Returns an array of all the tables associated with the current - account and endpoint. + Returns an array of table names associated with the current + account and endpoint. The output from ListTables is paginated, + with each page returning a maximum of 100 table names. 
:type exclusive_start_table_name: string - :param exclusive_start_table_name: The name of the table that starts - the list. If you already ran a ListTables operation and received a - LastEvaluatedTableName value in the response, use that value here - to continue the list. + :param exclusive_start_table_name: The first table name that this + operation will evaluate. Use the value that was returned for + LastEvaluatedTableName in a previous operation, so that you can + obtain the next page of results. :type limit: integer - :param limit: A maximum number of table names to return. + :param limit: A maximum number of table names to return. If this + parameter is not specified, the limit is 100. """ params = {} @@ -610,7 +937,8 @@ def list_tables(self, exclusive_start_table_name=None, limit=None): def put_item(self, table_name, item, expected=None, return_values=None, return_consumed_capacity=None, - return_item_collection_metrics=None): + return_item_collection_metrics=None, + conditional_operator=None): """ Creates a new item, or replaces an old item with a new item. If an item already exists in the specified table with the same @@ -635,8 +963,8 @@ def put_item(self, table_name, item, expected=None, return_values=None, description. To prevent a new item from replacing an existing item, use a - conditional put operation with Exists set to `False` for the - primary key attribute, or attributes. + conditional put operation with ComparisonOperator set to + `NULL` for the primary key attribute, or attributes. For more information about using this API, see `Working with Items`_ in the Amazon DynamoDB Developer Guide. @@ -648,6 +976,11 @@ def put_item(self, table_name, item, expected=None, return_values=None, :param item: A map of attribute name/value pairs, one for each attribute. Only the primary key attributes are required; you can optionally provide other attribute name-value pairs for the item. + You must provide all of the attributes for the primary key. 
For + example, with a hash type primary key, you only need to specify the + hash attribute. For a hash-and-range type primary key, you must + specify both the hash attribute and the range attribute. + If you specify any attributes that are part of an index key, then the data types for those attributes must match those of the schema in the table's attribute definition. @@ -658,48 +991,161 @@ def put_item(self, table_name, item, expected=None, return_values=None, Each element in the Item map is an AttributeValue object. :type expected: map - :param expected: A map of attribute/condition pairs. This is the - conditional block for the PutItem operation. All the conditions - must be met for the operation to succeed. - Expected allows you to provide an attribute name, and whether or not - Amazon DynamoDB should check to see if the attribute value already - exists; or if the attribute value exists and has a particular value - before changing it. - - Each item in Expected represents an attribute name for Amazon DynamoDB - to check, along with the following: - - - + Value - The attribute value for Amazon DynamoDB to check. - + Exists - Causes Amazon DynamoDB to evaluate the value before - attempting a conditional operation: - - + If Exists is `True`, Amazon DynamoDB will check to see if that - attribute value already exists in the table. If it is found, then - the operation succeeds. If it is not found, the operation fails - with a ConditionalCheckFailedException . - + If Exists is `False`, Amazon DynamoDB assumes that the attribute - value does not exist in the table. If in fact the value does not - exist, then the assumption is valid and the operation succeeds. If - the value is found, despite the assumption that it does not exist, - the operation fails with a ConditionalCheckFailedException . - The default setting for Exists is `True`. 
If you supply a Value all by - itself, Amazon DynamoDB assumes the attribute exists: You don't - have to set Exists to `True`, because it is implied. Amazon - DynamoDB returns a ValidationException if: - - + Exists is `True` but there is no Value to check. (You expect a value - to exist, but don't specify what that value is.) - + Exists is `False` but you also specify a Value . (You cannot expect - an attribute to have a value, while also expecting it not to - exist.) - - - - If you specify more than one condition for Exists , then all of the - conditions must evaluate to true. (In other words, the conditions - are ANDed together.) Otherwise, the conditional operation will - fail. + :param expected: + A map of attribute/condition pairs. This is the conditional block for + the PutItem operation. + + Each element of Expected consists of an attribute name, a comparison + operator, and one or more values. DynamoDB compares the attribute + with the value(s) you supplied, using the comparison operator. For + each Expected element, the result of the evaluation is either true + or false. + + If you specify more than one element in the Expected map, then by + default all of the conditions must evaluate to true. In other + words, the conditions are ANDed together. (You can use the + ConditionalOperator parameter to OR the conditions instead. If you + do this, then at least one of the conditions must evaluate to true, + rather than all of them.) + + If the Expected map evaluates to true, then the conditional operation + succeeds; otherwise, it fails. + + Each item in Expected represents an attribute name for DynamoDB to + check, along with an AttributeValueList and a ComparisonOperator : + + + + AttributeValueList - One or more values to evaluate against the + supplied attribute. The number of values in the list depends on the + ComparisonOperator being used. For type Number, value comparisons + are numeric. 
String value comparisons for greater than, equals, or + less than are based on ASCII character code values. For example, + `a` is greater than `A`, and `aa` is greater than `B`. For a list + of code values, see + `http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_. + For Binary, DynamoDB treats each byte of the binary data as + unsigned when it compares binary values, for example when + evaluating query expressions. + + ComparisonOperator - A comparator for evaluating attributes in the + AttributeValueList . When performing the comparison, DynamoDB uses + strongly consistent reads. The following comparison operators are + available: `EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | + CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN` The following + are descriptions of each comparison operator. + + + `EQ` : Equal. AttributeValueList can contain only one AttributeValue + of type String, Number, Binary, String Set, Number Set, or Binary + Set. If an item contains an AttributeValue of a different type than + the one specified in the request, the value does not match. For + example, `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` + does not equal `{"NS":["6", "2", "1"]}`. >
  • + + `NE` : Not equal. AttributeValueList can contain only one + AttributeValue of type String, Number, Binary, String Set, Number + Set, or Binary Set. If an item contains an AttributeValue of a + different type than the one specified in the request, the value + does not match. For example, `{"S":"6"}` does not equal + `{"N":"6"}`. Also, `{"N":"6"}` does not equal `{"NS":["6", "2", + "1"]}`. >
  • + + `LE` : Less than or equal. AttributeValueList can contain only one + AttributeValue of type String, Number, or Binary (not a set). If an + item contains an AttributeValue of a different type than the one + specified in the request, the value does not match. For example, + `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not + compare to `{"NS":["6", "2", "1"]}`. >
  • + + `LT` : Less than. AttributeValueList can contain only one + AttributeValue of type String, Number, or Binary (not a set). If an + item contains an AttributeValue of a different type than the one + specified in the request, the value does not match. For example, + `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not + compare to `{"NS":["6", "2", "1"]}`. >
  • + + `GE` : Greater than or equal. AttributeValueList can contain only one + AttributeValue of type String, Number, or Binary (not a set). If an + item contains an AttributeValue of a different type than the one + specified in the request, the value does not match. For example, + `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not + compare to `{"NS":["6", "2", "1"]}`. >
  • + + `GT` : Greater than. AttributeValueList can contain only one + AttributeValue of type String, Number, or Binary (not a set). If an + item contains an AttributeValue of a different type than the one + specified in the request, the value does not match. For example, + `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not + compare to `{"NS":["6", "2", "1"]}`. >
  • + + `NOT_NULL` : The attribute exists. + + `NULL` : The attribute does not exist. + + `CONTAINS` : checks for a subsequence, or value in a set. + AttributeValueList can contain only one AttributeValue of type + String, Number, or Binary (not a set). If the target attribute of + the comparison is a String, then the operation checks for a + substring match. If the target attribute of the comparison is + Binary, then the operation looks for a subsequence of the target + that matches the input. If the target attribute of the comparison + is a set ("SS", "NS", or "BS"), then the operation checks for a + member of the set (not as a substring). + + `NOT_CONTAINS` : checks for absence of a subsequence, or absence of a + value in a set. AttributeValueList can contain only one + AttributeValue of type String, Number, or Binary (not a set). If + the target attribute of the comparison is a String, then the + operation checks for the absence of a substring match. If the + target attribute of the comparison is Binary, then the operation + checks for the absence of a subsequence of the target that matches + the input. If the target attribute of the comparison is a set + ("SS", "NS", or "BS"), then the operation checks for the absence of + a member of the set (not as a substring). + + `BEGINS_WITH` : checks for a prefix. AttributeValueList can contain + only one AttributeValue of type String or Binary (not a Number or a + set). The target attribute of the comparison must be a String or + Binary (not a Number or a set). >
  •
+
+ + `IN` : checks for exact matches. AttributeValueList can contain more
+ than one AttributeValue of type String, Number, or Binary (not a
+ set). The target attribute of the comparison must be of the same
+ type and exact value to match. A String never matches a String set.
+ + `BETWEEN` : Greater than or equal to the first value, and less than
+ or equal to the second value. AttributeValueList must contain two
+ AttributeValue elements of the same type, either String, Number, or
+ Binary (not a set). A target attribute matches if the target value
+ is greater than, or equal to, the first element and less than, or
+ equal to, the second element. If an item contains an AttributeValue
+ of a different type than the one specified in the request, the
+ value does not match. For example, `{"S":"6"}` does not compare to
+ `{"N":"6"}`. Also, `{"N":"6"}` does not compare to `{"NS":["6",
+ "2", "1"]}`
+
+
+
+ For usage examples of AttributeValueList and ComparisonOperator , see
+ `Conditional Expressions`_ in the Amazon DynamoDB Developer Guide.
+
+
+ For backward compatibility with previous DynamoDB releases, the
+ following parameters can be used instead of AttributeValueList and
+ ComparisonOperator :
+
+
+ + Value - A value for DynamoDB to compare with an attribute.
+ + Exists - Causes DynamoDB to evaluate the value before attempting the
+ conditional operation:
+
+ + If Exists is `True`, DynamoDB will check to see if that attribute
+ value already exists in the table. If it is found, then the
+ condition evaluates to true; otherwise the condition evaluates to
+ false.
+ + If Exists is `False`, DynamoDB assumes that the attribute value does
+ not exist in the table. If in fact the value does not exist, then
+ the assumption is valid and the condition evaluates to true. If the
+ value is found, despite the assumption that it does not exist, the
+ condition evaluates to false.
+
+
+
+ Even though DynamoDB continues to accept the Value and Exists
+ parameters, they are now deprecated. We recommend that you use
+ AttributeValueList and ComparisonOperator instead, since they allow
+ you to construct a much wider range of conditions.
+
+ The Value and Exists parameters are incompatible with
+ AttributeValueList and ComparisonOperator . If you attempt to use
+ both sets of parameters at once, DynamoDB will throw a
+ ValidationException .

 :type return_values: string
 :param return_values:
@@ -714,15 +1160,31 @@ def put_item(self, table_name, item, expected=None, return_values=None,
 the content of the old item is returned.

 :type return_consumed_capacity: string
- :param return_consumed_capacity: If set to `TOTAL`, ConsumedCapacity is
- included in the response; if set to `NONE` (the default),
- ConsumedCapacity is not included.
+ :param return_consumed_capacity: If set to `TOTAL`, the response
+ includes ConsumedCapacity data for tables and indexes. If set to
+ `INDEXES`, the response includes ConsumedCapacity for indexes. If
+ set to `NONE` (the default), ConsumedCapacity is not included in
+ the response.

 :type return_item_collection_metrics: string
 :param return_item_collection_metrics: If set to `SIZE`, statistics
 about item collections, if any, that were modified during the
 operation are returned in the response. If set to `NONE` (the
- default), no statistics are returned..
+ default), no statistics are returned.
+
+ :type conditional_operator: string
+ :param conditional_operator: A logical operator to apply to the
+ conditions in the Expected map:
+
+
+ + `AND` - If all of the conditions evaluate to true, then the entire
+ map evaluates to true.
+ + `OR` - If at least one of the conditions evaluates to true, then the
+ entire map evaluates to true.
+
+
+ If you omit ConditionalOperator , then `AND` is the default.
+
+ The operation will succeed only if the entire map evaluates to true.
""" params = {'TableName': table_name, 'Item': item, } @@ -734,13 +1196,16 @@ def put_item(self, table_name, item, expected=None, return_values=None, params['ReturnConsumedCapacity'] = return_consumed_capacity if return_item_collection_metrics is not None: params['ReturnItemCollectionMetrics'] = return_item_collection_metrics + if conditional_operator is not None: + params['ConditionalOperator'] = conditional_operator return self.make_request(action='PutItem', body=json.dumps(params)) - def query(self, table_name, index_name=None, select=None, + def query(self, table_name, key_conditions, index_name=None, select=None, attributes_to_get=None, limit=None, consistent_read=None, - key_conditions=None, scan_index_forward=None, - exclusive_start_key=None, return_consumed_capacity=None): + query_filter=None, conditional_operator=None, + scan_index_forward=None, exclusive_start_key=None, + return_consumed_capacity=None): """ A Query operation directly accesses items from a table using the table primary key, or from an index using the index key. @@ -761,15 +1226,20 @@ def query(self, table_name, index_name=None, select=None, and a LastEvaluatedKey . The LastEvaluatedKey is only provided if the results exceed 1 MB, or if you have used Limit . - To request a strongly consistent result, set ConsistentRead to - true. + You can query a table, a local secondary index, or a global + secondary index. For a query on a table or on a local + secondary index, you can set ConsistentRead to true and obtain + a strongly consistent result. Global secondary indexes support + eventually consistent reads only, so do not specify + ConsistentRead when querying a global secondary index. :type table_name: string :param table_name: The name of the table containing the requested items. :type index_name: string - :param index_name: The name of an index on the table to query. + :param index_name: The name of an index to query. 
This can be any local
+ secondary index or global secondary index on the table.

 :type select: string
 :param select: The attributes to be returned in the result. You can
@@ -777,31 +1247,35 @@ def query(self, table_name, index_name=None, select=None,
 of matching items, or in the case of an index, some or all of the
 attributes projected into the index.

- + `ALL_ATTRIBUTES`: Returns all of the item attributes. For a table,
- this is the default. For an index, this mode causes Amazon DynamoDB
- to fetch the full item from the table for each matching item in the
- index. If the index is configured to project all item attributes,
- the matching items will not be fetched from the table. Fetching
- items from the table incurs additional throughput cost and latency.
+ + `ALL_ATTRIBUTES`: Returns all of the item attributes from the
+ specified table or index. If you are querying a local secondary
+ index, then for each matching item in the index DynamoDB will fetch
+ the entire item from the parent table. If the index is configured
+ to project all item attributes, then all of the data can be
+ obtained from the local secondary index, and no fetching is
+ required.
 + `ALL_PROJECTED_ATTRIBUTES`: Allowed only when querying an index.
 Retrieves all attributes which have been projected into the index.
 If the index is configured to project all attributes, this is
- equivalent to specifying ALL_ATTRIBUTES .
+ equivalent to specifying `ALL_ATTRIBUTES`.
 + `COUNT`: Returns the number of matching items, rather than the
 matching items themselves.
 + `SPECIFIC_ATTRIBUTES` : Returns only the attributes listed in
 AttributesToGet . This is equivalent to specifying AttributesToGet
- without specifying any value for Select . If you are querying an
- index and request only attributes that are projected into that
- index, the operation will read only the index and not the table.
If - any of the requested attributes are not projected into the index, - Amazon DynamoDB will need to fetch each matching item from the - table. This extra fetching incurs additional throughput cost and - latency. - - - When neither Select nor AttributesToGet are specified, Amazon DynamoDB - defaults to `ALL_ATTRIBUTES` when accessing a table, and + without specifying any value for Select . If you are querying a + local secondary index and request only attributes that are + projected into that index, the operation will read only the index + and not the table. If any of the requested attributes are not + projected into the local secondary index, DynamoDB will fetch each + of these attributes from the parent table. This extra fetching + incurs additional throughput cost and latency. If you are querying + a global secondary index, you can only request attributes that are + projected into the index. Global secondary index queries cannot + fetch attributes from the parent table. + + + If neither Select nor AttributesToGet are specified, DynamoDB defaults + to `ALL_ATTRIBUTES` when accessing a table, and `ALL_PROJECTED_ATTRIBUTES` when accessing an index. You cannot use both Select and AttributesToGet together in a single request, unless the value for Select is `SPECIFIC_ATTRIBUTES`. (This usage @@ -813,75 +1287,87 @@ def query(self, table_name, index_name=None, select=None, retrieve. If no attribute names are specified, then all attributes will be returned. If any of the requested attributes are not found, they will not appear in the result. - If you are querying an index and request only attributes that are - projected into that index, the operation will read only the index - and not the table. If any of the requested attributes are not - projected into the index, Amazon DynamoDB will need to fetch each - matching item from the table. This extra fetching incurs additional - throughput cost and latency. 
+ Note that AttributesToGet has no effect on provisioned throughput + consumption. DynamoDB determines capacity units consumed based on + item size, not on the amount of data that is returned to an + application. You cannot use both AttributesToGet and Select together in a Query request, unless the value for Select is `SPECIFIC_ATTRIBUTES`. (This usage is equivalent to specifying AttributesToGet without any value for Select .) + If you are querying a local secondary index and request only attributes + that are projected into that index, the operation will read only + the index and not the table. If any of the requested attributes are + not projected into the local secondary index, DynamoDB will fetch + each of these attributes from the parent table. This extra fetching + incurs additional throughput cost and latency. + + If you are querying a global secondary index, you can only request + attributes that are projected into the index. Global secondary + index queries cannot fetch attributes from the parent table. + :type limit: integer :param limit: The maximum number of items to evaluate (not necessarily - the number of matching items). If Amazon DynamoDB processes the - number of items up to the limit while processing the results, it - stops the operation and returns the matching values up to that - point, and a LastEvaluatedKey to apply in a subsequent operation, - so that you can pick up where you left off. Also, if the processed - data set size exceeds 1 MB before Amazon DynamoDB reaches this - limit, it stops the operation and returns the matching values up to - the limit, and a LastEvaluatedKey to apply in a subsequent - operation to continue the operation. For more information see - `Query and Scan`_ in the Amazon DynamoDB Developer Guide. + the number of matching items). 
If DynamoDB processes the number of + items up to the limit while processing the results, it stops the + operation and returns the matching values up to that point, and a + LastEvaluatedKey to apply in a subsequent operation, so that you + can pick up where you left off. Also, if the processed data set + size exceeds 1 MB before DynamoDB reaches this limit, it stops the + operation and returns the matching values up to the limit, and a + LastEvaluatedKey to apply in a subsequent operation to continue the + operation. For more information, see `Query and Scan`_ in the + Amazon DynamoDB Developer Guide. :type consistent_read: boolean :param consistent_read: If set to `True`, then the operation uses strongly consistent reads; otherwise, eventually consistent reads are used. + Strongly consistent reads are not supported on global secondary + indexes. If you query a global secondary index with ConsistentRead + set to `True`, you will receive an error message. :type key_conditions: map - :param key_conditions: - The selection criteria for the query. - + :param key_conditions: The selection criteria for the query. For a query on a table, you can only have conditions on the table primary key attributes. You must specify the hash key attribute name and value as an `EQ` condition. You can optionally specify a second condition, referring to the range key attribute. - For a query on a secondary index, you can only have conditions on the - index key attributes. You must specify the index hash attribute - name and value as an EQ condition. You can optionally specify a - second condition, referring to the index key range attribute. + For a query on an index, you can only have conditions on the index key + attributes. You must specify the index hash attribute name and + value as an EQ condition. You can optionally specify a second + condition, referring to the index key range attribute. 
- Multiple conditions are evaluated using "AND"; in other words, all of - the conditions must be met in order for an item to appear in the - results results. + If you specify more than one condition in the KeyConditions map, then + by default all of the conditions must evaluate to true. In other + words, the conditions are ANDed together. (You can use the + ConditionalOperator parameter to OR the conditions instead. If you + do this, then at least one of the conditions must evaluate to true, + rather than all of them.) Each KeyConditions element consists of an attribute name to compare, along with the following: + AttributeValueList - One or more values to evaluate against the - supplied attribute. This list contains exactly one value, except - for a `BETWEEN` or `IN` comparison, in which case the list contains - two values. For type Number, value comparisons are numeric. String - value comparisons for greater than, equals, or less than are based - on ASCII character code values. For example, `a` is greater than - `A`, and `aa` is greater than `B`. For a list of code values, see + supplied attribute. The number of values in the list depends on the + ComparisonOperator being used. For type Number, value comparisons + are numeric. String value comparisons for greater than, equals, or + less than are based on ASCII character code values. For example, + `a` is greater than `A`, and `aa` is greater than `B`. For a list + of code values, see `http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_. - For Binary, Amazon DynamoDB treats each byte of the binary data as + For Binary, DynamoDB treats each byte of the binary data as unsigned when it compares binary values, for example when evaluating query expressions. + ComparisonOperator - A comparator for evaluating attributes. For - example, equals, greater than, less than, etc. 
Valid comparison - operators for Query: `EQ | LE | LT | GE | GT | BEGINS_WITH | - BETWEEN` For information on specifying data types in JSON, see - `JSON Data Format`_ in the Amazon DynamoDB Developer Guide. The - following are descriptions of each comparison operator. + example, equals, greater than, less than, etc. For KeyConditions , + only the following comparison operators are supported: `EQ | LE | + LT | GE | GT | BEGINS_WITH | BETWEEN` The following are + descriptions of these comparison operators. + `EQ` : Equal. AttributeValueList can contain only one AttributeValue of type String, Number, or Binary (not a set). If an item contains @@ -890,33 +1376,33 @@ def query(self, table_name, index_name=None, select=None, not equal `{"N":"6"}`. Also, `{"N":"6"}` does not equal `{"NS":["6", "2", "1"]}`. + `LE` : Less than or equal. AttributeValueList can contain only one - AttributeValue of type String, Number, or Binary (not a set). If an - item contains an AttributeValue of a different type than the one - specified in the request, the value does not match. For example, - `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not - compare to `{"NS":["6", "2", "1"]}`. + AttributeValue of type String, Number, or Binary (not a set). If an + item contains an AttributeValue of a different type than the one + specified in the request, the value does not match. For example, + `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not + compare to `{"NS":["6", "2", "1"]}`. >
+ `LT` : Less than. AttributeValueList can contain only one
-        AttributeValue of type String, Number, or Binary (not a set). If an
-        item contains an AttributeValue of a different type than the one
-        specified in the request, the value does not match. For example,
-        `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not
-        compare to `{"NS":["6", "2", "1"]}`.
+        AttributeValue of type String, Number, or Binary (not a set). If an
+        item contains an AttributeValue of a different type than the one
+        specified in the request, the value does not match. For example,
+        `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not
+        compare to `{"NS":["6", "2", "1"]}`.
+ `GE` : Greater than or equal. AttributeValueList can contain only one
-        AttributeValue of type String, Number, or Binary (not a set). If an
-        item contains an AttributeValue of a different type than the one
-        specified in the request, the value does not match. For example,
-        `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not
-        compare to `{"NS":["6", "2", "1"]}`.
+        AttributeValue of type String, Number, or Binary (not a set). If an
+        item contains an AttributeValue of a different type than the one
+        specified in the request, the value does not match. For example,
+        `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not
+        compare to `{"NS":["6", "2", "1"]}`.
+ `GT` : Greater than. AttributeValueList can contain only one
-        AttributeValue of type String, Number, or Binary (not a set). If an
-        item contains an AttributeValue of a different type than the one
-        specified in the request, the value does not match. For example,
-        `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not
-        compare to `{"NS":["6", "2", "1"]}`.
+        AttributeValue of type String, Number, or Binary (not a set). If an
+        item contains an AttributeValue of a different type than the one
+        specified in the request, the value does not match. For example,
+        `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not
+        compare to `{"NS":["6", "2", "1"]}`.
+ `BEGINS_WITH` : checks for a prefix. AttributeValueList can contain
-        only one AttributeValue of type String or Binary (not a Number or a
-        set). The target attribute of the comparison must be a String or
-        Binary (not a Number or a set).
+        only one AttributeValue of type String or Binary (not a Number or a
+        set). The target attribute of the comparison must be a String or
+        Binary (not a Number or a set).
  • + `BETWEEN` : Greater than or equal to the first value, and less than or equal to the second value. AttributeValueList must contain two AttributeValue elements of the same type, either String, Number, or @@ -928,14 +1414,68 @@ def query(self, table_name, index_name=None, select=None, `{"N":"6"}`. Also, `{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}` + + + For usage examples of AttributeValueList and ComparisonOperator , see + `Conditional Expressions`_ in the Amazon DynamoDB Developer Guide. + + :type query_filter: map + :param query_filter: + Evaluates the query results and returns only the desired values. + + If you specify more than one condition in the QueryFilter map, then by + default all of the conditions must evaluate to true. In other + words, the conditions are ANDed together. (You can use the + ConditionalOperator parameter to OR the conditions instead. If you + do this, then at least one of the conditions must evaluate to true, + rather than all of them.) + + Each QueryFilter element consists of an attribute name to compare, + along with the following: + + + + AttributeValueList - One or more values to evaluate against the + supplied attribute. The number of values in the list depends on the + ComparisonOperator being used. For type Number, value comparisons + are numeric. String value comparisons for greater than, equals, or + less than are based on ASCII character code values. For example, + `a` is greater than `A`, and `aa` is greater than `B`. For a list + of code values, see + `http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_. + For Binary, DynamoDB treats each byte of the binary data as + unsigned when it compares binary values, for example when + evaluating query expressions. For information on specifying data + types in JSON, see `JSON Data Format`_ in the Amazon DynamoDB + Developer Guide. + + ComparisonOperator - A comparator for evaluating attributes. For + example, equals, greater than, less than, etc. 
The following + comparison operators are available: `EQ | NE | LE | LT | GE | GT | + NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | + BETWEEN` For complete descriptions of all comparison operators, see + `API_Condition.html`_. + + :type conditional_operator: string + :param conditional_operator: A logical operator to apply to the + conditions in the QueryFilter map: + + + `AND` - If all of the conditions evaluate to true, then the entire + map evaluates to true. + + `OR` - If at least one of the conditions evaluate to true, then the + entire map evaluates to true. + + + If you omit ConditionalOperator , then `AND` is the default. + + The operation will succeed only if the entire map evaluates to true. + :type scan_index_forward: boolean :param scan_index_forward: Specifies ascending (true) or descending - (false) traversal of the index. Amazon DynamoDB returns results - reflecting the requested order determined by the range key. If the - data type is Number, the results are returned in numeric order. For - String, the results are returned in order of ASCII character code - values. For Binary, Amazon DynamoDB treats each byte of the binary - data as unsigned when it compares binary values. + (false) traversal of the index. DynamoDB returns results reflecting + the requested order determined by the range key. If the data type + is Number, the results are returned in numeric order. For String, + the results are returned in order of ASCII character code values. + For Binary, DynamoDB treats each byte of the binary data as + unsigned when it compares binary values. If ScanIndexForward is not specified, the results are returned in ascending order. @@ -947,12 +1487,17 @@ def query(self, table_name, index_name=None, select=None, No set data types are allowed. 
:type return_consumed_capacity: string - :param return_consumed_capacity: If set to `TOTAL`, ConsumedCapacity is - included in the response; if set to `NONE` (the default), - ConsumedCapacity is not included. + :param return_consumed_capacity: If set to `TOTAL`, the response + includes ConsumedCapacity data for tables and indexes. If set to + `INDEXES`, the response includes ConsumedCapacity for indexes. If + set to `NONE` (the default), ConsumedCapacity is not included in + the response. """ - params = {'TableName': table_name, } + params = { + 'TableName': table_name, + 'KeyConditions': key_conditions, + } if index_name is not None: params['IndexName'] = index_name if select is not None: @@ -963,8 +1508,10 @@ def query(self, table_name, index_name=None, select=None, params['Limit'] = limit if consistent_read is not None: params['ConsistentRead'] = consistent_read - if key_conditions is not None: - params['KeyConditions'] = key_conditions + if query_filter is not None: + params['QueryFilter'] = query_filter + if conditional_operator is not None: + params['ConditionalOperator'] = conditional_operator if scan_index_forward is not None: params['ScanIndexForward'] = scan_index_forward if exclusive_start_key is not None: @@ -975,14 +1522,13 @@ def query(self, table_name, index_name=None, select=None, body=json.dumps(params)) def scan(self, table_name, attributes_to_get=None, limit=None, - select=None, scan_filter=None, exclusive_start_key=None, - return_consumed_capacity=None, total_segments=None, - segment=None): + select=None, scan_filter=None, conditional_operator=None, + exclusive_start_key=None, return_consumed_capacity=None, + total_segments=None, segment=None): """ The Scan operation returns one or more items and item attributes by accessing every item in the table. To have - Amazon DynamoDB return fewer items, you can provide a - ScanFilter . + DynamoDB return fewer items, you can provide a ScanFilter . 
If the total number of scanned items exceeds the maximum data set size limit of 1 MB, the scan stops and results are @@ -1008,160 +1554,91 @@ def scan(self, table_name, attributes_to_get=None, limit=None, retrieve. If no attribute names are specified, then all attributes will be returned. If any of the requested attributes are not found, they will not appear in the result. + Note that AttributesToGet has no effect on provisioned throughput + consumption. DynamoDB determines capacity units consumed based on + item size, not on the amount of data that is returned to an + application. :type limit: integer :param limit: The maximum number of items to evaluate (not necessarily - the number of matching items). If Amazon DynamoDB processes the - number of items up to the limit while processing the results, it - stops the operation and returns the matching values up to that - point, and a LastEvaluatedKey to apply in a subsequent operation, - so that you can pick up where you left off. Also, if the processed - data set size exceeds 1 MB before Amazon DynamoDB reaches this - limit, it stops the operation and returns the matching values up to - the limit, and a LastEvaluatedKey to apply in a subsequent - operation to continue the operation. For more information see - `Query and Scan`_ in the Amazon DynamoDB Developer Guide. + the number of matching items). If DynamoDB processes the number of + items up to the limit while processing the results, it stops the + operation and returns the matching values up to that point, and a + LastEvaluatedKey to apply in a subsequent operation, so that you + can pick up where you left off. Also, if the processed data set + size exceeds 1 MB before DynamoDB reaches this limit, it stops the + operation and returns the matching values up to the limit, and a + LastEvaluatedKey to apply in a subsequent operation to continue the + operation. For more information, see `Query and Scan`_ in the + Amazon DynamoDB Developer Guide. 
:type select: string :param select: The attributes to be returned in the result. You can - retrieve all item attributes, specific item attributes, the count - of matching items, or in the case of an index, some or all of the - attributes projected into the index. + retrieve all item attributes, specific item attributes, or the + count of matching items. - + `ALL_ATTRIBUTES`: Returns all of the item attributes. For a table, - this is the default. For an index, this mode causes Amazon DynamoDB - to fetch the full item from the table for each matching item in the - index. If the index is configured to project all item attributes, - the matching items will not be fetched from the table. Fetching - items from the table incurs additional throughput cost and latency. - + `ALL_PROJECTED_ATTRIBUTES`: Retrieves all attributes which have been - projected into the index. If the index is configured to project all - attributes, this is equivalent to specifying ALL_ATTRIBUTES . + + `ALL_ATTRIBUTES`: Returns all of the item attributes. + `COUNT`: Returns the number of matching items, rather than the matching items themselves. + `SPECIFIC_ATTRIBUTES` : Returns only the attributes listed in AttributesToGet . This is equivalent to specifying AttributesToGet - without specifying any value for Select . If you are querying an - index and request only attributes that are projected into that - index, the operation will read only the index and not the table. If - any of the requested attributes are not projected into the index, - Amazon DynamoDB will need to fetch each matching item from the - table. This extra fetching incurs additional throughput cost and - latency. + without specifying any value for Select . - When neither Select nor AttributesToGet are specified, Amazon DynamoDB - defaults to `ALL_ATTRIBUTES` when accessing a table, and - `ALL_PROJECTED_ATTRIBUTES` when accessing an index. 
You cannot use - both Select and AttributesToGet together in a single request, - unless the value for Select is `SPECIFIC_ATTRIBUTES`. (This usage - is equivalent to specifying AttributesToGet without any value for - Select .) + If neither Select nor AttributesToGet are specified, DynamoDB defaults + to `ALL_ATTRIBUTES`. You cannot use both Select and AttributesToGet + together in a single request, unless the value for Select is + `SPECIFIC_ATTRIBUTES`. (This usage is equivalent to specifying + AttributesToGet without any value for Select .) :type scan_filter: map :param scan_filter: Evaluates the scan results and returns only the desired values. - Multiple conditions are treated as "AND" operations: all conditions - must be met to be included in the results. - Each ScanConditions element consists of an attribute name to compare, - along with the following: + If you specify more than one condition in the ScanFilter map, then by + default all of the conditions must evaluate to true. In other + words, the conditions are ANDed together. (You can use the + ConditionalOperator parameter to OR the conditions instead. If you + do this, then at least one of the conditions must evaluate to true, + rather than all of them.) + + Each ScanFilter element consists of an attribute name to compare, along + with the following: + AttributeValueList - One or more values to evaluate against the - supplied attribute. This list contains exactly one value, except - for a `BETWEEN` or `IN` comparison, in which case the list contains - two values. For type Number, value comparisons are numeric. String - value comparisons for greater than, equals, or less than are based - on ASCII character code values. For example, `a` is greater than - `A`, and `aa` is greater than `B`. For a list of code values, see + supplied attribute. The number of values in the list depends on the + ComparisonOperator being used. For type Number, value comparisons + are numeric. 
String value comparisons for greater than, equals, or + less than are based on ASCII character code values. For example, + `a` is greater than `A`, and `aa` is greater than `B`. For a list + of code values, see `http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_. - For Binary, Amazon DynamoDB treats each byte of the binary data as + For Binary, DynamoDB treats each byte of the binary data as unsigned when it compares binary values, for example when - evaluating query expressions. + evaluating query expressions. For information on specifying data + types in JSON, see `JSON Data Format`_ in the Amazon DynamoDB + Developer Guide. + ComparisonOperator - A comparator for evaluating attributes. For - example, equals, greater than, less than, etc. Valid comparison - operators for Scan: `EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL - | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN` For - information on specifying data types in JSON, see `JSON Data - Format`_ in the Amazon DynamoDB Developer Guide. The following are - descriptions of each comparison operator. + example, equals, greater than, less than, etc. The following + comparison operators are available: `EQ | NE | LE | LT | GE | GT | + NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | + BETWEEN` For complete descriptions of all comparison operators, see + `Condition`_. - + `EQ` : Equal. AttributeValueList can contain only one AttributeValue - of type String, Number, or Binary (not a set). If an item contains - an AttributeValue of a different type than the one specified in the - request, the value does not match. For example, `{"S":"6"}` does - not equal `{"N":"6"}`. Also, `{"N":"6"}` does not equal - `{"NS":["6", "2", "1"]}`. - + `NE` : Not equal. AttributeValueList can contain only one - AttributeValue of type String, Number, or Binary (not a set). If an - item contains an AttributeValue of a different type than the one - specified in the request, the value does not match. 
For example, - `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not - equal `{"NS":["6", "2", "1"]}`. - + `LE` : Less than or equal. AttributeValueList can contain only one - AttributeValue of type String, Number, or Binary (not a set). If an - item contains an AttributeValue of a different type than the one - specified in the request, the value does not match. For example, - `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not - compare to `{"NS":["6", "2", "1"]}`. - + `LT` : Less than. AttributeValueList can contain only one - AttributeValue of type String, Number, or Binary (not a set). If an - item contains an AttributeValue of a different type than the one - specified in the request, the value does not match. For example, - `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not - compare to `{"NS":["6", "2", "1"]}`. - + `GE` : Greater than or equal. AttributeValueList can contain only one - AttributeValue of type String, Number, or Binary (not a set). If an - item contains an AttributeValue of a different type than the one - specified in the request, the value does not match. For example, - `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not - compare to `{"NS":["6", "2", "1"]}`. - + `GT` : Greater than. AttributeValueList can contain only one - AttributeValue of type String, Number, or Binary (not a set). If an - item contains an AttributeValue of a different type than the one - specified in the request, the value does not match. For example, - `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not - compare to `{"NS":["6", "2", "1"]}`. - + `NOT_NULL` : The attribute exists. - + `NULL` : The attribute does not exist. - + `CONTAINS` : checks for a subsequence, or value in a set. - AttributeValueList can contain only one AttributeValue of type - String, Number, or Binary (not a set). If the target attribute of - the comparison is a String, then the operation checks for a - substring match. 
If the target attribute of the comparison is - Binary, then the operation looks for a subsequence of the target - that matches the input. If the target attribute of the comparison - is a set ("SS", "NS", or "BS"), then the operation checks for a - member of the set (not as a substring). - + `NOT_CONTAINS` : checks for absence of a subsequence, or absence of a - value in a set. AttributeValueList can contain only one - AttributeValue of type String, Number, or Binary (not a set). If - the target attribute of the comparison is a String, then the - operation checks for the absence of a substring match. If the - target attribute of the comparison is Binary, then the operation - checks for the absence of a subsequence of the target that matches - the input. If the target attribute of the comparison is a set - ("SS", "NS", or "BS"), then the operation checks for the absence of - a member of the set (not as a substring). - + `BEGINS_WITH` : checks for a prefix. AttributeValueList can contain - only one AttributeValue of type String or Binary (not a Number or a - set). The target attribute of the comparison must be a String or - Binary (not a Number or a set). - + `IN` : checks for exact matches. AttributeValueList can contain more - than one AttributeValue of type String, Number, or Binary (not a - set). The target attribute of the comparison must be of the same - type and exact value to match. A String never matches a String set. - + `BETWEEN` : Greater than or equal to the first value, and less than - or equal to the second value. AttributeValueList must contain two - AttributeValue elements of the same type, either String, Number, or - Binary (not a set). A target attribute matches if the target value - is greater than, or equal to, the first element and less than, or - equal to, the second element. If an item contains an AttributeValue - of a different type than the one specified in the request, the - value does not match. 
For example, `{"S":"6"}` does not compare to - `{"N":"6"}`. Also, `{"N":"6"}` does not compare to `{"NS":["6", - "2", "1"]}` + :type conditional_operator: string + :param conditional_operator: A logical operator to apply to the + conditions in the ScanFilter map: + + + `AND` - If all of the conditions evaluate to true, then the entire + map evaluates to true. + + `OR` - If at least one of the conditions evaluate to true, then the + entire map evaluates to true. + + + If you omit ConditionalOperator , then `AND` is the default. + + The operation will succeed only if the entire map evaluates to true. :type exclusive_start_key: map :param exclusive_start_key: The primary key of the first item that this @@ -1175,9 +1652,11 @@ def scan(self, table_name, attributes_to_get=None, limit=None, corresponding value of LastEvaluatedKey . :type return_consumed_capacity: string - :param return_consumed_capacity: If set to `TOTAL`, ConsumedCapacity is - included in the response; if set to `NONE` (the default), - ConsumedCapacity is not included. + :param return_consumed_capacity: If set to `TOTAL`, the response + includes ConsumedCapacity data for tables and indexes. If set to + `INDEXES`, the response includes ConsumedCapacity for indexes. If + set to `NONE` (the default), ConsumedCapacity is not included in + the response. 
:type total_segments: integer :param total_segments: For a parallel Scan request, TotalSegments @@ -1219,6 +1698,8 @@ def scan(self, table_name, attributes_to_get=None, limit=None, params['Select'] = select if scan_filter is not None: params['ScanFilter'] = scan_filter + if conditional_operator is not None: + params['ConditionalOperator'] = conditional_operator if exclusive_start_key is not None: params['ExclusiveStartKey'] = exclusive_start_key if return_consumed_capacity is not None: @@ -1231,8 +1712,8 @@ def scan(self, table_name, attributes_to_get=None, limit=None, body=json.dumps(params)) def update_item(self, table_name, key, attribute_updates=None, - expected=None, return_values=None, - return_consumed_capacity=None, + expected=None, conditional_operator=None, + return_values=None, return_consumed_capacity=None, return_item_collection_metrics=None): """ Edits an existing item's attributes, or inserts a new item if @@ -1250,8 +1731,12 @@ def update_item(self, table_name, key, attribute_updates=None, :param table_name: The name of the table containing the item to update. :type key: map - :param key: The primary key that defines the item. Each element + :param key: The primary key of the item to be updated. Each element consists of an attribute name and a value for that attribute. + For the primary key, you must provide all of the attributes. For + example, with a hash type primary key, you only need to specify the + hash attribute. For a hash-and-range type primary key, you must + specify both the hash attribute and the range attribute. :type attribute_updates: map :param attribute_updates: The names of attributes to be modified, the @@ -1294,16 +1779,16 @@ def update_item(self, table_name, key, attribute_updates=None, If Value is a negative number, then it is subtracted from the existing attribute. If you use `ADD` to increment or decrement a number value for an item that doesn't exist before the update, - Amazon DynamoDB uses 0 as the initial value. 
In addition, if you - use `ADD` to update an existing item, and intend to increment or - decrement an attribute value which does not yet exist, Amazon - DynamoDB uses `0` as the initial value. For example, suppose that - the item you want to update does not yet have an attribute named - itemcount , but you decide to `ADD` the number `3` to this - attribute anyway, even though it currently does not exist. Amazon - DynamoDB will create the itemcount attribute, set its initial value - to `0`, and finally add `3` to it. The result will be a new - itemcount attribute in the item, with a value of `3`. + DynamoDB uses 0 as the initial value. In addition, if you use `ADD` + to update an existing item, and intend to increment or decrement an + attribute value which does not yet exist, DynamoDB uses `0` as the + initial value. For example, suppose that the item you want to + update does not yet have an attribute named itemcount , but you + decide to `ADD` the number `3` to this attribute anyway, even + though it currently does not exist. DynamoDB will create the + itemcount attribute, set its initial value to `0`, and finally add + `3` to it. The result will be a new itemcount attribute in the + item, with a value of `3`. + If the existing data type is a set, and if the Value is also a set, then the Value is added to the existing set. (This is a set operation, not mathematical addition.) For example, if the @@ -1319,13 +1804,13 @@ def update_item(self, table_name, key, attribute_updates=None, number or is a set. Do not use `ADD` for any other data types. **If no item with the specified Key is found:** - + `PUT` - Amazon DynamoDB creates a new item with the specified primary - key, and then adds the attribute. + + `PUT` - DynamoDB creates a new item with the specified primary key, + and then adds the attribute. + `DELETE` - Nothing happens; there is no attribute to delete. 
- + `ADD` - Amazon DynamoDB creates an item with the supplied primary key - and number (or set of numbers) for the attribute value. The only - data types allowed are number and number set; no other data types - can be specified. + + `ADD` - DynamoDB creates an item with the supplied primary key and + number (or set of numbers) for the attribute value. The only data + types allowed are number and number set; no other data types can be + specified. @@ -1334,48 +1819,175 @@ def update_item(self, table_name, key, attribute_updates=None, the table's attribute definition. :type expected: map - :param expected: A map of attribute/condition pairs. This is the - conditional block for the UpdateItem operation. All the conditions - must be met for the operation to succeed. - Expected allows you to provide an attribute name, and whether or not - Amazon DynamoDB should check to see if the attribute value already - exists; or if the attribute value exists and has a particular value - before changing it. - - Each item in Expected represents an attribute name for Amazon DynamoDB - to check, along with the following: - - - + Value - The attribute value for Amazon DynamoDB to check. - + Exists - Causes Amazon DynamoDB to evaluate the value before - attempting a conditional operation: - - + If Exists is `True`, Amazon DynamoDB will check to see if that - attribute value already exists in the table. If it is found, then - the operation succeeds. If it is not found, the operation fails - with a ConditionalCheckFailedException . - + If Exists is `False`, Amazon DynamoDB assumes that the attribute - value does not exist in the table. If in fact the value does not - exist, then the assumption is valid and the operation succeeds. If - the value is found, despite the assumption that it does not exist, - the operation fails with a ConditionalCheckFailedException . - The default setting for Exists is `True`. 
If you supply a Value all by - itself, Amazon DynamoDB assumes the attribute exists: You don't - have to set Exists to `True`, because it is implied. Amazon - DynamoDB returns a ValidationException if: - - + Exists is `True` but there is no Value to check. (You expect a value - to exist, but don't specify what that value is.) - + Exists is `False` but you also specify a Value . (You cannot expect - an attribute to have a value, while also expecting it not to - exist.) - - - - If you specify more than one condition for Exists , then all of the - conditions must evaluate to true. (In other words, the conditions - are ANDed together.) Otherwise, the conditional operation will - fail. + :param expected: + A map of attribute/condition pairs. This is the conditional block for + the UpdateItem operation. + + Each element of Expected consists of an attribute name, a comparison + operator, and one or more values. DynamoDB compares the attribute + with the value(s) you supplied, using the comparison operator. For + each Expected element, the result of the evaluation is either true + or false. + + If you specify more than one element in the Expected map, then by + default all of the conditions must evaluate to true. In other + words, the conditions are ANDed together. (You can use the + ConditionalOperator parameter to OR the conditions instead. If you + do this, then at least one of the conditions must evaluate to true, + rather than all of them.) + + If the Expected map evaluates to true, then the conditional operation + succeeds; otherwise, it fails. + + Each item in Expected represents an attribute name for DynamoDB to + check, along with an AttributeValueList and a ComparisonOperator : + + + + AttributeValueList - One or more values to evaluate against the + supplied attribute. The number of values in the list depends on the + ComparisonOperator being used. For type Number, value comparisons + are numeric. 
String value comparisons for greater than, equals, or + less than are based on ASCII character code values. For example, + `a` is greater than `A`, and `aa` is greater than `B`. For a list + of code values, see + `http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_. + For Binary, DynamoDB treats each byte of the binary data as + unsigned when it compares binary values, for example when + evaluating query expressions. + + ComparisonOperator - A comparator for evaluating attributes in the + AttributeValueList . When performing the comparison, DynamoDB uses + strongly consistent reads. The following comparison operators are + available: `EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | + CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN` The following + are descriptions of each comparison operator. + + + `EQ` : Equal. AttributeValueList can contain only one AttributeValue + of type String, Number, Binary, String Set, Number Set, or Binary + Set. If an item contains an AttributeValue of a different type than + the one specified in the request, the value does not match. For + example, `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` + does not equal `{"NS":["6", "2", "1"]}`. >
+ + `NE` : Not equal. AttributeValueList can contain only one
+ AttributeValue of type String, Number, Binary, String Set, Number
+ Set, or Binary Set. If an item contains an AttributeValue of a
+ different type than the one specified in the request, the value
+ does not match. For example, `{"S":"6"}` does not equal
+ `{"N":"6"}`. Also, `{"N":"6"}` does not equal `{"NS":["6", "2",
+ "1"]}`.
+ + `LE` : Less than or equal. AttributeValueList can contain only one
+ AttributeValue of type String, Number, or Binary (not a set). If an
+ item contains an AttributeValue of a different type than the one
+ specified in the request, the value does not match. For example,
+ `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not
+ compare to `{"NS":["6", "2", "1"]}`.
+ + `LT` : Less than. AttributeValueList can contain only one
+ AttributeValue of type String, Number, or Binary (not a set). If an
+ item contains an AttributeValue of a different type than the one
+ specified in the request, the value does not match. For example,
+ `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not
+ compare to `{"NS":["6", "2", "1"]}`.
+ + `GE` : Greater than or equal. AttributeValueList can contain only one
+ AttributeValue of type String, Number, or Binary (not a set). If an
+ item contains an AttributeValue of a different type than the one
+ specified in the request, the value does not match. For example,
+ `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not
+ compare to `{"NS":["6", "2", "1"]}`.
+ + `GT` : Greater than. AttributeValueList can contain only one
+ AttributeValue of type String, Number, or Binary (not a set). If an
+ item contains an AttributeValue of a different type than the one
+ specified in the request, the value does not match. For example,
+ `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not
+ compare to `{"NS":["6", "2", "1"]}`.
+ + `NOT_NULL` : The attribute exists.
+ + `NULL` : The attribute does not exist.
+ + `CONTAINS` : checks for a subsequence, or value in a set.
+ AttributeValueList can contain only one AttributeValue of type
+ String, Number, or Binary (not a set). If the target attribute of
+ the comparison is a String, then the operation checks for a
+ substring match. If the target attribute of the comparison is
+ Binary, then the operation looks for a subsequence of the target
+ that matches the input. If the target attribute of the comparison
+ is a set ("SS", "NS", or "BS"), then the operation checks for a
+ member of the set (not as a substring).
+ + `NOT_CONTAINS` : checks for absence of a subsequence, or absence of a
+ value in a set. AttributeValueList can contain only one
+ AttributeValue of type String, Number, or Binary (not a set). If
+ the target attribute of the comparison is a String, then the
+ operation checks for the absence of a substring match. If the
+ target attribute of the comparison is Binary, then the operation
+ checks for the absence of a subsequence of the target that matches
+ the input. If the target attribute of the comparison is a set
+ ("SS", "NS", or "BS"), then the operation checks for the absence of
+ a member of the set (not as a substring).
+ + `BEGINS_WITH` : checks for a prefix. AttributeValueList can contain
+ only one AttributeValue of type String or Binary (not a Number or a
+ set). The target attribute of the comparison must be a String or
+ Binary (not a Number or a set).
  • + + `IN` : checks for exact matches. AttributeValueList can contain more + than one AttributeValue of type String, Number, or Binary (not a + set). The target attribute of the comparison must be of the same + type and exact value to match. A String never matches a String set. + + `BETWEEN` : Greater than or equal to the first value, and less than + or equal to the second value. AttributeValueList must contain two + AttributeValue elements of the same type, either String, Number, or + Binary (not a set). A target attribute matches if the target value + is greater than, or equal to, the first element and less than, or + equal to, the second element. If an item contains an AttributeValue + of a different type than the one specified in the request, the + value does not match. For example, `{"S":"6"}` does not compare to + `{"N":"6"}`. Also, `{"N":"6"}` does not compare to `{"NS":["6", + "2", "1"]}` + + + + For usage examples of AttributeValueList and ComparisonOperator , see + `Conditional Expressions`_ in the Amazon DynamoDB Developer Guide. + + + For backward compatibility with previous DynamoDB releases, the + following parameters can be used instead of AttributeValueList and + ComparisonOperator : + + + + Value - A value for DynamoDB to compare with an attribute. + + Exists - Causes DynamoDB to evaluate the value before attempting the + conditional operation: + + + If Exists is `True`, DynamoDB will check to see if that attribute + value already exists in the table. If it is found, then the + condition evaluates to true; otherwise the condition evaluate to + false. + + If Exists is `False`, DynamoDB assumes that the attribute value does + not exist in the table. If in fact the value does not exist, then + the assumption is valid and the condition evaluates to true. If the + value is found, despite the assumption that it does not exist, the + condition evaluates to false. 
+ + + + Even though DynamoDB continues to accept the Value and Exists + parameters, they are now deprecated. We recommend that you use + AttributeValueList and ComparisonOperator instead, since they allow + you to construct a much wider range of conditions. + + The Value and Exists parameters are incompatible with + AttributeValueList and ComparisonOperator . If you attempt to use + both sets of parameters at once, DynamoDB will throw a + ValidationException . + + :type conditional_operator: string + :param conditional_operator: A logical operator to apply to the + conditions in the Expected map: + + + `AND` - If all of the conditions evaluate to true, then the entire + map evaluates to true. + + `OR` - If at least one of the conditions evaluate to true, then the + entire map evaluates to true. + + + If you omit ConditionalOperator , then `AND` is the default. + + The operation will succeed only if the entire map evaluates to true. :type return_values: string :param return_values: @@ -1396,15 +2008,17 @@ def update_item(self, table_name, key, attribute_updates=None, returned. :type return_consumed_capacity: string - :param return_consumed_capacity: If set to `TOTAL`, ConsumedCapacity is - included in the response; if set to `NONE` (the default), - ConsumedCapacity is not included. + :param return_consumed_capacity: If set to `TOTAL`, the response + includes ConsumedCapacity data for tables and indexes. If set to + `INDEXES`, the response includes ConsumedCapacity for indexes. If + set to `NONE` (the default), ConsumedCapacity is not included in + the response. :type return_item_collection_metrics: string :param return_item_collection_metrics: If set to `SIZE`, statistics about item collections, if any, that were modified during the operation are returned in the response. If set to `NONE` (the - default), no statistics are returned.. + default), no statistics are returned. 
""" params = {'TableName': table_name, 'Key': key, } @@ -1412,6 +2026,8 @@ def update_item(self, table_name, key, attribute_updates=None, params['AttributeUpdates'] = attribute_updates if expected is not None: params['Expected'] = expected + if conditional_operator is not None: + params['ConditionalOperator'] = conditional_operator if return_values is not None: params['ReturnValues'] = return_values if return_consumed_capacity is not None: @@ -1427,7 +2043,7 @@ def update_table(self, table_name, provisioned_throughput=None, Updates the provisioned throughput for the given table. Setting the throughput for a table helps you manage performance and is part of the provisioned throughput feature - of Amazon DynamoDB. + of DynamoDB. The provisioned throughput values can be upgraded or downgraded based on the maximums and minimums listed in the @@ -1442,22 +2058,23 @@ def update_table(self, table_name, provisioned_throughput=None, table returns to the `ACTIVE` state after the UpdateTable operation. - You cannot add, modify or delete local secondary indexes using - UpdateTable . Local secondary indexes can only be defined at - table creation time. + You cannot add, modify or delete indexes using UpdateTable . + Indexes can only be defined at table creation time. :type table_name: string :param table_name: The name of the table to be updated. :type provisioned_throughput: dict - :param provisioned_throughput: The provisioned throughput settings for - the specified table. The settings can be modified using the - UpdateTable operation. + :param provisioned_throughput: Represents the provisioned throughput + settings for a specified table or index. The settings can be + modified using the UpdateTable operation. For current minimum and maximum provisioned throughput values, see `Limits`_ in the Amazon DynamoDB Developer Guide. 
:type global_secondary_index_updates: list - :param global_secondary_index_updates: + :param global_secondary_index_updates: An array of one or more global + secondary indexes on the table, together with provisioned + throughput settings for each index. """ params = {'TableName': table_name, } diff --git a/boto/dynamodb2/table.py b/boto/dynamodb2/table.py index 9795de672d..1e5024632e 100644 --- a/boto/dynamodb2/table.py +++ b/boto/dynamodb2/table.py @@ -757,6 +757,9 @@ def _build_filters(self, filter_kwargs, using=QUERY_OPERATORS): An internal method for taking query/scan-style ``**kwargs`` & turning them into the raw structure DynamoDB expects for filtering. """ + if filter_kwargs is None: + return + filters = {} for field_and_op, value in filter_kwargs.items(): @@ -828,19 +831,22 @@ def query(self, limit=None, index=None, reverse=False, consistent=False, def query_2(self, limit=None, index=None, reverse=False, consistent=False, attributes=None, max_page_size=None, + query_filter=None, conditional_operator=None, **filter_kwargs): """ Queries for a set of matching items in a DynamoDB table. Queries can be performed against a hash key, a hash+range key or - against any data stored in your local secondary indexes. + against any data stored in your local secondary indexes. Query filters + can be used to filter on arbitrary fields. **Note** - You can not query against arbitrary fields within the data - stored in DynamoDB. + stored in DynamoDB unless you specify ``query_filter`` values. To specify the filters of the items you'd like to get, you can specify the filters as kwargs. Each filter kwarg should follow the pattern - ``__=``. + ``__=``. Query filters + are specified in the same way. Optionally accepts a ``limit`` parameter, which should be an integer count of the total number of items to return. (Default: ``None`` - @@ -869,6 +875,15 @@ def query_2(self, limit=None, index=None, reverse=False, the scan from drowning out other queries. 
(Default: ``None`` - fetch as many as DynamoDB will return) + Optionally accepts a ``query_filter`` which is a dictionary of filter + conditions against any arbitrary field in the returned data. + + Optionally accepts a ``conditional_operator`` which applies to the + query filter conditions: + + + `AND` - True if all filter conditions evaluate to true (default) + + `OR` - True if at least one filter condition evaluates to true + Returns a ``ResultSet``, which transparently handles the pagination of results you get back. @@ -907,6 +922,18 @@ def query_2(self, limit=None, index=None, reverse=False, 'John' 'Fred' + # Filter by non-indexed field(s) + >>> results = users.query( + ... last_name__eq='Doe', + ... reverse=True, + ... query_filter={ + ... first_name__beginswith='A' + ... } + ... ) + >>> for res in results: + ... print res['first_name'] + ' ' + res['last_name'] + 'Alice Doe' + """ if self.schema: if len(self.schema) == 1: @@ -935,20 +962,25 @@ def query_2(self, limit=None, index=None, reverse=False, 'consistent': consistent, 'select': select, 'attributes_to_get': attributes, + 'query_filter': query_filter, + 'conditional_operator': conditional_operator, }) results.to_call(self._query, **kwargs) return results - def query_count(self, index=None, consistent=False, **filter_kwargs): + def query_count(self, index=None, consistent=False, conditional_operator=None, + query_filter=None, **filter_kwargs): """ Queries the exact count of matching items in a DynamoDB table. Queries can be performed against a hash key, a hash+range key or - against any data stored in your local secondary indexes. + against any data stored in your local secondary indexes. Query filters + can be used to filter on arbitrary fields. To specify the filters of the items you'd like to get, you can specify the filters as kwargs. Each filter kwarg should follow the pattern - ``__=``. + ``__=``. Query filters + are specified in the same way. 
Optionally accepts an ``index`` parameter, which should be a string of name of the local secondary index you want to query against. @@ -959,6 +991,15 @@ def query_count(self, index=None, consistent=False, **filter_kwargs): the data (more expensive). (Default: ``False`` - use eventually consistent reads) + Optionally accepts a ``query_filter`` which is a dictionary of filter + conditions against any arbitrary field in the returned data. + + Optionally accepts a ``conditional_operator`` which applies to the + query filter conditions: + + + `AND` - True if all filter conditions evaluate to true (default) + + `OR` - True if at least one filter condition evaluates to true + Returns an integer which represents the exact amount of matched items. @@ -983,18 +1024,25 @@ def query_count(self, index=None, consistent=False, **filter_kwargs): using=QUERY_OPERATORS ) + built_query_filter = self._build_filters( + query_filter, + using=FILTER_OPERATORS + ) + raw_results = self.connection.query( self.table_name, index_name=index, consistent_read=consistent, select='COUNT', key_conditions=key_conditions, + query_filter=built_query_filter, + conditional_operator=conditional_operator, ) return int(raw_results.get('Count', 0)) def _query(self, limit=None, index=None, reverse=False, consistent=False, exclusive_start_key=None, select=None, attributes_to_get=None, - **filter_kwargs): + query_filter=None, conditional_operator=None, **filter_kwargs): """ The internal method that performs the actual queries. Used extensively by ``ResultSet`` to perform each (paginated) request. 
@@ -1005,6 +1053,7 @@ def _query(self, limit=None, index=None, reverse=False, consistent=False, 'consistent_read': consistent, 'select': select, 'attributes_to_get': attributes_to_get, + 'conditional_operator': conditional_operator, } if reverse: @@ -1023,6 +1072,11 @@ def _query(self, limit=None, index=None, reverse=False, consistent=False, using=QUERY_OPERATORS ) + kwargs['query_filter'] = self._build_filters( + query_filter, + using=FILTER_OPERATORS + ) + raw_results = self.connection.query( self.table_name, **kwargs @@ -1049,13 +1103,14 @@ def _query(self, limit=None, index=None, reverse=False, consistent=False, } def scan(self, limit=None, segment=None, total_segments=None, - max_page_size=None, attributes=None, **filter_kwargs): + max_page_size=None, attributes=None, conditional_operator=None, + **filter_kwargs): """ Scans across all items within a DynamoDB table. Scans can be performed against a hash key or a hash+range key. You can additionally filter the results after the table has been read but - before the response is returned. + before the response is returned by using query filters. To specify the filters of the items you'd like to get, you can specify the filters as kwargs. Each filter kwarg should follow the pattern @@ -1120,12 +1175,14 @@ def scan(self, limit=None, segment=None, total_segments=None, 'segment': segment, 'total_segments': total_segments, 'attributes': attributes, + 'conditional_operator': conditional_operator, }) results.to_call(self._scan, **kwargs) return results def _scan(self, limit=None, exclusive_start_key=None, segment=None, - total_segments=None, attributes=None, **filter_kwargs): + total_segments=None, attributes=None, conditional_operator=None, + **filter_kwargs): """ The internal method that performs the actual scan. Used extensively by ``ResultSet`` to perform each (paginated) request. 
@@ -1135,6 +1192,7 @@ def _scan(self, limit=None, exclusive_start_key=None, segment=None, 'segment': segment, 'total_segments': total_segments, 'attributes_to_get': attributes, + 'conditional_operator': conditional_operator, } if exclusive_start_key: diff --git a/tests/integration/dynamodb2/test_highlevel.py b/tests/integration/dynamodb2/test_highlevel.py index a4de0b71c9..62d689b560 100644 --- a/tests/integration/dynamodb2/test_highlevel.py +++ b/tests/integration/dynamodb2/test_highlevel.py @@ -254,6 +254,18 @@ def test_integration(self): for res in c_results: self.assertTrue(res['username'] in ['johndoe',]) + # Test a query with query filters + results = users.query_2( + username__eq='johndoe', + query_filter={ + 'first_name__beginswith': 'J' + }, + attributes=('first_name',) + ) + + for res in results: + self.assertTrue(res['first_name'] in ['John']) + # Test scans without filters. all_users = users.scan(limit=7) self.assertEqual(all_users.next()['username'], 'bob') diff --git a/tests/unit/dynamodb2/test_table.py b/tests/unit/dynamodb2/test_table.py index 1e42660c3b..af674a3e29 100644 --- a/tests/unit/dynamodb2/test_table.py +++ b/tests/unit/dynamodb2/test_table.py @@ -2206,7 +2206,9 @@ def test_private_query(self): 'ComparisonOperator': 'BETWEEN', } }, - select=None + select=None, + query_filter=None, + conditional_operator=None ) # Now alter the expected. 
@@ -2227,7 +2229,9 @@ def test_private_query(self): exclusive_start_key={ 'username': 'adam', }, - consistent=True + consistent=True, + query_filter=None, + conditional_operator='AND' ) usernames = [res['username'] for res in results['results']] self.assertEqual(usernames, ['johndoe', 'jane', 'alice', 'bob']) @@ -2251,7 +2255,9 @@ def test_private_query(self): }, }, consistent_read=True, - select=None + select=None, + query_filter=None, + conditional_operator='AND' ) def test_private_scan(self): @@ -2313,7 +2319,8 @@ def test_private_scan(self): limit=2, segment=None, attributes_to_get=None, - total_segments=None + total_segments=None, + conditional_operator=None ) # Now alter the expected. @@ -2356,7 +2363,8 @@ def test_private_scan(self): }, segment=None, attributes_to_get=None, - total_segments=None + total_segments=None, + conditional_operator=None ) def test_query(self): From 5022ac92f7b3aca46436f3cd5dd095e9e26d71ea Mon Sep 17 00:00:00 2001 From: "Daniel G. Taylor" Date: Wed, 30 Apr 2014 12:42:45 -0700 Subject: [PATCH 55/60] Fix dict syntax in example --- boto/dynamodb2/table.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/boto/dynamodb2/table.py b/boto/dynamodb2/table.py index 1e5024632e..6b142f6e78 100644 --- a/boto/dynamodb2/table.py +++ b/boto/dynamodb2/table.py @@ -927,7 +927,7 @@ def query_2(self, limit=None, index=None, reverse=False, ... last_name__eq='Doe', ... reverse=True, ... query_filter={ - ... first_name__beginswith='A' + ... 'first_name__beginswith': 'A' ... } ... 
) >>> for res in results: From 8cf6ceb519c4a452b68543cfbf3c70a26702cac6 Mon Sep 17 00:00:00 2001 From: Lee-Ming Zen Date: Thu, 1 May 2014 17:45:27 -0700 Subject: [PATCH 56/60] Include resource records commit unit test for Route 53 --- tests/unit/route53/test_connection.py | 126 +++++++++++++++++++++++++- 1 file changed, 121 insertions(+), 5 deletions(-) diff --git a/tests/unit/route53/test_connection.py b/tests/unit/route53/test_connection.py index e8957e7faa..760026f254 100644 --- a/tests/unit/route53/test_connection.py +++ b/tests/unit/route53/test_connection.py @@ -21,6 +21,8 @@ # IN THE SOFTWARE. # import mock +import re +import xml.dom.minidom from boto.exception import BotoServerError from boto.route53.connection import Route53Connection @@ -32,7 +34,6 @@ from tests.unit import unittest from tests.unit import AWSMockServiceTestCase - @attr(route53=True) class TestRoute53Connection(AWSMockServiceTestCase): connection_class = Route53Connection @@ -87,7 +88,7 @@ def _wrapper(*args, **kwargs): # Unpatch. 
self.service_connection._retry_handler = orig_retry - +@attr(route53=True) class TestCreateZoneRoute53(AWSMockServiceTestCase): connection_class = Route53Connection @@ -137,7 +138,7 @@ def test_create_hosted_zone(self): self.assertEqual(response['CreateHostedZoneResponse']['DelegationSet']['NameServers'], ['ns-100.awsdns-01.com', 'ns-1000.awsdns-01.co.uk', 'ns-1000.awsdns-01.org', 'ns-900.awsdns-01.net']) - +@attr(route53=True) class TestGetZoneRoute53(AWSMockServiceTestCase): connection_class = Route53Connection @@ -194,7 +195,7 @@ def test_get_zone(self): self.assertTrue(isinstance(response, Zone)) self.assertEqual(response.name, "example.com.") - +@attr(route53=True) class TestGetHostedZoneRoute53(AWSMockServiceTestCase): connection_class = Route53Connection @@ -231,7 +232,7 @@ def test_list_zones(self): self.assertEqual(response['GetHostedZoneResponse']['DelegationSet']['NameServers'], ['ns-1000.awsdns-40.org', 'ns-200.awsdns-30.com', 'ns-900.awsdns-50.net', 'ns-1000.awsdns-00.co.uk']) - +@attr(route53=True) class TestGetAllRRSetsRoute53(AWSMockServiceTestCase): connection_class = Route53Connection @@ -345,4 +346,119 @@ def test_get_all_rr_sets(self): self.assertEqual(failover_record.identifier, 'failover-primary') self.assertEqual(failover_record.failover, 'PRIMARY') self.assertEqual(failover_record.ttl, '60') + +@attr(route53=True) +class TestChangeResourceRecordSetsRoute53(AWSMockServiceTestCase): + connection_class = Route53Connection + + def setUp(self): + super(TestChangeResourceRecordSetsRoute53, self).setUp() + + def default_body(self): + return """ + + + /change/C1111111111111 + PENDING + 2014-05-05T10:11:12.123Z + + + """ + + def test_record_commit(self): + rrsets = ResourceRecordSets(self.service_connection) + rrsets.add_change_record('CREATE', Record('vanilla.example.com', 'A', 60, ['1.2.3.4'])) + rrsets.add_change_record('CREATE', Record('alias.example.com', 'AAAA', alias_hosted_zone_id='Z123OTHER', alias_dns_name='target.other', 
alias_evaluate_target_health=True)) + rrsets.add_change_record('CREATE', Record('wrr.example.com', 'CNAME', 60, ['cname.target'], weight=10, identifier='weight-1')) + rrsets.add_change_record('CREATE', Record('lbr.example.com', 'TXT', 60, ['text record'], region='us-west-2', identifier='region-1')) + rrsets.add_change_record('CREATE', Record('failover.example.com', 'A', 60, ['2.2.2.2'], health_check='hc-1234', failover='PRIMARY', identifier='primary')) + changes_xml = rrsets.to_xml() + + # the whitespacing doesn't match exactly, so we'll pretty print and drop all new lines + # not the best, but + actual_xml = re.sub(r"\s*[\r\n]+", "\n", xml.dom.minidom.parseString(changes_xml).toprettyxml()) + expected_xml = re.sub(r"\s*[\r\n]+", "\n", xml.dom.minidom.parseString(""" + + + None + + + CREATE + + vanilla.example.com + A + 60 + + + 1.2.3.4 + + + + + + CREATE + + alias.example.com + AAAA + + Z123OTHER + target.other + true + + + + + CREATE + + wrr.example.com + CNAME + weight-1 + 10 + 60 + + + cname.target + + + + + + CREATE + + lbr.example.com + TXT + region-1 + us-west-2 + 60 + + + text record + + + + + + CREATE + + failover.example.com + A + primary + PRIMARY + 60 + + + 2.2.2.2 + + + hc-1234 + + + + + + """).toprettyxml()) + + # Note: the alias XML should not include the TTL, even if it's specified in the object model + self.assertEqual(actual_xml, expected_xml) + From 60118ffec376536e45379c331f1d2b0624c3c125 Mon Sep 17 00:00:00 2001 From: "Daniel G. 
Taylor" Date: Wed, 7 May 2014 17:01:37 -0700 Subject: [PATCH 57/60] Add support for SQS message attributes --- boto/sqs/connection.py | 104 ++++++++++++++++-- boto/sqs/message.py | 9 +- boto/sqs/messageattributes.py | 66 ++++++++++++ boto/sqs/queue.py | 32 ++++-- docs/source/sqs_tut.rst | 32 ++++++ tests/unit/sqs/test_connection.py | 170 +++++++++++++++++++++++++++++- 6 files changed, 393 insertions(+), 20 deletions(-) create mode 100644 boto/sqs/messageattributes.py diff --git a/boto/sqs/connection.py b/boto/sqs/connection.py index 8fc69306bf..71ef7414cf 100644 --- a/boto/sqs/connection.py +++ b/boto/sqs/connection.py @@ -144,7 +144,7 @@ def set_queue_attribute(self, queue, attribute, value): def receive_message(self, queue, number_messages=1, visibility_timeout=None, attributes=None, - wait_time_seconds=None): + wait_time_seconds=None, message_attributes=None): """ Read messages from an SQS Queue. @@ -177,6 +177,11 @@ def receive_message(self, queue, number_messages=1, If a message is available, the call will return sooner than wait_time_seconds. + :type message_attributes: list + :param message_attributes: The name(s) of additional message + attributes to return. The default is to return no additional + message attributes. Use ``['All']`` or ``['.*']`` to return all. + :rtype: list :return: A list of :class:`boto.sqs.message.Message` objects. 
@@ -188,6 +193,9 @@ def receive_message(self, queue, number_messages=1, self.build_list_params(params, attributes, 'AttributeName') if wait_time_seconds is not None: params['WaitTimeSeconds'] = wait_time_seconds + if message_attributes is not None: + self.build_list_params(params, message_attributes, + 'MessageAttributeName') return self.get_list('ReceiveMessage', params, [('Message', queue.message_class)], queue.id, queue) @@ -244,10 +252,61 @@ def delete_message_from_handle(self, queue, receipt_handle): params = {'ReceiptHandle' : receipt_handle} return self.get_status('DeleteMessage', params, queue.id) - def send_message(self, queue, message_content, delay_seconds=None): + def send_message(self, queue, message_content, delay_seconds=None, + message_attributes=None): + """ + Send a new message to the queue. + + :type queue: A :class:`boto.sqs.queue.Queue` object. + :param queue: The Queue to which the messages will be written. + + :type message_content: string + :param message_content: The body of the message + + :type delay_seconds: int + :param delay_seconds: Number of seconds (0 - 900) to delay this + message from being processed. + + :type message_attributes: dict + :param message_attributes: Message attributes to set. 
Should be + of the form: + + { + "name1": { + "data_type": "Number", + "string_value": "1" + }, + "name2": { + "data_type": "String", + "string_value": "Bob" + } + } + + """ params = {'MessageBody' : message_content} if delay_seconds: params['DelaySeconds'] = int(delay_seconds) + + if message_attributes is not None: + for i, name in enumerate(message_attributes.keys(), start=1): + attribute = message_attributes[name] + params['MessageAttribute.%s.Name' % i] = name + if 'data_type' in attribute: + params['MessageAttribute.%s.Value.DataType' % i] = \ + attribute['data_type'] + if 'string_value' in attribute: + params['MessageAttribute.%s.Value.StringValue' % i] = \ + attribute['string_value'] + if 'binary_value' in attribute: + params['MessageAttribute.%s.Value.BinaryValue' % i] = \ + attribute['binary_value'] + if 'string_list_value' in attribute: + params['MessageAttribute.%s.Value.StringListValue' % i] = \ + attribute['string_list_value'] + if 'binary_list_value' in attribute: + params['MessageAttribute.%s.Value.BinaryListValue' % i] = \ + attribute['binary_list_value'] + return self.get_object('SendMessage', params, Message, queue.id, verb='POST') @@ -263,19 +322,44 @@ def send_message_batch(self, queue, messages): tuple represents a single message to be written and consists of and ID (string) that must be unique within the list of messages, the message body itself - which can be a maximum of 64K in length, and an + which can be a maximum of 64K in length, an integer which represents the delay time (in seconds) for the message (0-900) before the message will - be delivered to the queue. + be delivered to the queue, and an optional dict of + message attributes like those passed to ``send_message`` + above. 
+ """ params = {} for i, msg in enumerate(messages): - p_name = 'SendMessageBatchRequestEntry.%i.Id' % (i+1) - params[p_name] = msg[0] - p_name = 'SendMessageBatchRequestEntry.%i.MessageBody' % (i+1) - params[p_name] = msg[1] - p_name = 'SendMessageBatchRequestEntry.%i.DelaySeconds' % (i+1) - params[p_name] = msg[2] + base = 'SendMessageBatchRequestEntry.%i' % (i + 1) + params['%s.Id' % base] = msg[0] + params['%s.MessageBody' % base] = msg[1] + params['%s.DelaySeconds' % base] = msg[2] + if len(msg) > 3: + base += '.MessageAttribute' + for j, name in enumerate(msg[3].keys()): + attribute = msg[3][name] + + p_name = '%s.%i.Name' % (base, j + 1) + params[p_name] = name + + if 'data_type' in attribute: + p_name = '%s.%i.DataType' % (base, j + 1) + params[p_name] = attribute['data_type'] + if 'string_value' in attribute: + p_name = '%s.%i.StringValue' % (base, j + 1) + params[p_name] = attribute['string_value'] + if 'binary_value' in attribute: + p_name = '%s.%i.BinaryValue' % (base, j + 1) + params[p_name] = attribute['binary_value'] + if 'string_list_value' in attribute: + p_name = '%s.%i.StringListValue' % (base, j + 1) + params[p_name] = attribute['string_list_value'] + if 'binary_list_value' in attribute: + p_name = '%s.%i.BinaryListValue' % (base, j + 1) + params[p_name] = attribute['binary_list_value'] + return self.get_object('SendMessageBatch', params, BatchResults, queue.id, verb='POST') diff --git a/boto/sqs/message.py b/boto/sqs/message.py index ce7976c1a8..d8b29d1a3b 100644 --- a/boto/sqs/message.py +++ b/boto/sqs/message.py @@ -66,6 +66,7 @@ import base64 import StringIO from boto.sqs.attributes import Attributes +from boto.sqs.messageattributes import MessageAttributes from boto.exception import SQSDecodeError import boto @@ -84,6 +85,8 @@ def __init__(self, queue=None, body=''): self.receipt_handle = None self.md5 = None self.attributes = Attributes(self) + self.message_attributes = MessageAttributes(self) + self.md5_message_attributes = None def 
__len__(self): return len(self.encode(self._body)) @@ -91,6 +94,8 @@ def __len__(self): def startElement(self, name, attrs, connection): if name == 'Attribute': return self.attributes + if name == 'MessageAttribute': + return self.message_attributes return None def endElement(self, name, value, connection): @@ -100,8 +105,10 @@ def endElement(self, name, value, connection): self.id = value elif name == 'ReceiptHandle': self.receipt_handle = value - elif name == 'MD5OfMessageBody': + elif name == 'MD5OfBody': self.md5 = value + elif name == 'MD5OfMessageAttributes': + self.md5_message_attributes = value else: setattr(self, name, value) diff --git a/boto/sqs/messageattributes.py b/boto/sqs/messageattributes.py new file mode 100644 index 0000000000..7e61bf3668 --- /dev/null +++ b/boto/sqs/messageattributes.py @@ -0,0 +1,66 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2014 Amazon.com, Inc. All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents an SQS MessageAttribute Name/Value set +""" + +class MessageAttributes(dict): + def __init__(self, parent): + self.parent = parent + self.current_key = None + self.current_value = None + + def startElement(self, name, attrs, connection): + if name == 'Value': + self.current_value = MessageAttributeValue(self) + return self.current_value + + def endElement(self, name, value, connection): + if name == 'MessageAttribute': + self[self.current_key] = self.current_value + elif name == 'Name': + self.current_key = value + elif name == 'Value': + pass + else: + setattr(self, name, value) + + +class MessageAttributeValue(dict): + def __init__(self, parent): + self.parent = parent + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'DataType': + self['data_type'] = value + elif name == 'StringValue': + self['string_value'] = value + elif name == 'BinaryValue': + self['binary_value'] = value + elif name == 'StringListValue': + self['string_list_value'] = value + elif name == 'BinaryListValue': + self['binary_list_value'] = value diff --git a/boto/sqs/queue.py b/boto/sqs/queue.py index 054b839e33..0f325d5601 100644 --- a/boto/sqs/queue.py +++ b/boto/sqs/queue.py @@ -182,7 +182,8 @@ def remove_permission(self, label): """ return self.connection.remove_permission(self, label) - def read(self, visibility_timeout=None, wait_time_seconds=None): + def read(self, visibility_timeout=None, wait_time_seconds=None, + message_attributes=None): """ Read a single message from the queue. @@ -195,11 +196,17 @@ def read(self, visibility_timeout=None, wait_time_seconds=None): If a message is available, the call will return sooner than wait_time_seconds. 
+ :type message_attributes: list + :param message_attributes: The name(s) of additional message + attributes to return. The default is to return no additional + message attributes. Use ``['All']`` or ``['.*']`` to return all. + :rtype: :class:`boto.sqs.message.Message` :return: A single message or None if queue is empty """ rs = self.get_messages(1, visibility_timeout, - wait_time_seconds=wait_time_seconds) + wait_time_seconds=wait_time_seconds, + message_attributes=message_attributes) if len(rs) == 1: return rs[0] else: @@ -216,8 +223,8 @@ def write(self, message, delay_seconds=None): :return: The :class:`boto.sqs.message.Message` object that was written. """ new_msg = self.connection.send_message(self, - message.get_body_encoded(), - delay_seconds) + message.get_body_encoded(), delay_seconds=delay_seconds, + message_attributes=message.message_attributes) message.id = new_msg.id message.md5 = new_msg.md5 return message @@ -231,10 +238,12 @@ def write_batch(self, messages): tuple represents a single message to be written and consists of and ID (string) that must be unique within the list of messages, the message body itself - which can be a maximum of 64K in length, and an + which can be a maximum of 64K in length, an integer which represents the delay time (in seconds) for the message (0-900) before the message will - be delivered to the queue. + be delivered to the queue, and an optional dict of + message attributes like those passed to ``send_message`` + in the connection class. """ return self.connection.send_message_batch(self, messages) @@ -254,7 +263,8 @@ def new_message(self, body='', **kwargs): # get a variable number of messages, returns a list of messages def get_messages(self, num_messages=1, visibility_timeout=None, - attributes=None, wait_time_seconds=None): + attributes=None, wait_time_seconds=None, + message_attributes=None): """ Get a variable number of messages. 
@@ -278,13 +288,19 @@ def get_messages(self, num_messages=1, visibility_timeout=None, If a message is available, the call will return sooner than wait_time_seconds. + :type message_attributes: list + :param message_attributes: The name(s) of additional message + attributes to return. The default is to return no additional + message attributes. Use ``['All']`` or ``['.*']`` to return all. + :rtype: list :return: A list of :class:`boto.sqs.message.Message` objects. """ return self.connection.receive_message( self, number_messages=num_messages, visibility_timeout=visibility_timeout, attributes=attributes, - wait_time_seconds=wait_time_seconds) + wait_time_seconds=wait_time_seconds, + message_attributes=message_attributes) def delete_message(self, message): """ diff --git a/docs/source/sqs_tut.rst b/docs/source/sqs_tut.rst index cd10041a76..f86aa3e815 100644 --- a/docs/source/sqs_tut.rst +++ b/docs/source/sqs_tut.rst @@ -113,6 +113,25 @@ The write method will return the ``Message`` object. The ``id`` and ``md5`` attribute of the ``Message`` object will be updated with the values of the message that was written to the queue. +Arbitrary message attributes can be defined by setting a simple dictionary +of values on the message object:: + +>>> m = Message() +>>> m.message_attributes = { + "name1": { + "data_type": "String", + "string_value": "I am a string" + }, + "name2": { + "data_type": "Number", + "string_value": "12" + } +} + +Note that by default, these arbitrary attributes are not returned when +you request messages from a queue. Instead, you must request them via +the ``message_attributes`` parameter (see below). + If the message cannot be written an ``SQSError`` exception will be raised. 
Writing Messages (Custom Format) @@ -206,6 +225,19 @@ a visibility_timeout parameter to read, if you desire: >>> m.get_body() u'This is my first message' +Reading Message Attributes +-------------------------- +By default, no arbitrary message attributes are returned when requesting +messages. You can change this behavior by specifying the names of attributes +you wish to have returned:: + +>>> rs = queue.get_messages(message_attributes=['name1', 'name2']) +>>> print rs[0].message_attributes['name1']['string_value'] +'I am a string' + +A special value of ``All`` or ``.*`` may be passed to return all available +message attributes. + Deleting Messages and Queues ---------------------------- As stated above, messages are never deleted by the queue unless explicitly told to do so. diff --git a/tests/unit/sqs/test_connection.py b/tests/unit/sqs/test_connection.py index 918461b664..613c4bfd33 100644 --- a/tests/unit/sqs/test_connection.py +++ b/tests/unit/sqs/test_connection.py @@ -1,5 +1,6 @@ #!/usr/bin/env python # Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. 
All Rights Reserved # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the @@ -26,6 +27,8 @@ from boto.sqs.connection import SQSConnection from boto.sqs.regioninfo import SQSRegionInfo +from boto.sqs.message import RawMessage +from boto.sqs.queue import Queue class SQSAuthParams(AWSMockServiceTestCase): @@ -104,7 +107,172 @@ def test_get_queue_with_owner_account_id_returns_queue(self): assert 'QueueOwnerAWSAccountId' in self.actual_request.params.keys() self.assertEquals(self.actual_request.params['QueueOwnerAWSAccountId'], '599169622985') - + + +class SQSMessageAttributesParsing(AWSMockServiceTestCase): + connection_class = SQSConnection + + def default_body(self): + return """ + + + + This is a test + +eXJYhj5rDql5hp2VwGkXvQVsefdjAlsQe5EGS57gyORPB48KwP1d/3Rfy4DrQXt+MgfRPHUCUH36xL9+Ol/UWD/ylKrrWhiXSY0Ip4EsI8jJNTo/aneEjKE/iZnz/nL8MFP5FmMj8PbDAy5dgvAqsdvX1rm8Ynn0bGnQLJGfH93cLXT65p6Z/FDyjeBN0M+9SWtTcuxOIcMdU8NsoFIwm/6mLWgWAV46OhlYujzvyopCvVwsj+Y8jLEpdSSvTQHNlQEaaY/V511DqAvUwru2p0ZbW7ZzcbhUTn6hHkUROo= + ce114e4501d2f4e2dcea3e17b546f339 + + Count + + Number + 1 + + + + Foo + + String + Bar + + + 7049431b-e5f6-430b-93c4-ded53864d02b + 324758f82d026ac6ec5b31a3b192d1e3 + + + + 73f978f2-400b-5460-8d38-3316e39e79c6 + +""" + + def test_message_attribute_response(self): + self.set_http_response(status_code=200) + + queue = Queue( + url='http://sqs.us-east-1.amazonaws.com/123456789012/testQueue/', + message_class=RawMessage) + message = self.service_connection.receive_message(queue)[0] + + self.assertEqual(message.get_body(), 'This is a test') + self.assertEqual(message.id, '7049431b-e5f6-430b-93c4-ded53864d02b') + self.assertEqual(message.md5, 'ce114e4501d2f4e2dcea3e17b546f339') + self.assertEqual(message.md5_message_attributes, + '324758f82d026ac6ec5b31a3b192d1e3') + + mattributes = message.message_attributes + self.assertEqual(len(mattributes.keys()), 2) + 
self.assertEqual(mattributes['Count']['data_type'], 'Number') + self.assertEqual(mattributes['Foo']['string_value'], 'Bar') + + +class SQSSendMessageAttributes(AWSMockServiceTestCase): + connection_class = SQSConnection + + def default_body(self): + return """ + + + fafb00f5732ab283681e124bf8747ed1 + + + 3ae8f24a165a8cedc005670c81a27295 + + + 5fea7756-0ea4-451a-a703-a558b933e274 + + + + + 27daac76-34dd-47df-bd01-1f6e873584a0 + + + +""" + + def test_send_message_attributes(self): + self.set_http_response(status_code=200) + + queue = Queue( + url='http://sqs.us-east-1.amazonaws.com/123456789012/testQueue/', + message_class=RawMessage) + self.service_connection.send_message(queue, 'Test message', + message_attributes={ + 'name1': { + 'data_type': 'String', + 'string_value': 'Bob' + }, + 'name2': { + 'data_type': 'Number', + 'string_value': '1' + } + }) + + self.assert_request_parameters({ + 'Action': 'SendMessage', + 'MessageAttribute.1.Name': 'name2', + 'MessageAttribute.1.Value.DataType': 'Number', + 'MessageAttribute.1.Value.StringValue': '1', + 'MessageAttribute.2.Name': 'name1', + 'MessageAttribute.2.Value.DataType': 'String', + 'MessageAttribute.2.Value.StringValue': 'Bob', + 'MessageBody': 'Test message', + 'Version': '2012-11-05' + }) + + +class SQSSendBatchMessageAttributes(AWSMockServiceTestCase): + connection_class = SQSConnection + + def default_body(self): + return """ + + + test_msg_001 + 0a5231c7-8bff-4955-be2e-8dc7c50a25fa + 0e024d309850c78cba5eabbeff7cae71 + + + test_msg_002 + 15ee1ed3-87e7-40c1-bdaa-2e49968ea7e9 + 7fb8146a82f95e0af155278f406862c2 + 295c5fa15a51aae6884d1d7c1d99ca50 + + + + ca1ad5d0-8271-408b-8d0f-1351bf547e74 + + +""" + + def test_send_message_attributes(self): + self.set_http_response(status_code=200) + + queue = Queue( + url='http://sqs.us-east-1.amazonaws.com/123456789012/testQueue/', + message_class=RawMessage) + + message1 = (1, 'Message 1', 0, {'name1': {'data_type': 'String', + 'string_value': 'foo'}}) + message2 = (2, 'Message 
2', 0, {'name2': {'data_type': 'Number', + 'string_value': '1'}}) + + self.service_connection.send_message_batch(queue, (message1, message2)) + + self.assert_request_parameters({ + 'Action': 'SendMessageBatch', + 'SendMessageBatchRequestEntry.1.DelaySeconds': 0, + 'SendMessageBatchRequestEntry.1.Id': 1, + 'SendMessageBatchRequestEntry.1.MessageAttribute.1.DataType': 'String', + 'SendMessageBatchRequestEntry.1.MessageAttribute.1.Name': 'name1', + 'SendMessageBatchRequestEntry.1.MessageAttribute.1.StringValue': 'foo', + 'SendMessageBatchRequestEntry.1.MessageBody': 'Message 1', + 'SendMessageBatchRequestEntry.2.DelaySeconds': 0, + 'SendMessageBatchRequestEntry.2.Id': 2, + 'SendMessageBatchRequestEntry.2.MessageAttribute.1.DataType': 'Number', + 'SendMessageBatchRequestEntry.2.MessageAttribute.1.Name': 'name2', + 'SendMessageBatchRequestEntry.2.MessageAttribute.1.StringValue': '1', + 'SendMessageBatchRequestEntry.2.MessageBody': 'Message 2', + 'Version': '2012-11-05' + }) + if __name__ == '__main__': unittest.main() From e8f3043aeec1c669d06052cc1de8b44dc29bf185 Mon Sep 17 00:00:00 2001 From: "Daniel G. Taylor" Date: Thu, 8 May 2014 13:17:54 -0700 Subject: [PATCH 58/60] Add Cloudsearch2 reference docs to index --- docs/source/index.rst | 1 + docs/source/ref/cloudsearch2.rst | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/source/index.rst b/docs/source/index.rst index df4ca44d00..19ba44239f 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -55,6 +55,7 @@ Currently Supported Services * **Application Services** + * Cloudsearch 2 -- (:doc:`API Reference `) * :doc:`Cloudsearch ` -- (:doc:`API Reference `) * Elastic Transcoder -- (:doc:`API Reference `) * :doc:`Simple Workflow Service (SWF) ` -- (:doc:`API Reference `) diff --git a/docs/source/ref/cloudsearch2.rst b/docs/source/ref/cloudsearch2.rst index 98d4a22403..64edff19b7 100644 --- a/docs/source/ref/cloudsearch2.rst +++ b/docs/source/ref/cloudsearch2.rst @@ -1,4 +1,4 @@ -.. 
ref-cloudsearch +.. ref-cloudsearch2 =========== Cloudsearch From b9577450b017e90b2d24086790087b756c4417f0 Mon Sep 17 00:00:00 2001 From: "Daniel G. Taylor" Date: Thu, 8 May 2014 13:20:20 -0700 Subject: [PATCH 59/60] Added release notes for 2.28.0 --- docs/source/index.rst | 1 + docs/source/releasenotes/v2.28.0.rst | 38 ++++++++++++++++++++++++++++ 2 files changed, 39 insertions(+) create mode 100644 docs/source/releasenotes/v2.28.0.rst diff --git a/docs/source/index.rst b/docs/source/index.rst index 19ba44239f..2eed7c2a3f 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -119,6 +119,7 @@ Release Notes .. toctree:: :titlesonly: + releasenotes/v2.28.0 releasenotes/v2.27.0 releasenotes/v2.26.1 releasenotes/v2.26.0 diff --git a/docs/source/releasenotes/v2.28.0.rst b/docs/source/releasenotes/v2.28.0.rst new file mode 100644 index 0000000000..4297c555a6 --- /dev/null +++ b/docs/source/releasenotes/v2.28.0.rst @@ -0,0 +1,38 @@ +boto v2.28.0 +============ + +:date: 2014/05/08 + +This release adds support for Amazon SQS message attributes, Amazon DynamoDB query filters and enhanced conditional operators, adds support for the new Amazon CloudSearch 2013-01-01 API and includes various features and fixes for Amazon Route 53, Amazon EC2, Amazon Elastic Beanstalk, Amazon Glacier, AWS Identity and Access Management (IAM), Amazon S3, Mechanical Turk and MWS. + +Changes +------- +* Add support for SQS message attributes. (:issue:`2257`, :sha:`a04ca92`) +* Update DynamoDB to support query filters. (:issue:`2242`, :sha:`141eb71`) +* Implement new Cloudsearch API 2013-01-01 as cloudsearch2 module (:sha:`b0ababa`) +* Miscellaneous improvements to the MTurk CLI. (:issue:`2188`, :sha:`c213ff1`) +* Update MWS to latest API version and adds missing API calls. (:issue:`2203`, :issue:`2201`, :sha:`8adf720`, :sha:`8d0a6a8`) +* Update EC2 `register_image` to expose an option which sets whether an instance store is deleted on termination. The default value is left as-is. 
(:sha:`d295ee9`) +* Correct typo "possile" --> "possible". (:issue:`2196`, :sha:`d228352`) +* Update Boto configuration tutorial (:issue:`2191`, :sha:`f2a7a08`) +* Clarify that MTurkConnection.get_assignments attributes are actually strings. (:issue:`2187`, :issue:`2176`, :sha:`075636b`) +* Fix EC2 documentation typo (:issue:`2178`, :sha:`2627843`) +* Add support for ELB Connection Draining attribute. (:issue:`2174`, :issue:`2173`, :sha:`78fa43c`) +* Add support for setting failure threshold for Route53 health checks. (:issue:`2171`, :issue:`2170`, :sha:`15b812f`) +* Fix specification of Elastic Beanstalk tier parameter. (:issue:`2168`, :sha:`4492e86`) +* Fixed part of roboto for euca2ools. (:issue:`2166`, :issue:`1730`, :sha:`63b7a34`) +* Fixed removing policies from listeners. (:issue:`2165`, :issue:`1708`, :sha:`e5a2d9b`) +* Reintroduced the ``reverse`` fix for DDB. (:issue:`2163`, :sha:`70ec722`) +* Several fixes to DynamoDB describe calls. (:issue:`2161`, :issue:`1649`, :issue:`1663`, :sha:`84fb748`) +* Fixed how ``reverse`` works in DynamoDBv2. (:issue:`2160`, :issue:`2070`, :issue:`2115`, :sha:`afdd805`) +* Update Kinesis exceptions (:issue:`2159`, :issue:`2153`, :sha:`22c6751`) +* Fix ECS problem using new-style classes (:issue:`2103`, :sha:`dc466c7`) +* Add support for passing region info from SWF layer2 to layer1 (:issue:`2137`, :sha:`0dc8ce6`) +* Handle plus signs in S3 metadata (:issue:`2145`, :sha:`c2a0f95`) +* Fix Glacier vault date parsing (:issue:`2158`, :sha:`9e7b132`) +* Documentation fix. (:issue:`2156`, :sha:`7592a58`) +* Fix Route53 evaluate target health bug. (:issue:`2157`, :sha:`398bb62`) +* Removing obsolete core directory. (:issue:`1987`, :sha:`8e83292`) +* Improve IAM behavior in the cn-north-1 region. (:issue:`2152`, :sha:`4050e70`) +* Add SetIdentityFeedbackForwardingEnabled and SetIdentityNotificationTopic for SES. (:issue:`2130`, :issue:`2128`, :sha:`83002d5`) +* Altered Route53 bin script to use UPSERT rather than CREATE.
(:issue:`2151`, :sha:`2cd20e7`) From 4b659ba1011ecb94746bc4403c205266a8a3fd69 Mon Sep 17 00:00:00 2001 From: "Daniel G. Taylor" Date: Thu, 8 May 2014 13:21:22 -0700 Subject: [PATCH 60/60] Version bump to 2.28.0 --- README.rst | 4 ++-- boto/__init__.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/README.rst b/README.rst index ccb50b69bb..7efd7fd08f 100644 --- a/README.rst +++ b/README.rst @@ -1,9 +1,9 @@ #### boto #### -boto 2.27.0 +boto 2.28.0 -Released: 6-March-2014 +Released: 8-May-2014 .. image:: https://travis-ci.org/boto/boto.png?branch=develop :target: https://travis-ci.org/boto/boto diff --git a/boto/__init__.py b/boto/__init__.py index 791a5e7f2d..53464c335a 100644 --- a/boto/__init__.py +++ b/boto/__init__.py @@ -37,7 +37,7 @@ import urlparse from boto.exception import InvalidUriError -__version__ = '2.27.0' +__version__ = '2.28.0' Version = __version__ # for backware compatibility # http://bugs.python.org/issue7980