Merge branch 'release-0.0.19'
* release-0.0.19:
  Bumping version to 0.0.19
  Bump boto3's dep packages to latest versions
  Fix DynamoDB leaking tables in integ tests
  Fix log message
  Remove page_size and limit from all
  Rename unit test file to follow naming conventions
  Update boto3 to new botocore pagination interface
  Reduce log messages in batch to a single message
  Initial commit of DynamoDB batch writer
jamesls committed Jun 4, 2015
2 parents 21ef90c + 4cd3a5b commit 8af77f0
Showing 11 changed files with 436 additions and 38 deletions.
2 changes: 1 addition & 1 deletion boto3/__init__.py
@@ -17,7 +17,7 @@


__author__ = 'Amazon Web Services'
-__version__ = '0.0.18'
+__version__ = '0.0.19'


# The default Boto3 session; autoloaded when needed.
121 changes: 121 additions & 0 deletions boto3/dynamodb/table.py
@@ -0,0 +1,121 @@
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import logging


logger = logging.getLogger(__name__)


def register_table_methods(base_classes, **kwargs):
    base_classes.insert(0, TableResource)


# This class can be used to add any additional methods we want
# onto a table resource. Ideally to avoid creating a new
# base class for every method we can just update this
# class instead. Just be sure to move the bulk of the
# actual method implementation to another class.
class TableResource(object):
    def __init__(self, *args, **kwargs):
        super(TableResource, self).__init__(*args, **kwargs)

    def batch_writer(self):
        """Create a batch writer object.

        This method creates a context manager for writing
        objects to Amazon DynamoDB in batch.

        The batch writer will automatically handle buffering and sending items
        in batches. In addition, the batch writer will also automatically
        handle any unprocessed items and resend them as needed. All you need
        to do is call ``put_item`` for any items you want to add, and
        ``delete_item`` for any items you want to delete.

        Example usage::

            with table.batch_writer() as batch:
                for _ in xrange(1000000):
                    batch.put_item(Item={'HashKey': '...',
                                         'Otherstuff': '...'})
                # You can also delete_items in a batch.
                batch.delete_item(Key={'HashKey': 'SomeHashKey'})

        """
        return BatchWriter(self.name, self.meta.client)


class BatchWriter(object):
    """Automatically handle batch writes to DynamoDB for a single table."""
    def __init__(self, table_name, client, flush_amount=25):
        """
        :type table_name: str
        :param table_name: The name of the table. The class handles
            batch writes to a single table.

        :type client: ``botocore.client.Client``
        :param client: A botocore client. Note this client
            **must** have the dynamodb customizations applied
            to it for transforming AttributeValues into the
            wire protocol. What this means in practice is that
            you need to use a client that comes from a DynamoDB
            resource if you're going to instantiate this class
            directly, i.e
            ``boto3.resource('dynamodb').Table('foo').meta.client``.

        :type flush_amount: int
        :param flush_amount: The number of items to keep in
            a local buffer before sending a batch_write_item
            request to DynamoDB.

        """
        self._table_name = table_name
        self._client = client
        self._items_buffer = []
        self._flush_amount = flush_amount

    def put_item(self, Item):
        self._items_buffer.append({'PutRequest': {'Item': Item}})
        self._flush_if_needed()

    def delete_item(self, Key):
        self._items_buffer.append({'DeleteRequest': {'Key': Key}})
        self._flush_if_needed()

    def _flush_if_needed(self):
        if len(self._items_buffer) >= self._flush_amount:
            self._flush()

    def _flush(self):
        response = self._client.batch_write_item(
            RequestItems={self._table_name: self._items_buffer})
        unprocessed_items = response['UnprocessedItems']

        if unprocessed_items and unprocessed_items[self._table_name]:
            # Any unprocessed_items are immediately added to the
            # next batch we send.
            self._items_buffer = unprocessed_items[self._table_name]
        else:
            self._items_buffer = []
        logger.debug("Batch write sent %s, unprocessed: %s",
                     self._flush_amount, len(self._items_buffer))

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, tb):
        # When we exit, we need to keep flushing whatever's left
        # until there's nothing left in our items buffer.
        while self._items_buffer:
            self._flush()
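For context, a minimal sketch of the new batch writer in use through a DynamoDB table resource; the table name and attribute names below are illustrative only, not part of this commit::

    import boto3

    dynamodb = boto3.resource('dynamodb')
    table = dynamodb.Table('mytable')  # hypothetical table with hash key 'MyHashKey'

    # put_item/delete_item calls are buffered locally; once flush_amount (25)
    # requests accumulate, a single batch_write_item call is sent, and any
    # UnprocessedItems in the response are carried over into the next batch.
    with table.batch_writer() as batch:
        for i in range(100):
            batch.put_item(Item={'MyHashKey': 'key%s' % i,
                                 'OtherStuff': 'value%s' % i})
        batch.delete_item(Key={'MyHashKey': 'key0'})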
17 changes: 6 additions & 11 deletions boto3/resources/collection.py
@@ -151,7 +151,8 @@ def pages(self):
                        self._py_operation_name, params)
            paginator = client.get_paginator(self._py_operation_name)
            pages = paginator.paginate(
-                max_items=limit, page_size=page_size, **params)
+                PaginationConfig={
+                    'MaxItems': limit, 'PageSize': page_size}, **params)
        else:
            logger.info('Calling %s:%s with %r',
                        self._parent.meta.service_name,
@@ -178,7 +179,7 @@ def pages(self):
                if limit is not None and count >= limit:
                    break

-    def all(self, limit=None, page_size=None):
+    def all(self):
        """
        Get all items from the collection, optionally with a custom
        page size and item count limit.
@@ -196,14 +197,8 @@ def all(self, limit=None, page_size=None):
            >>> queues = list(sqs.queues.all())
            >>> len(queues)
            2
-        :type limit: int
-        :param limit: Return no more than this many items
-        :type page_size: int
-        :param page_size: Fetch this many items per request
        :rtype: :py:class:`ResourceCollection`
        """
-        return self._clone(limit=limit, page_size=page_size)
+        return self._clone()

    def filter(self, **kwargs):
        """
@@ -337,8 +332,8 @@ def iterator(self, **kwargs):
                                    self._handler, **kwargs)

    # Set up some methods to proxy ResourceCollection methods
-    def all(self, limit=None, page_size=None):
-        return self.iterator(limit=limit, page_size=page_size)
+    def all(self):
+        return self.iterator()
    all.__doc__ = ResourceCollection.all.__doc__

    def filter(self, **kwargs):
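For reference, a minimal sketch of the new botocore pagination interface that the collection code now calls; the S3 client, bucket name, and item counts below are illustrative::

    import boto3

    client = boto3.client('s3')
    paginator = client.get_paginator('list_objects')

    # The paginator now takes a single PaginationConfig dict rather than the
    # old max_items / page_size keyword arguments.
    pages = paginator.paginate(
        Bucket='my-example-bucket',
        PaginationConfig={'MaxItems': 100, 'PageSize': 25})

    for page in pages:
        for obj in page.get('Contents', []):
            print(obj['Key'])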
5 changes: 5 additions & 0 deletions boto3/session.py
@@ -284,3 +284,8 @@ def _register_default_handlers(self):
            boto3.utils.lazy_call(
                'boto3.dynamodb.transform.register_high_level_interface'),
            unique_id='high-level-dynamodb')
+        self._session.register(
+            'creating-resource-class.dynamodb.Table',
+            boto3.utils.lazy_call(
+                'boto3.dynamodb.table.register_table_methods'),
+            unique_id='high-level-dynamodb-table')
13 changes: 2 additions & 11 deletions docs/source/guide/collections.rst
@@ -93,17 +93,12 @@ Limiting Results
----------------
It is possible to limit the number of items returned from a collection
by using either the
-:py:meth:`~boto3.resources.collection.ResourceCollection.limit` method or
-keyword argument::
+:py:meth:`~boto3.resources.collection.ResourceCollection.limit` method::

    # S3 iterate over first ten buckets
    for bucket in s3.buckets.limit(10):
        print(bucket.name)

-    # Keyword argument example
-    for bucket in s3.buckets.filter(limit=10):
-        print(bucket.name)

In both cases, up to 10 items total will be returned. If you do not
have 10 buckets, then all of your buckets will be returned.

@@ -112,16 +107,12 @@ Controlling Page Size
Collections automatically handle paging through results, but you may want
to control the number of items returned from a single service operation
call. You can do so using the
-:py:meth:`~boto3.resources.collection.ResourceCollection.page_size` method
-or keyword argument::
+:py:meth:`~boto3.resources.collection.ResourceCollection.page_size` method::

    # S3 iterate over all objects 100 at a time
    for obj in bucket.objects.page_size(100):
        print(obj.key)

-    # Keyword argument example
-    for obj in bucket.objects.all(page_size=100):
-        print(obj.key)

By default, S3 will return 1000 objects at a time, so the above code
would let you process the items in smaller batches, which could be
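With the keyword arguments removed, the two controls can still be combined by chaining the collection methods; a small sketch, assuming an existing bucket (the bucket name is illustrative)::

    import boto3

    s3 = boto3.resource('s3')
    bucket = s3.Bucket('my-example-bucket')  # hypothetical bucket

    # Fetch 100 keys per ListObjects call, but stop after 250 objects total.
    for obj in bucket.objects.page_size(100).limit(250):
        print(obj.key)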
4 changes: 2 additions & 2 deletions setup.py
@@ -15,8 +15,8 @@


requires = [
-    'botocore>=1.0.0a3,<1.0.0b',
-    'bcdoc==0.12.2',
+    'botocore==1.0.0b1',
+    'bcdoc>=0.15.0,<0.16.0',
    'jmespath>=0.6.2,<1.0.0',
]

12 changes: 12 additions & 0 deletions tests/functional/dynamodb/__init__.py
@@ -0,0 +1,12 @@
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
27 changes: 27 additions & 0 deletions tests/functional/dynamodb/test_table.py
@@ -0,0 +1,27 @@
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import unittest, mock

import boto3


class TestTableResourceCustomizations(unittest.TestCase):

    maxDiff = None

    def setUp(self):
        self.resource = boto3.resource('dynamodb')

    def test_resource_has_batch_writer_added(self):
        table = self.resource.Table('mytable')
        self.assertTrue(hasattr(table, 'batch_writer'))
18 changes: 17 additions & 1 deletion tests/integration/test_dynamodb.py
@@ -70,7 +70,7 @@ def setUpClass(cls):
    @classmethod
    def tearDownClass(cls):
        cls.table.delete_item(Key={'MyHashKey': 'mykey'})
-        super(TestDynamoDBConditions, cls).setUpClass()
+        super(TestDynamoDBConditions, cls).tearDownClass()

    def test_filter_expression(self):
        r = self.table.scan(
@@ -186,3 +186,19 @@ def test_condition_in_list(self):
        r = self.table.scan(
            FilterExpression=Attr('MyList[0]').eq('foo'))
        self.assertEqual(r['Items'][0]['MyList'][0], 'foo')


class TestDynamodbBatchWrite(BaseDynamoDBTest):
    def test_batch_write_items(self):
        num_elements = 1000
        items = []
        for i in range(num_elements):
            items.append({'MyHashKey': 'foo%s' % i,
                          'OtherKey': 'bar%s' % i})
        with self.table.batch_writer() as batch:
            for item in items:
                batch.put_item(Item=item)

        # Verify all the items were added to dynamodb.
        for obj in self.table.scan()['Items']:
            self.assertIn(obj, items)
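As a rough sketch of the load this test generates, assuming the default flush_amount of 25 and no UnprocessedItems returned by DynamoDB::

    num_elements = 1000
    flush_amount = 25
    # Each flush issues one batch_write_item request; __exit__ drains any
    # remainder, so 1000 items translate into at least 40 requests.
    min_requests = num_elements // flush_amount
    print(min_requests)  # 40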