
Commit

added DEFAULT_ITEM_SIZE
tarekziade committed Oct 5, 2012
1 parent 876fc98 commit 8c0732d
Showing 2 changed files with 11 additions and 25 deletions.
README.rst (3 changes: 2 additions & 1 deletion)
@@ -30,6 +30,7 @@ Here's an example::
             'MAX_POOL_SIZE': 100,
             'BLACKLIST_TIME': 20,
             'SOCKET_TIMEOUT': 5,
+            'MAX_ITEM_SIZE': 1000*100,
         }
     }
 }
@@ -40,5 +41,5 @@ Options:
 - **MAX_POOL_SIZE:** -- The maximum number of connectors in the pool. default: 35.
 - **BLACKLIST_TIME** -- The time in seconds a server stays in the blacklist. default: 60
 - **SOCKET_TIMEOUT** -- the time in seconds for the socket timeout. default: 4
-
+- **MAX_ITEM_SIZE** -- The maximum size for an item in Memcache.
 


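For context, a complete Django CACHES setting that uses the new option might look like the sketch below. The BACKEND dotted path and the LOCATION value are assumptions (neither appears in this diff); only the OPTIONS keys and the 1000*100 value come from the README example above.

    # Hypothetical settings.py sketch -- BACKEND and LOCATION are assumed,
    # only the OPTIONS keys are documented in the README.
    CACHES = {
        'default': {
            'BACKEND': 'memcachepool.cache.UMemcacheCache',  # assumed path
            'LOCATION': '127.0.0.1:11211',                   # assumed server
            'OPTIONS': {
                'MAX_POOL_SIZE': 100,
                'BLACKLIST_TIME': 20,
                'SOCKET_TIMEOUT': 5,
                'MAX_ITEM_SIZE': 1000 * 100,   # cap items at ~100 kB
            }
        }
    }
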
memcachepool/cache.py (33 changes: 9 additions & 24 deletions)
@@ -13,6 +13,9 @@
 from memcachepool.pool import ClientPool
 
 
+DEFAULT_ITEM_SIZE = 1000 * 1000
+
+
 # XXX using python-memcached style pickling
 # but maybe we could use something else like
 # json
@@ -39,6 +42,8 @@ def __init__(self, server, params):
         self.maxsize = int(params.get('MAX_POOL_SIZE', 35))
         self.blacklist_time = int(params.get('BLACKLIST_TIME', 60))
         self.socktimeout = int(params.get('SOCKET_TIMEOUT', 4))
+        self.max_item_size = long(params.get('MAX_ITEM_SIZE',
+                                             DEFAULT_ITEM_SIZE))
         self._pool = ClientPool(self._get_client, maxsize=self.maxsize)
         self._blacklist = {}


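The new option follows the pattern of the existing ones: look it up in the backend's OPTIONS dict and fall back to the module-level default when it is absent. A minimal standalone sketch of that lookup (the diff uses long(), which is Python 2 only; int() is substituted here so the snippet also runs on Python 3):

    # Sketch of the lookup added in __init__ above, pulled out of the class.
    DEFAULT_ITEM_SIZE = 1000 * 1000

    params = {'SOCKET_TIMEOUT': 5}    # MAX_ITEM_SIZE deliberately unset
    max_item_size = int(params.get('MAX_ITEM_SIZE', DEFAULT_ITEM_SIZE))
    assert max_item_size == DEFAULT_ITEM_SIZE   # falls back to ~1 MB
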
@@ -63,30 +68,10 @@ def _get_client(self):
         server = self._pick_server()
         last_error = None
 
-        # until my merge makes it upstream
-        # see : https://github.com/esnme/ultramemcache/pull/9
-        #
-        # we are going to fallback on the poor's man technique
-        # to define the socket timeout.
-        #
-        # Unfortunately, this change impacts all sockets created
-        # in the interim in the same process, but that should
-        # not be a problem since we'll usually set this
-        # timeout to 5 seconds, which is long enough for any
-        # protocol
-        if hasattr(self._lib.Client, 'sock'):  # NOQA
-            def create_client(server):
-                cli = self._lib.Client(server)
-                cli.sock.settimeout(self.socktimeout)
-                return cli
-        else:
-            def create_client(client):  # NOQA
-                old = socket.getdefaulttimeout()
-                socket.setdefaulttimeout(self.socktimeout)
-                try:
-                    return self._lib.Client(server)
-                finally:
-                    socket.setdefaulttimeout(old)
+        def create_client(server):
+            cli = self._lib.Client(server, max_item_size=self.max_item_size)
+            cli.sock.settimeout(self.socktimeout)
+            return cli
 
         while server is not None:
             cli = create_client(server)

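The deleted branch was a workaround for ultramemcache clients that did not expose their socket: it temporarily changed the process-wide default socket timeout, which affected every socket created in the meantime. With the upstream change referenced in the removed comment (esnme/ultramemcache pull #9), each client exposes its socket and accepts max_item_size, so a single factory suffices. A standalone sketch of that factory pattern, using a stand-in client class so it runs without ultramemcache installed (FakeClient and make_factory are illustrative names, not part of the module):

    import socket


    class FakeClient(object):
        """Stand-in for self._lib.Client (ultramemcache); illustrative only."""
        def __init__(self, server, max_item_size=0):
            self.server = server
            self.max_item_size = max_item_size
            self.sock = socket.socket()


    def make_factory(lib_client, socktimeout, max_item_size):
        # Mirrors the closure built in _get_client(): every new connection
        # gets the configured per-socket timeout and item-size cap.
        def create_client(server):
            cli = lib_client(server, max_item_size=max_item_size)
            cli.sock.settimeout(socktimeout)
            return cli
        return create_client


    create_client = make_factory(FakeClient, socktimeout=5,
                                 max_item_size=1000 * 1000)
    cli = create_client('127.0.0.1:11211')
    assert cli.sock.gettimeout() == 5
    assert cli.max_item_size == 1000 * 1000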