PEP8 refactoring.
tkem committed Jun 6, 2015
1 parent fcf20a4 commit d1b068e
Showing 10 changed files with 168 additions and 153 deletions.
11 changes: 6 additions & 5 deletions cachetools/__init__.py
@@ -1,11 +1,12 @@
 """Extensible memoizing collections and decorators"""
 
 from .cache import Cache
-from .decorators import cachedmethod
-from .lfu import LFUCache, lfu_cache
-from .lru import LRUCache, lru_cache
-from .rr import RRCache, rr_cache
-from .ttl import TTLCache, ttl_cache
+from .func import lfu_cache, lru_cache, rr_cache, ttl_cache
+from .lfu import LFUCache
+from .lru import LRUCache
+from .method import cachedmethod
+from .rr import RRCache
+from .ttl import TTLCache
 
 __all__ = (
     'Cache',
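
The net effect on the package namespace: the function decorators now come from the new cachetools.func module and cachedmethod from the new cachetools.method module, while the names exported by cachetools itself are unchanged (the hunk above accounts for all 6 additions and 5 deletions, so the truncated remainder of __all__ is untouched). Existing client imports such as the following keep working:

    from cachetools import LRUCache, lru_cache, cachedmethod
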
97 changes: 0 additions & 97 deletions cachetools/decorators.py

This file was deleted.

120 changes: 120 additions & 0 deletions cachetools/func.py
@@ -0,0 +1,120 @@
+import collections
+import functools
+import random
+import time
+
+from .lfu import LFUCache
+from .lru import LRUCache
+from .rr import RRCache
+from .ttl import TTLCache
+
+try:
+    from threading import RLock
+except ImportError:
+    from dummy_threading import RLock
+
+
+_CacheInfo = collections.namedtuple('CacheInfo', [
+    'hits', 'misses', 'maxsize', 'currsize'
+])
+
+
+class _NullContext:
+    def __enter__(self):
+        pass
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        pass
+
+
+_nullcontext = _NullContext()
+
+
+def _makekey_untyped(args, kwargs):
+    return (args, tuple(sorted(kwargs.items())))
+
+
+def _makekey_typed(args, kwargs):
+    key = _makekey_untyped(args, kwargs)
+    key += tuple(type(v) for v in args)
+    key += tuple(type(v) for _, v in sorted(kwargs.items()))
+    return key
+
+
+def _cachedfunc(cache, typed=False, lock=None):
+    makekey = _makekey_typed if typed else _makekey_untyped
+    context = lock() if lock else _nullcontext
+
+    def decorator(func):
+        stats = [0, 0]
+
+        def wrapper(*args, **kwargs):
+            key = makekey(args, kwargs)
+            with context:
+                try:
+                    result = cache[key]
+                    stats[0] += 1
+                    return result
+                except KeyError:
+                    stats[1] += 1
+            result = func(*args, **kwargs)
+            with context:
+                try:
+                    cache[key] = result
+                except ValueError:
+                    pass  # value too large
+            return result
+
+        def cache_info():
+            with context:
+                hits, misses = stats
+                maxsize = cache.maxsize
+                currsize = cache.currsize
+            return _CacheInfo(hits, misses, maxsize, currsize)
+
+        def cache_clear():
+            with context:
+                cache.clear()
+
+        wrapper.cache_info = cache_info
+        wrapper.cache_clear = cache_clear
+        return functools.update_wrapper(wrapper, func)
+
+    return decorator
+
+
+def lfu_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock):
+    """Decorator to wrap a function with a memoizing callable that saves
+    up to `maxsize` results based on a Least Frequently Used (LFU)
+    algorithm.
+    """
+    return _cachedfunc(LFUCache(maxsize, getsizeof), typed, lock)
+
+
+def lru_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock):
+    """Decorator to wrap a function with a memoizing callable that saves
+    up to `maxsize` results based on a Least Recently Used (LRU)
+    algorithm.
+    """
+    return _cachedfunc(LRUCache(maxsize, getsizeof), typed, lock)
+
+
+def rr_cache(maxsize=128, choice=random.choice, typed=False, getsizeof=None,
+             lock=RLock):
+    """Decorator to wrap a function with a memoizing callable that saves
+    up to `maxsize` results based on a Random Replacement (RR)
+    algorithm.
+    """
+    return _cachedfunc(RRCache(maxsize, choice, getsizeof), typed, lock)
+
+
+def ttl_cache(maxsize=128, ttl=600, timer=time.time, typed=False,
+              getsizeof=None, lock=RLock):
+    """Decorator to wrap a function with a memoizing callable that saves
+    up to `maxsize` results based on a Least Recently Used (LRU)
+    algorithm with a per-item time-to-live (TTL) value.
+    """
+    return _cachedfunc(TTLCache(maxsize, ttl, timer, getsizeof), typed, lock)
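
For illustration (not part of the commit), a minimal usage sketch of the new cachetools.func decorators, which keep functools.lru_cache's cache_info()/cache_clear() interface:

    from cachetools import lru_cache

    @lru_cache(maxsize=2)
    def fib(n):
        return n if n < 2 else fib(n - 1) + fib(n - 2)

    fib(10)
    print(fib.cache_info())  # CacheInfo(hits=..., misses=..., maxsize=2, currsize=2)
    fib.cache_clear()

Passing typed=True additionally keys on argument types, so fib(1) and fib(1.0) would be cached separately; passing lock=None trades thread safety for less locking overhead via the _NullContext defined above.
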
11 changes: 0 additions & 11 deletions cachetools/lfu.py
@@ -2,8 +2,6 @@
 import operator
 
 from .cache import Cache
-from .decorators import cachedfunc
-from .lock import RLock
 
 
 class LFUCache(Cache):
@@ -33,12 +31,3 @@ def popitem(self):
         except ValueError:
             raise KeyError('cache is empty')
         return key, self.pop(key)
-
-
-def lfu_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock):
-    """Decorator to wrap a function with a memoizing callable that saves
-    up to `maxsize` results based on a Least Frequently Used (LFU)
-    algorithm.
-    """
-    return cachedfunc(LFUCache(maxsize, getsizeof), typed, lock)
6 changes: 0 additions & 6 deletions cachetools/lock.py

This file was deleted.

11 changes: 0 additions & 11 deletions cachetools/lru.py
@@ -1,6 +1,4 @@
 from .cache import Cache
-from .decorators import cachedfunc
-from .lock import RLock
 
 
 class Link(object):
@@ -85,12 +83,3 @@ def popitem(self):
         Cache.__delitem__(self, key)
         link.unlink()
         return (key, link.value)
-
-
-def lru_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock):
-    """Decorator to wrap a function with a memoizing callable that saves
-    up to `maxsize` results based on a Least Recently Used (LRU)
-    algorithm.
-    """
-    return cachedfunc(LRUCache(maxsize, getsizeof), typed, lock)
42 changes: 42 additions & 0 deletions cachetools/method.py
@@ -0,0 +1,42 @@
+import functools
+
+
+def _makekey_untyped(method, args, kwargs):
+    return (method, args, tuple(sorted(kwargs.items())))
+
+
+def _makekey_typed(method, args, kwargs):
+    key = _makekey_untyped(method, args, kwargs)
+    key += tuple(type(v) for v in args)
+    key += tuple(type(v) for _, v in sorted(kwargs.items()))
+    return key
+
+
+def cachedmethod(cache, typed=False):
+    """Decorator to wrap a class or instance method with a memoizing
+    callable that saves results in a (possibly shared) cache.
+    """
+    makekey = _makekey_typed if typed else _makekey_untyped
+
+    def decorator(method):
+        def wrapper(self, *args, **kwargs):
+            mapping = cache(self)
+            if mapping is None:
+                return method(self, *args, **kwargs)
+            key = makekey(method, args, kwargs)
+            try:
+                return mapping[key]
+            except KeyError:
+                pass
+            result = method(self, *args, **kwargs)
+            try:
+                mapping[key] = result
+            except ValueError:
+                pass  # value too large
+            return result
+
+        wrapper.cache = cache
+        return functools.update_wrapper(wrapper, method)
+
+    return decorator
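
A usage sketch (again, not part of the commit) for the relocated cachedmethod: as the wrapper above shows, its cache argument is a callable that maps the instance to the mapping to use, or to None to bypass caching. Assuming a per-instance LRUCache:

    import operator

    from cachetools import LRUCache, cachedmethod

    class Squarer(object):
        def __init__(self):
            self.cache = LRUCache(maxsize=16)

        @cachedmethod(operator.attrgetter('cache'))
        def square(self, x):
            return x * x

Because the getter is evaluated on every call, instances can carry their own caches as here, or all return one shared mapping; setting self.cache = None disables caching for that instance.
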
12 changes: 0 additions & 12 deletions cachetools/rr.py
@@ -1,8 +1,6 @@
 import random
 
 from .cache import Cache
-from .decorators import cachedfunc
-from .lock import RLock
 
 
 class RRCache(Cache):
@@ -25,13 +23,3 @@ def popitem(self):
     def choice(self):
         """The `choice` function used by the cache."""
         return self.__choice
-
-
-def rr_cache(maxsize=128, choice=random.choice, typed=False, getsizeof=None,
-             lock=RLock):
-    """Decorator to wrap a function with a memoizing callable that saves
-    up to `maxsize` results based on a Random Replacement (RR)
-    algorithm.
-    """
-    return cachedfunc(RRCache(maxsize, choice, getsizeof), typed, lock)
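
For illustration (not part of the commit), the choice parameter selects which item a full RRCache evicts; a seeded random.Random makes the eviction order reproducible, e.g. in tests:

    import random

    from cachetools import rr_cache

    rng = random.Random(42)  # seeded, so evictions are reproducible

    @rr_cache(maxsize=2, choice=rng.choice)
    def double(x):
        return 2 * x
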
11 changes: 0 additions & 11 deletions cachetools/ttl.py
@@ -2,8 +2,6 @@
 import time
 
 from .cache import Cache
-from .decorators import cachedfunc
-from .lock import RLock
 
 
 class Link(object):
@@ -225,12 +223,3 @@ def wrapper(self, *args, **kwargs):
     get = __nested(Cache.get)
     pop = __nested(Cache.pop)
     setdefault = __nested(Cache.setdefault)
-
-
-def ttl_cache(maxsize=128, ttl=600, timer=time.time, typed=False,
-              getsizeof=None, lock=RLock):
-    """Decorator to wrap a function with a memoizing callable that saves
-    up to `maxsize` results based on a Least Recently Used (LRU)
-    algorithm with a per-item time-to-live (TTL) value.
-    """
-    return cachedfunc(TTLCache(maxsize, ttl, timer, getsizeof), typed, lock)
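
Finally, a sketch (not part of the commit) of the relocated ttl_cache; the timer argument accepts any zero-argument callable returning the current time, and assuming TTLCache expires an entry once timer() has advanced past its time-to-live, expiry is straightforward to exercise:

    import time

    from cachetools import ttl_cache

    @ttl_cache(maxsize=128, ttl=0.1, timer=time.time)
    def now():
        return time.time()

    first = now()
    assert now() == first  # still cached within the TTL window
    time.sleep(0.2)
    assert now() != first  # recomputed after the entry expired
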
File renamed without changes.
