forked from aio-libs/aiocache
/
base.py
558 lines (458 loc) · 19.7 KB
/
base.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
import asyncio
import functools
import logging
import os
import time
from abc import abstractmethod
from enum import Enum
from types import TracebackType
from typing import Callable, Generic, List, Optional, Set, TYPE_CHECKING, Type, TypeVar
from aiocache.serializers import StringSerializer
if TYPE_CHECKING: # pragma: no cover
from aiocache.plugins import BasePlugin
from aiocache.serializers import BaseSerializer
logger = logging.getLogger(__name__)
SENTINEL = object()
CacheKeyType = TypeVar("CacheKeyType")
class API:
    """Registry of the public cache commands plus the decorators that wrap them.

    Every coroutine decorated with :meth:`register` is recorded in :attr:`CMDS`;
    the ``_Conn`` proxy later mirrors those commands so they can be executed
    against a pinned connection.
    """

    # All coroutines registered as public cache commands (add, get, set, ...).
    CMDS: Set[Callable[..., object]] = set()

    @classmethod
    def register(cls, func):
        """Record ``func`` as a public cache command and return it unchanged."""
        API.CMDS.add(func)
        return func

    @classmethod
    def unregister(cls, func):
        """Remove ``func`` from the registered commands; no-op if absent."""
        API.CMDS.discard(func)

    @classmethod
    def timeout(cls, func):
        """
        This decorator sets a maximum timeout for a coroutine to execute. The timeout can be both
        set in the ``self.timeout`` attribute or in the ``timeout`` kwarg of the function call.
        I.e if you have a function ``get(self, key)``, if its decorated with this decorator, you
        will be able to call it with ``await get(self, "my_key", timeout=4)``.

        Use 0 or None to disable the timeout.
        """
        # Identity-compared sentinel: unlike the previous "NOT_SET" string it
        # cannot collide with a value a caller passes, and ``is`` never invokes
        # ``__eq__`` on arbitrary timeout objects.
        NOT_SET = object()

        @functools.wraps(func)
        async def _timeout(self, *args, timeout=NOT_SET, **kwargs):
            timeout = self.timeout if timeout is NOT_SET else timeout
            if timeout == 0 or timeout is None:
                return await func(self, *args, **kwargs)
            return await asyncio.wait_for(func(self, *args, **kwargs), timeout)

        return _timeout

    @classmethod
    def aiocache_enabled(cls, fake_return=None):
        """
        Use this decorator to be able to fake the return of the function by setting the
        ``AIOCACHE_DISABLE`` environment variable
        """

        def enabled(func):
            @functools.wraps(func)
            async def _enabled(*args, **kwargs):
                # The env var is checked per call, so the cache can be toggled at runtime.
                if os.getenv("AIOCACHE_DISABLE") == "1":
                    return fake_return
                return await func(*args, **kwargs)

            return _enabled

        return enabled

    @classmethod
    def plugins(cls, func):
        """Run each plugin's ``pre_<cmd>`` hook before and ``post_<cmd>`` after ``func``."""

        @functools.wraps(func)
        async def _plugins(self, *args, **kwargs):
            start = time.monotonic()
            for plugin in self.plugins:
                await getattr(plugin, "pre_{}".format(func.__name__))(self, *args, **kwargs)

            ret = await func(self, *args, **kwargs)
            end = time.monotonic()

            # post hooks also receive the elapsed time and the command's result.
            for plugin in self.plugins:
                await getattr(plugin, "post_{}".format(func.__name__))(
                    self, *args, took=end - start, ret=ret, **kwargs
                )
            return ret

        return _plugins
class BaseCache(Generic[CacheKeyType]):
    """
    Base class that aggregates the common logic for the different caches that may exist. Cache
    related available options are:

    :param serializer: obj derived from :class:`aiocache.serializers.BaseSerializer`. Default is
        :class:`aiocache.serializers.StringSerializer`.
    :param plugins: list of :class:`aiocache.plugins.BasePlugin` derived classes. Default is empty
        list.
    :param namespace: string to use as default prefix for the key used in all operations of
        the backend. Default is an empty string, "".
    :param key_builder: alternative callable to build the key. Receives the key and the namespace
        as params and should return a string that can be used as a key by the underlying backend.
    :param timeout: int or float in seconds specifying maximum timeout for the operations to last.
        By default its 5. Use 0 or None if you want to disable it.
    :param ttl: int the expiration time in seconds to use as a default in all operations of
        the backend. It can be overridden in the specific calls.
    """

    # Short backend identifier, set by each concrete subclass.
    NAME: str

    def __init__(
        self,
        serializer: Optional["BaseSerializer"] = None,
        plugins: Optional[List["BasePlugin"]] = None,
        namespace: str = "",
        key_builder: Callable[[str, str], str] = lambda key, namespace: f"{namespace}{key}",
        timeout: Optional[float] = 5,
        ttl: Optional[float] = None,
    ):
        self.timeout = float(timeout) if timeout is not None else None
        self.ttl = float(ttl) if ttl is not None else None
        self.namespace = namespace
        self._build_key = key_builder
        self._serializer = serializer or StringSerializer()
        self._plugins = plugins or []

    @property
    def serializer(self):
        return self._serializer

    @serializer.setter
    def serializer(self, value):
        self._serializer = value

    @property
    def plugins(self):
        return self._plugins

    @plugins.setter
    def plugins(self, value):
        self._plugins = value

    @API.register
    @API.aiocache_enabled(fake_return=True)
    @API.timeout
    @API.plugins
    async def add(self, key, value, ttl=SENTINEL, dumps_fn=None, namespace=None, _conn=None):
        """
        Stores the value in the given key with ttl if specified. Raises an error if the
        key already exists.

        :param key: str
        :param value: obj
        :param ttl: int the expiration time in seconds. Due to memcached
            restrictions if you want compatibility use int. In case you
            need milliseconds, redis and memory support float ttls
        :param dumps_fn: callable alternative to use as dumps function
        :param namespace: str alternative namespace to use
        :param timeout: int or float in seconds specifying maximum timeout
            for the operations to last
        :returns: True if key is inserted
        :raises:
            - ValueError if key already exists
            - :class:`asyncio.TimeoutError` if it lasts more than self.timeout
        """
        start = time.monotonic()
        dumps = dumps_fn or self.serializer.dumps
        ns_key = self.build_key(key, namespace)

        await self._add(ns_key, dumps(value), ttl=self._get_ttl(ttl), _conn=_conn)

        logger.debug("ADD %s %s (%.4f)s", ns_key, True, time.monotonic() - start)
        return True

    async def _add(self, key, value, ttl, _conn=None):
        raise NotImplementedError()

    @API.register
    @API.aiocache_enabled()
    @API.timeout
    @API.plugins
    async def get(self, key, default=None, loads_fn=None, namespace=None, _conn=None):
        """
        Get a value from the cache. Returns default if not found.

        :param key: str
        :param default: obj to return when key is not found
        :param loads_fn: callable alternative to use as loads function
        :param namespace: str alternative namespace to use
        :param timeout: int or float in seconds specifying maximum timeout
            for the operations to last
        :returns: obj loaded
        :raises: :class:`asyncio.TimeoutError` if it lasts more than self.timeout
        """
        start = time.monotonic()
        loads = loads_fn or self.serializer.loads
        ns_key = self.build_key(key, namespace)

        value = loads(await self._get(ns_key, encoding=self.serializer.encoding, _conn=_conn))

        logger.debug("GET %s %s (%.4f)s", ns_key, value is not None, time.monotonic() - start)
        # NOTE: a stored value that deserializes to None is indistinguishable
        # from a miss and yields ``default``.
        return value if value is not None else default

    async def _get(self, key, encoding, _conn=None):
        raise NotImplementedError()

    async def _gets(self, key, encoding="utf-8", _conn=None):
        raise NotImplementedError()

    @API.register
    @API.aiocache_enabled(fake_return=[])
    @API.timeout
    @API.plugins
    async def multi_get(self, keys, loads_fn=None, namespace=None, _conn=None):
        """
        Get multiple values from the cache, values not found are Nones.

        :param keys: list of str
        :param loads_fn: callable alternative to use as loads function
        :param namespace: str alternative namespace to use
        :param timeout: int or float in seconds specifying maximum timeout
            for the operations to last
        :returns: list of objs
        :raises: :class:`asyncio.TimeoutError` if it lasts more than self.timeout
        """
        start = time.monotonic()
        loads = loads_fn or self.serializer.loads

        ns_keys = [self.build_key(key, namespace) for key in keys]
        values = [
            loads(value)
            for value in await self._multi_get(
                ns_keys, encoding=self.serializer.encoding, _conn=_conn
            )
        ]

        logger.debug(
            "MULTI_GET %s %d (%.4f)s",
            ns_keys,
            len([value for value in values if value is not None]),
            time.monotonic() - start,
        )
        return values

    async def _multi_get(self, keys, encoding, _conn=None):
        raise NotImplementedError()

    @API.register
    @API.aiocache_enabled(fake_return=True)
    @API.timeout
    @API.plugins
    async def set(
        self, key, value, ttl=SENTINEL, dumps_fn=None, namespace=None, _cas_token=None, _conn=None
    ):
        """
        Stores the value in the given key with ttl if specified

        :param key: str
        :param value: obj
        :param ttl: int the expiration time in seconds. Due to memcached
            restrictions if you want compatibility use int. In case you
            need milliseconds, redis and memory support float ttls
        :param dumps_fn: callable alternative to use as dumps function
        :param namespace: str alternative namespace to use
        :param timeout: int or float in seconds specifying maximum timeout
            for the operations to last
        :returns: True if the value was set
        :raises: :class:`asyncio.TimeoutError` if it lasts more than self.timeout
        """
        start = time.monotonic()
        dumps = dumps_fn or self.serializer.dumps
        ns_key = self.build_key(key, namespace)

        res = await self._set(
            ns_key, dumps(value), ttl=self._get_ttl(ttl), _cas_token=_cas_token, _conn=_conn
        )

        logger.debug("SET %s %d (%.4f)s", ns_key, True, time.monotonic() - start)
        return res

    async def _set(self, key, value, ttl, _cas_token=None, _conn=None):
        raise NotImplementedError()

    @API.register
    @API.aiocache_enabled(fake_return=True)
    @API.timeout
    @API.plugins
    async def multi_set(self, pairs, ttl=SENTINEL, dumps_fn=None, namespace=None, _conn=None):
        """
        Stores multiple values in the given keys.

        :param pairs: iterable of two element iterables. First is key and second is value
        :param ttl: int the expiration time in seconds. Due to memcached
            restrictions if you want compatibility use int. In case you
            need milliseconds, redis and memory support float ttls
        :param dumps_fn: callable alternative to use as dumps function
        :param namespace: str alternative namespace to use
        :param timeout: int or float in seconds specifying maximum timeout
            for the operations to last
        :returns: True
        :raises: :class:`asyncio.TimeoutError` if it lasts more than self.timeout
        """
        start = time.monotonic()
        dumps = dumps_fn or self.serializer.dumps

        tmp_pairs = [(self.build_key(key, namespace), dumps(value)) for key, value in pairs]

        await self._multi_set(tmp_pairs, ttl=self._get_ttl(ttl), _conn=_conn)

        logger.debug(
            "MULTI_SET %s %d (%.4f)s",
            [key for key, value in tmp_pairs],
            # Use tmp_pairs here: ``pairs`` may be a one-shot iterable (e.g. a
            # generator) that was already consumed above and has no len().
            len(tmp_pairs),
            time.monotonic() - start,
        )
        return True

    async def _multi_set(self, pairs, ttl, _conn=None):
        raise NotImplementedError()

    @API.register
    @API.aiocache_enabled(fake_return=0)
    @API.timeout
    @API.plugins
    async def delete(self, key, namespace=None, _conn=None):
        """
        Deletes the given key.

        :param key: Key to be deleted
        :param namespace: str alternative namespace to use
        :param timeout: int or float in seconds specifying maximum timeout
            for the operations to last
        :returns: int number of deleted keys
        :raises: :class:`asyncio.TimeoutError` if it lasts more than self.timeout
        """
        start = time.monotonic()
        ns_key = self.build_key(key, namespace)
        ret = await self._delete(ns_key, _conn=_conn)
        logger.debug("DELETE %s %d (%.4f)s", ns_key, ret, time.monotonic() - start)
        return ret

    async def _delete(self, key, _conn=None):
        raise NotImplementedError()

    @API.register
    @API.aiocache_enabled(fake_return=False)
    @API.timeout
    @API.plugins
    async def exists(self, key, namespace=None, _conn=None):
        """
        Check key exists in the cache.

        :param key: str key to check
        :param namespace: str alternative namespace to use
        :param timeout: int or float in seconds specifying maximum timeout
            for the operations to last
        :returns: True if key exists otherwise False
        :raises: :class:`asyncio.TimeoutError` if it lasts more than self.timeout
        """
        start = time.monotonic()
        ns_key = self.build_key(key, namespace)
        ret = await self._exists(ns_key, _conn=_conn)
        logger.debug("EXISTS %s %d (%.4f)s", ns_key, ret, time.monotonic() - start)
        return ret

    async def _exists(self, key, _conn=None):
        raise NotImplementedError()

    @API.register
    @API.aiocache_enabled(fake_return=1)
    @API.timeout
    @API.plugins
    async def increment(self, key, delta=1, namespace=None, _conn=None):
        """
        Increments value stored in key by delta (can be negative). If key doesn't
        exist, it creates the key with delta as value.

        :param key: str key to check
        :param delta: int amount to increment/decrement
        :param namespace: str alternative namespace to use
        :param timeout: int or float in seconds specifying maximum timeout
            for the operations to last
        :returns: Value of the key once incremented. -1 if key is not found.
        :raises: :class:`asyncio.TimeoutError` if it lasts more than self.timeout
        :raises: :class:`TypeError` if value is not incrementable
        """
        start = time.monotonic()
        ns_key = self.build_key(key, namespace)
        ret = await self._increment(ns_key, delta, _conn=_conn)
        logger.debug("INCREMENT %s %d (%.4f)s", ns_key, ret, time.monotonic() - start)
        return ret

    async def _increment(self, key, delta, _conn=None):
        raise NotImplementedError()

    @API.register
    @API.aiocache_enabled(fake_return=False)
    @API.timeout
    @API.plugins
    async def expire(self, key, ttl, namespace=None, _conn=None):
        """
        Set the ttl to the given key. By setting it to 0, it will disable it

        :param key: str key to expire
        :param ttl: int number of seconds for expiration. If 0, ttl is disabled
        :param namespace: str alternative namespace to use
        :param timeout: int or float in seconds specifying maximum timeout
            for the operations to last
        :returns: True if set, False if key is not found
        :raises: :class:`asyncio.TimeoutError` if it lasts more than self.timeout
        """
        start = time.monotonic()
        ns_key = self.build_key(key, namespace)
        ret = await self._expire(ns_key, ttl, _conn=_conn)
        logger.debug("EXPIRE %s %d (%.4f)s", ns_key, ret, time.monotonic() - start)
        return ret

    async def _expire(self, key, ttl, _conn=None):
        raise NotImplementedError()

    @API.register
    @API.aiocache_enabled(fake_return=True)
    @API.timeout
    @API.plugins
    async def clear(self, namespace=None, _conn=None):
        """
        Clears the cache in the cache namespace. If an alternative namespace is given, it will
        clear those ones instead.

        :param namespace: str alternative namespace to use
        :param timeout: int or float in seconds specifying maximum timeout
            for the operations to last
        :returns: True
        :raises: :class:`asyncio.TimeoutError` if it lasts more than self.timeout
        """
        start = time.monotonic()
        ret = await self._clear(namespace, _conn=_conn)
        logger.debug("CLEAR %s %d (%.4f)s", namespace, ret, time.monotonic() - start)
        return ret

    async def _clear(self, namespace, _conn=None):
        raise NotImplementedError()

    @API.register
    @API.aiocache_enabled()
    @API.timeout
    @API.plugins
    async def raw(self, command, *args, _conn=None, **kwargs):
        """
        Send the raw command to the underlying client. Note that by using this CMD you
        will lose compatibility with other backends.

        Due to limitations with aiomcache client, args have to be provided as bytes.
        For rest of backends, str.

        :param command: str with the command.
        :param timeout: int or float in seconds specifying maximum timeout
            for the operations to last
        :returns: whatever the underlying client returns
        :raises: :class:`asyncio.TimeoutError` if it lasts more than self.timeout
        """
        start = time.monotonic()
        ret = await self._raw(
            command, *args, encoding=self.serializer.encoding, _conn=_conn, **kwargs
        )
        logger.debug("%s (%.4f)s", command, time.monotonic() - start)
        return ret

    async def _raw(self, command, *args, **kwargs):
        raise NotImplementedError()

    async def _redlock_release(self, key, value):
        raise NotImplementedError()

    @API.timeout
    async def close(self, *args, _conn=None, **kwargs):
        """
        Perform any resource clean up necessary to exit the program safely.
        After closing, cmd execution is still possible but you will have to
        close again before exiting.

        :raises: :class:`asyncio.TimeoutError` if it lasts more than self.timeout
        """
        start = time.monotonic()
        ret = await self._close(*args, _conn=_conn, **kwargs)
        logger.debug("CLOSE (%.4f)s", time.monotonic() - start)
        return ret

    async def _close(self, *args, **kwargs):
        # Default: nothing to clean up; backends override as needed.
        pass

    @abstractmethod
    def build_key(self, key: str, namespace: Optional[str] = None) -> CacheKeyType:
        raise NotImplementedError()

    def _str_build_key(self, key: str, namespace: Optional[str] = None) -> str:
        """Simple key builder that can be used in subclasses for build_key()."""
        # Enum members are accepted despite the str annotation; their .value is used.
        key_name = key.value if isinstance(key, Enum) else key
        ns = self.namespace if namespace is None else namespace
        return self._build_key(key_name, ns)

    def _get_ttl(self, ttl):
        # SENTINEL distinguishes "not given" (fall back to the instance default)
        # from an explicit None (no expiration).
        return ttl if ttl is not SENTINEL else self.ttl

    def get_connection(self):
        return _Conn(self)

    async def acquire_conn(self):
        # Base implementation has no pooling: the cache itself is the connection.
        return self

    async def release_conn(self, conn):
        pass

    async def __aenter__(self):
        return self

    async def __aexit__(
        self, exc_type: Optional[Type[BaseException]],
        exc: Optional[BaseException], tb: Optional[TracebackType]
    ) -> None:
        await self.close()
class _Conn:
def __init__(self, cache):
self._cache = cache
self._conn = None
async def __aenter__(self):
self._conn = await self._cache.acquire_conn()
return self
async def __aexit__(self, exc_type, exc_value, traceback):
await self._cache.release_conn(self._conn)
def __getattr__(self, name):
return self._cache.__getattribute__(name)
@classmethod
def _inject_conn(cls, cmd_name):
async def _do_inject_conn(self, *args, **kwargs):
return await getattr(self._cache, cmd_name)(*args, _conn=self._conn, **kwargs)
return _do_inject_conn
# Mirror every registered cache command (add/get/set/...) onto _Conn so a
# pinned connection object can be used exactly like the cache itself.
for cmd in API.CMDS:
    _cmd_name = cmd.__name__
    setattr(_Conn, _cmd_name, _Conn._inject_conn(_cmd_name))