From 4794e70c716ab3b9c272a0eaa6e200e50b3d7105 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Mon, 26 Nov 2018 22:34:50 +0100 Subject: [PATCH 01/65] Remove ptyhon 3.3 support. Bump version to 2.0.0. Update requirements to only support redis-py 3.0.x release track --- rediscluster/__init__.py | 2 +- requirements.txt | 2 +- setup.py | 5 ++--- tox.ini | 10 +++++----- 4 files changed, 9 insertions(+), 10 deletions(-) diff --git a/rediscluster/__init__.py b/rediscluster/__init__.py index cb3d40f0..0b270efb 100644 --- a/rediscluster/__init__.py +++ b/rediscluster/__init__.py @@ -16,7 +16,7 @@ setattr(redis, "StrictClusterPipeline", StrictClusterPipeline) # Major, Minor, Fix version -__version__ = (1, 3, 6) +__version__ = (2, 0, 0) if sys.version_info[0:3] == (3, 4, 0): raise RuntimeError("CRITICAL: rediscluster do not work with python 3.4.0. Please use 3.4.1 or higher.") diff --git a/requirements.txt b/requirements.txt index 91015469..53e431b5 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1 +1 @@ -redis==2.10.6 +redis>=3.0.0,<3.1.0 diff --git a/setup.py b/setup.py index 40bbc3f9..8215d9fa 100644 --- a/setup.py +++ b/setup.py @@ -20,7 +20,7 @@ setup( name="redis-py-cluster", - version="1.3.6", + version="2.0.0", description="Library for communicating with Redis Clusters. Built on top of redis-py lib", long_description=readme + '\n\n' + history, long_description_content_type="text/markdown", @@ -32,7 +32,7 @@ url='http://github.com/grokzen/redis-py-cluster', license='MIT', install_requires=[ - 'redis==2.10.6' + 'redis>=3.0.0,<3.1.0' ], keywords=[ 'redis', @@ -51,7 +51,6 @@ 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', diff --git a/tox.ini b/tox.ini index 5e63762f..0c3125fa 100644 --- a/tox.ini +++ b/tox.ini @@ -24,30 +24,30 @@ deps = [testenv:hi35] basepython = python3.5 -deps = +deps = -r{toxinidir}/dev-requirements.txt hiredis == 0.2.0 [testenv:hi36] basepython = python3.6 -deps = +deps = -r{toxinidir}/dev-requirements.txt hiredis == 0.2.0 [testenv:hi37] basepython = python3.7 -deps = +deps = -r{toxinidir}/dev-requirements.txt hiredis == 0.2.0 [testenv:flake8-py34] basepython= python3.4 -deps = +deps = flake8==2.2.5 commands = flake8 --show-source --exclude=.venv,.tox,dist,docs,build,.git --ignore=E501,E731,E402 . [testenv:flake8-py27] basepython= python2.7 -deps = +deps = flake8==2.2.5 commands = flake8 --show-source --exclude=.venv,.tox,dist,docs,build,.git --ignore=E501,E731,E402 . From 211659a9cee6295dad645ebd722ad6221b657698 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Mon, 26 Nov 2018 22:36:20 +0100 Subject: [PATCH 02/65] Use pip cache for travis build --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index 6080ddb2..1b8ad1d1 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,6 +1,7 @@ sudo: false dist: xenial language: python +cache: pip python: - "2.7" - "3.4" From b3100ce8fe43086a09f05a986c04e0c8735b92cb Mon Sep 17 00:00:00 2001 From: Grokzen Date: Mon, 26 Nov 2018 23:15:09 +0100 Subject: [PATCH 03/65] Rename StrictRedisCluster -> RedisCluster and Removed old RedisCluster class to conform to new redis-py class names. 
Updated all usages of the 2 classes throughout the entire codebase --- Makefile | 8 +- README.md | 4 +- benchmarks/simple.py | 13 +-- docs/benchmarks.rst | 14 +-- docs/commands.rst | 6 +- docs/index.rst | 4 +- docs/limitations-and-differences.rst | 6 +- docs/pipelines.rst | 4 +- docs/pubsub.rst | 6 +- docs/readonly-mode.rst | 10 +- docs/threads.rst | 4 +- examples/basic.py | 4 +- examples/basic_password_protected.py | 4 +- examples/from_url_password_protected.py | 4 +- ptp-debug.py | 6 +- rediscluster/__init__.py | 3 +- rediscluster/client.py | 105 ++----------------- rediscluster/connection.py | 4 +- rediscluster/nodemanager.py | 6 +- rediscluster/pipeline.py | 128 ++++++++++++------------ tests/conftest.py | 26 ++--- tests/test_cluster_connection_pool.py | 2 +- tests/test_cluster_obj.py | 56 +++++------ tests/test_node_manager.py | 18 ++-- tests/test_pipeline.py | 8 +- tests/test_pubsub.py | 10 +- 26 files changed, 188 insertions(+), 275 deletions(-) diff --git a/Makefile b/Makefile index e0fbac7a..0c2e1766 100644 --- a/Makefile +++ b/Makefile @@ -378,16 +378,16 @@ redis-install: benchmark: @echo "" - @echo " -- Running Simple benchmark with StrictRedis lib and non cluster server --" + @echo " -- Running Simple benchmark with Redis lib and non cluster server --" python benchmarks/simple.py --port 7007 --timeit --nocluster @echo "" - @echo " -- Running Simple benchmark with StrictRedisCluster lib and cluster server --" + @echo " -- Running Simple benchmark with RedisCluster lib and cluster server --" python benchmarks/simple.py --port 7001 --timeit @echo "" - @echo " -- Running Simple benchmark with pipelines & StrictRedis lib and non cluster server --" + @echo " -- Running Simple benchmark with pipelines & Redis lib and non cluster server --" python benchmarks/simple.py --port 7007 --timeit --pipeline --nocluster @echo "" - @echo " -- Running Simple benchmark with StrictRedisCluster lib and cluster server" + @echo " -- Running Simple benchmark with RedisCluster lib and cluster server" python benchmarks/simple.py --port 7001 --timeit --pipeline ptp: diff --git a/README.md b/README.md index 87b53d17..432b8b2f 100644 --- a/README.md +++ b/README.md @@ -37,12 +37,12 @@ $ pip install redis-py-cluster Small sample script that shows how to get started with RedisCluster. It can also be found in [examples/basic.py](examples/basic.py) ```python ->>> from rediscluster import StrictRedisCluster +>>> from rediscluster import RedisCluster >>> # Requires at least one node for cluster discovery. Multiple nodes is recommended. >>> startup_nodes = [{"host": "127.0.0.1", "port": "7000"}] ->>> rc = StrictRedisCluster(startup_nodes=startup_nodes, decode_responses=True) +>>> rc = RedisCluster(startup_nodes=startup_nodes, decode_responses=True) >>> rc.set("foo", "bar") True diff --git a/benchmarks/simple.py b/benchmarks/simple.py index 1aac44b9..bb6a7175 100644 --- a/benchmarks/simple.py +++ b/benchmarks/simple.py @@ -9,7 +9,7 @@ -p Port on redis server [default: 7000] -n Request number [default: 100000] -c Concurrent client number [default: 1] - --nocluster If flag is set then StrictRedis will be used instead of cluster lib + --nocluster If flag is set then Redis will be used instead of cluster lib --timeit Run a mini benchmark to test performance --pipeline Only usable with --timeit flag. Runs SET/GET inside pipelines. 
--resetlastkey Reset __last__ key @@ -78,12 +78,13 @@ def timeit_pipeline(rc, num): if __name__ == "__main__": args = docopt(__doc__, version="0.3.1") startup_nodes = [{"host": args['--host'], "port": args['-p']}] + if not args["--nocluster"]: - from rediscluster import StrictRedisCluster - rc = StrictRedisCluster(startup_nodes=startup_nodes, max_connections=32, socket_timeout=0.1, decode_responses=True) + from rediscluster import RedisCluster + rc = RedisCluster(startup_nodes=startup_nodes, max_connections=32, socket_timeout=0.1, decode_responses=True) else: - from redis import StrictRedis - rc = StrictRedis(host=args["--host"], port=args["-p"], socket_timeout=0.1, decode_responses=True) + from redis import Redis + rc = Redis(host=args["--host"], port=args["-p"], socket_timeout=0.1, decode_responses=True) # create specified number processes processes = [] single_request = int(args["-n"]) // int(args["-c"]) @@ -102,4 +103,4 @@ def timeit_pipeline(rc, num): for p in processes: p.join() t2 = time.time() - t1 - print("Tested {0}k SET & GET (each 50%) operations took: {1} seconds... {2} operations per second".format(int(args["-n"]) / 1000, t2, int(args["-n"]) / t2 * 2)) \ No newline at end of file + print("Tested {0}k SET & GET (each 50%) operations took: {1} seconds... {2} operations per second".format(int(args["-n"]) / 1000, t2, int(args["-n"]) / t2 * 2)) diff --git a/docs/benchmarks.rst b/docs/benchmarks.rst index dc2dc770..de8ce060 100644 --- a/docs/benchmarks.rst +++ b/docs/benchmarks.rst @@ -8,12 +8,12 @@ These are a few benchmarks that are designed to test specific parts of the code Setup benchmarks ---------------- -Before running any benchmark you should install this lib in editable mode inside a virtualenv so it can import `StrictRedisCluster` lib. +Before running any benchmark you should install this lib in editable mode inside a virtualenv so it can import `RedisCluster` lib. Install with .. code-block:: bash - + pip install -e . You also need a few redis servers to test against. You must have one cluster with at least one node on port `7001` and you must also have a non-clustered server on port `7007`. @@ -42,18 +42,18 @@ Example output and comparison of different runmodes .. code-block:: - -- Running Simple benchmark with StrictRedis lib and non cluster server, 50 concurrent processes and total 50000*2 requests -- + -- Running Simple benchmark with Redis lib and non cluster server, 50 concurrent processes and total 50000*2 requests -- python benchmarks/simple.py --host 127.0.0.1 --timeit --nocluster -c 50 -n 50000 50.0k SET/GET operations took: 2.45 seconds... 40799.93 operations per second - -- Running Simple benchmark with StrictRedisCluster lib and cluster server, 50 concurrent processes and total 50000*2 requests -- + -- Running Simple benchmark with RedisCluster lib and cluster server, 50 concurrent processes and total 50000*2 requests -- python benchmarks/simple.py --host 127.0.0.1 --timeit -c 50 -n 50000 50.0k SET & GET (each 50%) operations took: 9.51 seconds... 31513.71 operations per second - -- Running Simple benchmark with pipelines & StrictRedis lib and non cluster server -- + -- Running Simple benchmark with pipelines & Redis lib and non cluster server -- python benchmarks/simple.py --host 127.0.0.1 --timeit --nocluster -c 50 -n 50000 --pipeline 50.0k SET & GET (each 50%) operations took: 2.1728243827819824 seconds... 
46023.047602201834 operations per second - -- Running Simple benchmark with StrictRedisCluster lib and cluster server + -- Running Simple benchmark with RedisCluster lib and cluster server python benchmarks/simple.py --host 127.0.0.1 --timeit -c 50 -n 50000 --pipeline - 50.0k SET & GET (each 50%) operations took: 1.7181339263916016 seconds... 58202.68051514381 operations per second \ No newline at end of file + 50.0k SET & GET (each 50%) operations took: 1.7181339263916016 seconds... 58202.68051514381 operations per second diff --git a/docs/commands.rst b/docs/commands.rst index 57332057..42f26931 100644 --- a/docs/commands.rst +++ b/docs/commands.rst @@ -1,9 +1,9 @@ Implemented commands ==================== -This will describe all changes that StrictRedisCluster have done to make a command to work in a cluster environment. +This will describe all changes that RedisCluster have done to make a command to work in a cluster environment. -If a command is not listed here then the default implementation from `StrictRedis` in the `redis-py` library is used. +If a command is not listed here then the default implementation from `Redis` in the `redis-py` library is used. @@ -107,7 +107,7 @@ Either because they do not work, there is no working implementation or it is not Overridden methods ------------------ -The following methods is overridden from StrictRedis with a custom implementation. +The following methods is overridden from Redis with a custom implementation. They can operate on keys that exists in different hashslots and require a client side implementation to work. diff --git a/docs/index.rst b/docs/index.rst index 5c376a09..9041cbcf 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -39,13 +39,13 @@ Small sample script that shows how to get started with RedisCluster. It can also .. code-block:: python - >>> from rediscluster import StrictRedisCluster + >>> from rediscluster import RedisCluster >>> # Requires at least one node for cluster discovery. Multiple nodes is recommended. >>> startup_nodes = [{"host": "127.0.0.1", "port": "7000"}] >>> # Note: See note on Python 3 for decode_responses behaviour - >>> rc = StrictRedisCluster(startup_nodes=startup_nodes, decode_responses=True) + >>> rc = RedisCluster(startup_nodes=startup_nodes, decode_responses=True) >>> rc.set("foo", "bar") True diff --git a/docs/limitations-and-differences.rst b/docs/limitations-and-differences.rst index d9208e1a..7b37e62d 100644 --- a/docs/limitations-and-differences.rst +++ b/docs/limitations-and-differences.rst @@ -5,11 +5,11 @@ This will compare against `redis-py` There is alot of differences that have to be taken into consideration when using redis cluster. -Any method that can operate on multiple keys have to be reimplemented in the client and in some cases that is not possible to do. In general any method that is overriden in StrictRedisCluster have lost the ability of being atomic. +Any method that can operate on multiple keys have to be reimplemented in the client and in some cases that is not possible to do. In general any method that is overriden in RedisCluster have lost the ability of being atomic. -Pipelines do not work the same way in a cluster. In `StrictRedis` it batch all commands so that they can be executed at the same time when requested. But with RedisCluster pipelines will send the command directly to the server when it is called, but it will still store the result internally and return the same data from .execute(). 
This is done so that the code still behaves like a pipeline and no code will break. A better solution will be implemented in the future. +Pipelines do not work the same way in a cluster. In `Redis` it batch all commands so that they can be executed at the same time when requested. But with RedisCluster pipelines will send the command directly to the server when it is called, but it will still store the result internally and return the same data from .execute(). This is done so that the code still behaves like a pipeline and no code will break. A better solution will be implemented in the future. -Alot of methods will behave very different when using RedisCluster. Some methods send the same request to all servers and return the result in another format then `StrictRedis` do. Some methods is blocked because they do not work / is not implemented / is dangerous to use in redis cluster. +Alot of methods will behave very different when using RedisCluster. Some methods send the same request to all servers and return the result in another format then `Redis` do. Some methods is blocked because they do not work / is not implemented / is dangerous to use in redis cluster. Some of the commands are only partially supported when using RedisCluster. The commands ``zinterstore`` and ``zunionstore`` are only supported if all the keys map to the same key slot in the cluster. This can be achieved by namespacing related keys with a prefix followed by a bracketed common key. Example: diff --git a/docs/pipelines.rst b/docs/pipelines.rst index 94fad872..c092b604 100644 --- a/docs/pipelines.rst +++ b/docs/pipelines.rst @@ -101,7 +101,7 @@ This code do NOT wrap `MULTI/EXEC` around the commands when packed .. code-block:: python - >>> from rediscluster import StrictRedisCluster as s + >>> from rediscluster import RedisCluster as s >>> r = s(startup_nodes=[{"host": "127.0.0.1", "port": "7002"}]) >>> # Simulate that a slot is migrating to another node >>> r.connection_pool.nodes.slots[14226] = {'host': '127.0.0.1', 'server_type': 'master', 'port': 7001, 'name': '127.0.0.1:7001'} @@ -120,7 +120,7 @@ This code DO wrap MULTI/EXEC around the commands when packed .. code-block:: python - >>> from rediscluster import StrictRedisCluster as s + >>> from rediscluster import RedisCluster as s >>> r = s(startup_nodes=[{"host": "127.0.0.1", "port": "7002"}]) >>> # Simulate that a slot is migrating to another node >>> r.connection_pool.nodes.slots[14226] = {'host': '127.0.0.1', 'server_type': 'master', 'port': 7001, 'name': '127.0.0.1:7001'} diff --git a/docs/pubsub.rst b/docs/pubsub.rst index 928e6efe..9bb76074 100644 --- a/docs/pubsub.rst +++ b/docs/pubsub.rst @@ -40,8 +40,8 @@ The following part is from this discussion https://groups.google.com/forum/?hl=s -How pubsub works in StrictRedisCluster --------------------------------------- +How pubsub works in RedisCluster +-------------------------------- In release `1.2.0` the pubsub was code was reworked to now work like this. @@ -69,4 +69,4 @@ The implemented solution will only work if other clients use/adopt the same beha Other solutions --------------- -The simplest solution is to have a seperate non clustered redis instance that you have a regular `StrictRedis` instance that works with your pubsub code. It is not recommended to use pubsub until `redis` fixes the implementation in the server itself. +The simplest solution is to have a seperate non clustered redis instance that you have a regular `Redis` instance that works with your pubsub code. 
It is not recommended to use pubsub until `redis` fixes the implementation in the server itself. diff --git a/docs/readonly-mode.rst b/docs/readonly-mode.rst index 5ca197e1..29d885f4 100644 --- a/docs/readonly-mode.rst +++ b/docs/readonly-mode.rst @@ -3,17 +3,17 @@ Readonly mode By default, Redis Cluster always returns MOVE redirection response on accessing slave node. You can overcome this limitation [for scaling read with READONLY mode](http://redis.io/topics/cluster-spec#scaling-reads-using-slave-nodes). -redis-py-cluster also implements this mode. You can access slave by passing `readonly_mode=True` to StrictRedisCluster (or RedisCluster) constructor. +redis-py-cluster also implements this mode. You can access slave by passing `readonly_mode=True` to RedisCluster (or RedisCluster) constructor. .. code-block:: python - >>> from rediscluster import StrictRedisCluster + >>> from rediscluster import RedisCluster >>> startup_nodes = [{"host": "127.0.0.1", "port": "7000"}] - >>> rc = StrictRedisCluster(startup_nodes=startup_nodes, decode_responses=True) + >>> rc = RedisCluster(startup_nodes=startup_nodes, decode_responses=True) >>> rc.set("foo16706", "bar") >>> rc.set("foo81", "foo") True - >>> rc_readonly = StrictRedisCluster(startup_nodes=startup_nodes, decode_responses=True, readonly_mode=True) + >>> rc_readonly = RedisCluster(startup_nodes=startup_nodes, decode_responses=True, readonly_mode=True) >>> rc_readonly.get("foo16706") u'bar' >>> rc_readonly.get("foo81") @@ -39,7 +39,7 @@ But this mode has some downside or limitations. .. code-block:: python - >>> rc_readonly = StrictRedisCluster(startup_nodes=startup_nodes, decode_responses=True, readonly_mode=True) + >>> rc_readonly = RedisCluster(startup_nodes=startup_nodes, decode_responses=True, readonly_mode=True) >>> # NO: This works in almost case, but possibly emits Too many Cluster redirections error... >>> rc_readonly.set('foo', 'bar') >>> # OK: You should always use get related stuff... diff --git a/docs/threads.rst b/docs/threads.rst index d6f2d869..790db8c7 100644 --- a/docs/threads.rst +++ b/docs/threads.rst @@ -25,7 +25,7 @@ You can disable threaded execution either in the class constructor: .. code-block:: python - r = rediscluster.StrictRedisCluster( ... pipeline_use_threads=False) #true by default + r = rediscluster.RedisCluster( ... pipeline_use_threads=False) #true by default pipe = r.pipeline() Or you can disable it on a case by case basis as you instantiate the pipeline object. @@ -34,7 +34,7 @@ Or you can disable it on a case by case basis as you instantiate the pipeline ob pipe = r.pipeline(use_threads=False) -The later example always overrides if explicitly set. Otherwise, it falls back on the value passed to the StrictRedisCluster constructor. +The later example always overrides if explicitly set. Otherwise, it falls back on the value passed to the RedisCluster constructor. 
diff --git a/examples/basic.py b/examples/basic.py index 4569a965..d64a277e 100644 --- a/examples/basic.py +++ b/examples/basic.py @@ -1,9 +1,9 @@ -from rediscluster import StrictRedisCluster +from rediscluster import RedisCluster startup_nodes = [{"host": "127.0.0.1", "port": "7000"}] # Note: decode_responses must be set to True when used with python3 -rc = StrictRedisCluster(startup_nodes=startup_nodes, decode_responses=True) +rc = RedisCluster(startup_nodes=startup_nodes, decode_responses=True) rc.set("foo", "bar") diff --git a/examples/basic_password_protected.py b/examples/basic_password_protected.py index 0bd6a9e5..59d3ce0b 100644 --- a/examples/basic_password_protected.py +++ b/examples/basic_password_protected.py @@ -1,9 +1,9 @@ -from rediscluster import StrictRedisCluster +from rediscluster import RedisCluster startup_nodes = [{"host": "127.0.0.1", "port": "7100"}] # Note: decode_responses must be set to True when used with python3 -rc = StrictRedisCluster(startup_nodes=startup_nodes, decode_responses=True, password='password_is_protected') +rc = RedisCluster(startup_nodes=startup_nodes, decode_responses=True, password='password_is_protected') rc.set("foo", "bar") diff --git a/examples/from_url_password_protected.py b/examples/from_url_password_protected.py index e908b024..526aeec1 100644 --- a/examples/from_url_password_protected.py +++ b/examples/from_url_password_protected.py @@ -1,8 +1,8 @@ -from rediscluster import StrictRedisCluster +from rediscluster import RedisCluster url="redis://:R1NFTBWTE1@10.127.91.90:6572/0" -rc = StrictRedisCluster.from_url(url, skip_full_coverage_check=True) +rc = RedisCluster.from_url(url, skip_full_coverage_check=True) rc.set("foo", "bar") diff --git a/ptp-debug.py b/ptp-debug.py index 0c4b6e31..65d6dc4f 100644 --- a/ptp-debug.py +++ b/ptp-debug.py @@ -1,9 +1,9 @@ -from rediscluster import StrictRedisCluster +from rediscluster import RedisCluster startup_nodes = [{"host": "127.0.0.1", "port": "7000"}] # Note: decode_responses must be set to True when used with python3 -rc = StrictRedisCluster(startup_nodes=startup_nodes, decode_responses=True) -url_client = StrictRedisCluster.from_url('http://127.0.0.1:7000') +rc = RedisCluster(startup_nodes=startup_nodes, decode_responses=True) +url_client = RedisCluster.from_url('http://127.0.0.1:7000') __import__('ptpdb').set_trace() diff --git a/rediscluster/__init__.py b/rediscluster/__init__.py index 0b270efb..5024086d 100644 --- a/rediscluster/__init__.py +++ b/rediscluster/__init__.py @@ -4,13 +4,12 @@ import sys # Import shortcut -from .client import StrictRedisCluster, RedisCluster +from .client import RedisCluster from .pipeline import StrictClusterPipeline from .pubsub import ClusterPubSub # Monkey patch RedisCluster class into redis for easy access import redis -setattr(redis, "StrictRedisCluster", StrictRedisCluster) setattr(redis, "RedisCluster", RedisCluster) setattr(redis, "ClusterPubSub", ClusterPubSub) setattr(redis, "StrictClusterPipeline", StrictClusterPipeline) diff --git a/rediscluster/client.py b/rediscluster/client.py index 49046dde..e37178eb 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -33,16 +33,16 @@ parse_pubsub_numpat, ) # 3rd party imports -from redis import StrictRedis +from redis import Redis from redis.client import list_or_args, parse_info from redis.connection import Token from redis._compat import iteritems, basestring, b, izip, nativestr, long from redis.exceptions import RedisError, ResponseError, TimeoutError, DataError, ConnectionError, 
BusyLoadingError -class StrictRedisCluster(StrictRedis): +class RedisCluster(Redis): """ - If a command is implemented over the one in StrictRedis then it requires some changes compared to + If a command is implemented over the one in Redis then it requires some changes compared to the regular implementation of the method. """ RedisClusterRequestTTL = 16 @@ -172,13 +172,13 @@ def __init__(self, host=None, port=None, startup_nodes=None, max_connections=Non it was operating on. This will allow the client to drift along side the cluster if the cluster nodes move around alot. :**kwargs: - Extra arguments that will be sent into StrictRedis instance when created + Extra arguments that will be sent into Redis instance when created (See Official redis-py doc for supported kwargs [https://github.com/andymccurdy/redis-py/blob/master/redis/client.py]) Some kwargs is not supported and will raise RedisClusterException - db (Redis do not support database SELECT in cluster mode) """ - # Tweaks to StrictRedis client arguments when running in cluster mode + # Tweaks to Redis client arguments when running in cluster mode if "db" in kwargs: raise RedisClusterException("Argument 'db' is not possible to use in cluster mode") @@ -213,7 +213,7 @@ def __init__(self, host=None, port=None, startup_nodes=None, max_connections=Non **kwargs ) - super(StrictRedisCluster, self).__init__(connection_pool=pool, **kwargs) + super(RedisCluster, self).__init__(connection_pool=pool, **kwargs) self.refresh_table_asap = False self.nodes_flags = self.__class__.NODES_FLAGS.copy() @@ -293,7 +293,7 @@ def transaction(self, *args, **kwargs): """ Transaction is not implemented in cluster mode yet. """ - raise RedisClusterException("method StrictRedisCluster.transaction() is not implemented") + raise RedisClusterException("method RedisCluster.transaction() is not implemented") def _determine_slot(self, *args): """ @@ -716,7 +716,7 @@ def mget(self, keys, *args): Cluster impl: Itterate all keys and send GET for each key. - This will go alot slower than a normal mget call in StrictRedis. + This will go alot slower than a normal mget call in Redis. Operation is no longer atomic. """ @@ -815,7 +815,7 @@ def delete(self, *names): Cluster impl: Iterate all keys and send DELETE for each key. - This will go a lot slower than a normal delete call in StrictRedis. + This will go a lot slower than a normal delete call in Redis. Operation is no longer atomic. """ @@ -1245,91 +1245,4 @@ def _random_id(self, size=16, chars=string.ascii_uppercase + string.digits): return ''.join(random.choice(chars) for _ in range(size)) -class RedisCluster(StrictRedisCluster): - """ - Provides backwards compatibility with older versions of redis-py that - changed arguments to some commands to be more Pythonic, sane, or by - accident. - """ - # Overridden callbacks - RESPONSE_CALLBACKS = dict_merge( - StrictRedis.RESPONSE_CALLBACKS, - { - 'TTL': lambda r: r >= 0 and r or None, - 'PTTL': lambda r: r >= 0 and r or None, - } - ) - - def pipeline(self, transaction=True, shard_hint=None): - """ - Return a new pipeline object that can queue multiple commands for - later execution. ``transaction`` indicates whether all commands - should be executed atomically. Apart from making a group of operations - atomic, pipelines are useful for reducing the back-and-forth overhead - between the client and server. 
- """ - if shard_hint: - raise RedisClusterException("shard_hint is deprecated in cluster mode") - - if transaction: - raise RedisClusterException("transaction is deprecated in cluster mode") - - return StrictClusterPipeline( - connection_pool=self.connection_pool, - startup_nodes=self.connection_pool.nodes.startup_nodes, - response_callbacks=self.response_callbacks - ) - - def setex(self, name, value, time): - """ - Set the value of key ``name`` to ``value`` that expires in ``time`` - seconds. ``time`` can be represented by an integer or a Python - timedelta object. - """ - if isinstance(time, datetime.timedelta): - time = time.seconds + time.days * 24 * 3600 - - return self.execute_command('SETEX', name, time, value) - - def lrem(self, name, value, num=0): - """ - Remove the first ``num`` occurrences of elements equal to ``value`` - from the list stored at ``name``. - The ``num`` argument influences the operation in the following ways: - num > 0: Remove elements equal to value moving from head to tail. - num < 0: Remove elements equal to value moving from tail to head. - num = 0: Remove all elements equal to value. - """ - return self.execute_command('LREM', name, num, value) - - def zadd(self, name, *args, **kwargs): - """ - NOTE: The order of arguments differs from that of the official ZADD - command. For backwards compatability, this method accepts arguments - in the form of name1, score1, name2, score2, while the official Redis - documents expects score1, name1, score2, name2. - If you're looking to use the standard syntax, consider using the - StrictRedis class. See the API Reference section of the docs for more - information. - Set any number of element-name, score pairs to the key ``name``. Pairs - can be specified in two ways: - As *args, in the form of: name1, score1, name2, score2, ... - or as **kwargs, in the form of: name1=score1, name2=score2, ... - The following example would add four values to the 'my-key' key: - redis.zadd('my-key', 'name1', 1.1, 'name2', 2.2, name3=3.3, name4=4.4) - """ - pieces = [] - - if args: - if len(args) % 2 != 0: - raise RedisError("ZADD requires an equal number of values and scores") - pieces.extend(reversed(args)) - - for pair in iteritems(kwargs): - pieces.append(pair[1]) - pieces.append(pair[0]) - - return self.execute_command('ZADD', name, *pieces) - - from rediscluster.pipeline import StrictClusterPipeline diff --git a/rediscluster/connection.py b/rediscluster/connection.py index 703cb2ae..ac6abc74 100644 --- a/rediscluster/connection.py +++ b/rediscluster/connection.py @@ -64,7 +64,7 @@ class SSLClusterConnection(SSLConnection): Manages TCP communication over TLS/SSL to and from a Redis cluster Usage: pool = ClusterConnectionPool(connection_class=SSLClusterConnection, ...) 
- client = StrictRedisCluster(connection_pool=pool) + client = RedisCluster(connection_pool=pool) """ description_format = "SSLClusterConnection" @@ -130,7 +130,7 @@ def __init__(self, startup_nodes=None, init_slot_cache=True, connection_class=No self.max_connections_per_node = max_connections_per_node if connection_class == SSLClusterConnection: - connection_kwargs['ssl'] = True # needed in StrictRedis init + connection_kwargs['ssl'] = True # needed in Redis init self.nodes = NodeManager( startup_nodes, diff --git a/rediscluster/nodemanager.py b/rediscluster/nodemanager.py index b16877d1..ed936c94 100644 --- a/rediscluster/nodemanager.py +++ b/rediscluster/nodemanager.py @@ -8,7 +8,7 @@ from .exceptions import RedisClusterException # 3rd party imports -from redis import StrictRedis +from redis import Redis from redis._compat import b, unicode, bytes, long, basestring from redis import ConnectionError, TimeoutError, ResponseError @@ -149,7 +149,7 @@ def get_redis_link(self, host, port, decode_responses=False): 'decode_responses', ) connection_kwargs = {k: v for k, v in self.connection_kwargs.items() if k in set(allowed_keys) - set(disabled_keys)} - return StrictRedis(host=host, port=port, decode_responses=decode_responses, **connection_kwargs) + return Redis(host=host, port=port, decode_responses=decode_responses, **connection_kwargs) def initialize(self): """ @@ -191,7 +191,7 @@ def initialize(self): if (len(cluster_slots) == 1 and len(cluster_slots[0][2][0]) == 0 and len(self.startup_nodes) == 1): cluster_slots[0][2][0] = self.startup_nodes[0]['host'] - # No need to decode response because StrictRedis should handle that for us... + # No need to decode response because Redis should handle that for us... for slot in cluster_slots: master_node = slot[2] diff --git a/rediscluster/pipeline.py b/rediscluster/pipeline.py index f6a21491..2e16dac7 100644 --- a/rediscluster/pipeline.py +++ b/rediscluster/pipeline.py @@ -4,14 +4,14 @@ import sys # rediscluster imports -from .client import StrictRedisCluster +from .client import RedisCluster from .exceptions import ( RedisClusterException, AskError, MovedError, TryAgainError, ) from .utils import clusterdown_wrapper, dict_merge # 3rd party imports -from redis import StrictRedis +from redis import Redis from redis.exceptions import ConnectionError, RedisError, TimeoutError from redis._compat import imap, unicode @@ -19,7 +19,7 @@ ERRORS_ALLOW_RETRY = (ConnectionError, TimeoutError, MovedError, AskError, TryAgainError) -class StrictClusterPipeline(StrictRedisCluster): +class ClusterPipeline(RedisCluster): """ """ @@ -296,66 +296,66 @@ def inner(*args, **kwargs): # Blocked pipeline commands -StrictClusterPipeline.bgrewriteaof = block_pipeline_command(StrictRedis.bgrewriteaof) -StrictClusterPipeline.bgsave = block_pipeline_command(StrictRedis.bgsave) -StrictClusterPipeline.bitop = block_pipeline_command(StrictRedis.bitop) -StrictClusterPipeline.brpoplpush = block_pipeline_command(StrictRedis.brpoplpush) -StrictClusterPipeline.client_getname = block_pipeline_command(StrictRedis.client_getname) -StrictClusterPipeline.client_kill = block_pipeline_command(StrictRedis.client_kill) -StrictClusterPipeline.client_list = block_pipeline_command(StrictRedis.client_list) -StrictClusterPipeline.client_setname = block_pipeline_command(StrictRedis.client_setname) -StrictClusterPipeline.config_get = block_pipeline_command(StrictRedis.config_get) -StrictClusterPipeline.config_resetstat = block_pipeline_command(StrictRedis.config_resetstat) 
-StrictClusterPipeline.config_rewrite = block_pipeline_command(StrictRedis.config_rewrite) -StrictClusterPipeline.config_set = block_pipeline_command(StrictRedis.config_set) -StrictClusterPipeline.dbsize = block_pipeline_command(StrictRedis.dbsize) -StrictClusterPipeline.echo = block_pipeline_command(StrictRedis.echo) -StrictClusterPipeline.evalsha = block_pipeline_command(StrictRedis.evalsha) -StrictClusterPipeline.flushall = block_pipeline_command(StrictRedis.flushall) -StrictClusterPipeline.flushdb = block_pipeline_command(StrictRedis.flushdb) -StrictClusterPipeline.info = block_pipeline_command(StrictRedis.info) -StrictClusterPipeline.keys = block_pipeline_command(StrictRedis.keys) -StrictClusterPipeline.lastsave = block_pipeline_command(StrictRedis.lastsave) -StrictClusterPipeline.mget = block_pipeline_command(StrictRedis.mget) -StrictClusterPipeline.move = block_pipeline_command(StrictRedis.move) -StrictClusterPipeline.mset = block_pipeline_command(StrictRedis.mset) -StrictClusterPipeline.msetnx = block_pipeline_command(StrictRedis.msetnx) -StrictClusterPipeline.pfmerge = block_pipeline_command(StrictRedis.pfmerge) -StrictClusterPipeline.pfcount = block_pipeline_command(StrictRedis.pfcount) -StrictClusterPipeline.ping = block_pipeline_command(StrictRedis.ping) -StrictClusterPipeline.publish = block_pipeline_command(StrictRedis.publish) -StrictClusterPipeline.randomkey = block_pipeline_command(StrictRedis.randomkey) -StrictClusterPipeline.rename = block_pipeline_command(StrictRedis.rename) -StrictClusterPipeline.renamenx = block_pipeline_command(StrictRedis.renamenx) -StrictClusterPipeline.rpoplpush = block_pipeline_command(StrictRedis.rpoplpush) -StrictClusterPipeline.save = block_pipeline_command(StrictRedis.save) -StrictClusterPipeline.scan = block_pipeline_command(StrictRedis.scan) -StrictClusterPipeline.script_exists = block_pipeline_command(StrictRedis.script_exists) -StrictClusterPipeline.script_flush = block_pipeline_command(StrictRedis.script_flush) -StrictClusterPipeline.script_kill = block_pipeline_command(StrictRedis.script_kill) -StrictClusterPipeline.script_load = block_pipeline_command(StrictRedis.script_load) -StrictClusterPipeline.sdiff = block_pipeline_command(StrictRedis.sdiff) -StrictClusterPipeline.sdiffstore = block_pipeline_command(StrictRedis.sdiffstore) -StrictClusterPipeline.sentinel_get_master_addr_by_name = block_pipeline_command(StrictRedis.sentinel_get_master_addr_by_name) -StrictClusterPipeline.sentinel_master = block_pipeline_command(StrictRedis.sentinel_master) -StrictClusterPipeline.sentinel_masters = block_pipeline_command(StrictRedis.sentinel_masters) -StrictClusterPipeline.sentinel_monitor = block_pipeline_command(StrictRedis.sentinel_monitor) -StrictClusterPipeline.sentinel_remove = block_pipeline_command(StrictRedis.sentinel_remove) -StrictClusterPipeline.sentinel_sentinels = block_pipeline_command(StrictRedis.sentinel_sentinels) -StrictClusterPipeline.sentinel_set = block_pipeline_command(StrictRedis.sentinel_set) -StrictClusterPipeline.sentinel_slaves = block_pipeline_command(StrictRedis.sentinel_slaves) -StrictClusterPipeline.shutdown = block_pipeline_command(StrictRedis.shutdown) -StrictClusterPipeline.sinter = block_pipeline_command(StrictRedis.sinter) -StrictClusterPipeline.sinterstore = block_pipeline_command(StrictRedis.sinterstore) -StrictClusterPipeline.slaveof = block_pipeline_command(StrictRedis.slaveof) -StrictClusterPipeline.slowlog_get = block_pipeline_command(StrictRedis.slowlog_get) -StrictClusterPipeline.slowlog_len = 
block_pipeline_command(StrictRedis.slowlog_len) -StrictClusterPipeline.slowlog_reset = block_pipeline_command(StrictRedis.slowlog_reset) -StrictClusterPipeline.smove = block_pipeline_command(StrictRedis.smove) -StrictClusterPipeline.sort = block_pipeline_command(StrictRedis.sort) -StrictClusterPipeline.sunion = block_pipeline_command(StrictRedis.sunion) -StrictClusterPipeline.sunionstore = block_pipeline_command(StrictRedis.sunionstore) -StrictClusterPipeline.time = block_pipeline_command(StrictRedis.time) +StrictClusterPipeline.bgrewriteaof = block_pipeline_command(Redis.bgrewriteaof) +StrictClusterPipeline.bgsave = block_pipeline_command(Redis.bgsave) +StrictClusterPipeline.bitop = block_pipeline_command(Redis.bitop) +StrictClusterPipeline.brpoplpush = block_pipeline_command(Redis.brpoplpush) +StrictClusterPipeline.client_getname = block_pipeline_command(Redis.client_getname) +StrictClusterPipeline.client_kill = block_pipeline_command(Redis.client_kill) +StrictClusterPipeline.client_list = block_pipeline_command(Redis.client_list) +StrictClusterPipeline.client_setname = block_pipeline_command(Redis.client_setname) +StrictClusterPipeline.config_get = block_pipeline_command(Redis.config_get) +StrictClusterPipeline.config_resetstat = block_pipeline_command(Redis.config_resetstat) +StrictClusterPipeline.config_rewrite = block_pipeline_command(Redis.config_rewrite) +StrictClusterPipeline.config_set = block_pipeline_command(Redis.config_set) +StrictClusterPipeline.dbsize = block_pipeline_command(Redis.dbsize) +StrictClusterPipeline.echo = block_pipeline_command(Redis.echo) +StrictClusterPipeline.evalsha = block_pipeline_command(Redis.evalsha) +StrictClusterPipeline.flushall = block_pipeline_command(Redis.flushall) +StrictClusterPipeline.flushdb = block_pipeline_command(Redis.flushdb) +StrictClusterPipeline.info = block_pipeline_command(Redis.info) +StrictClusterPipeline.keys = block_pipeline_command(Redis.keys) +StrictClusterPipeline.lastsave = block_pipeline_command(Redis.lastsave) +StrictClusterPipeline.mget = block_pipeline_command(Redis.mget) +StrictClusterPipeline.move = block_pipeline_command(Redis.move) +StrictClusterPipeline.mset = block_pipeline_command(Redis.mset) +StrictClusterPipeline.msetnx = block_pipeline_command(Redis.msetnx) +StrictClusterPipeline.pfmerge = block_pipeline_command(Redis.pfmerge) +StrictClusterPipeline.pfcount = block_pipeline_command(Redis.pfcount) +StrictClusterPipeline.ping = block_pipeline_command(Redis.ping) +StrictClusterPipeline.publish = block_pipeline_command(Redis.publish) +StrictClusterPipeline.randomkey = block_pipeline_command(Redis.randomkey) +StrictClusterPipeline.rename = block_pipeline_command(Redis.rename) +StrictClusterPipeline.renamenx = block_pipeline_command(Redis.renamenx) +StrictClusterPipeline.rpoplpush = block_pipeline_command(Redis.rpoplpush) +StrictClusterPipeline.save = block_pipeline_command(Redis.save) +StrictClusterPipeline.scan = block_pipeline_command(Redis.scan) +StrictClusterPipeline.script_exists = block_pipeline_command(Redis.script_exists) +StrictClusterPipeline.script_flush = block_pipeline_command(Redis.script_flush) +StrictClusterPipeline.script_kill = block_pipeline_command(Redis.script_kill) +StrictClusterPipeline.script_load = block_pipeline_command(Redis.script_load) +StrictClusterPipeline.sdiff = block_pipeline_command(Redis.sdiff) +StrictClusterPipeline.sdiffstore = block_pipeline_command(Redis.sdiffstore) +StrictClusterPipeline.sentinel_get_master_addr_by_name = 
block_pipeline_command(Redis.sentinel_get_master_addr_by_name) +StrictClusterPipeline.sentinel_master = block_pipeline_command(Redis.sentinel_master) +StrictClusterPipeline.sentinel_masters = block_pipeline_command(Redis.sentinel_masters) +StrictClusterPipeline.sentinel_monitor = block_pipeline_command(Redis.sentinel_monitor) +StrictClusterPipeline.sentinel_remove = block_pipeline_command(Redis.sentinel_remove) +StrictClusterPipeline.sentinel_sentinels = block_pipeline_command(Redis.sentinel_sentinels) +StrictClusterPipeline.sentinel_set = block_pipeline_command(Redis.sentinel_set) +StrictClusterPipeline.sentinel_slaves = block_pipeline_command(Redis.sentinel_slaves) +StrictClusterPipeline.shutdown = block_pipeline_command(Redis.shutdown) +StrictClusterPipeline.sinter = block_pipeline_command(Redis.sinter) +StrictClusterPipeline.sinterstore = block_pipeline_command(Redis.sinterstore) +StrictClusterPipeline.slaveof = block_pipeline_command(Redis.slaveof) +StrictClusterPipeline.slowlog_get = block_pipeline_command(Redis.slowlog_get) +StrictClusterPipeline.slowlog_len = block_pipeline_command(Redis.slowlog_len) +StrictClusterPipeline.slowlog_reset = block_pipeline_command(Redis.slowlog_reset) +StrictClusterPipeline.smove = block_pipeline_command(Redis.smove) +StrictClusterPipeline.sort = block_pipeline_command(Redis.sort) +StrictClusterPipeline.sunion = block_pipeline_command(Redis.sunion) +StrictClusterPipeline.sunionstore = block_pipeline_command(Redis.sunionstore) +StrictClusterPipeline.time = block_pipeline_command(Redis.time) class PipelineCommand(object): @@ -391,7 +391,7 @@ def append(self, c): def write(self): """ - Code borrowed from StrictRedis so it can be fixed + Code borrowed from Redis so it can be fixed """ connection = self.connection commands = self.commands diff --git a/tests/conftest.py b/tests/conftest.py index 87cbc01c..f359ccaa 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -6,11 +6,11 @@ import json # rediscluster imports -from rediscluster import StrictRedisCluster, RedisCluster +from rediscluster import RedisCluster # 3rd party imports import pytest -from redis import StrictRedis +from redis import Redis from distutils.version import StrictVersion # put our path in front so we can be sure we are testing locally not against the global package @@ -100,33 +100,33 @@ def skip_if_redis_py_version_lt(min_version): @pytest.fixture() def o(request, *args, **kwargs): """ - Create a StrictRedisCluster instance with decode_responses set to True. + Create a RedisCluster instance with decode_responses set to True. """ - return _init_client(request, cls=StrictRedisCluster, decode_responses=True, **kwargs) + return _init_client(request, cls=RedisCluster, decode_responses=True, **kwargs) @pytest.fixture() def r(request, *args, **kwargs): """ - Create a StrictRedisCluster instance with default settings. + Create a RedisCluster instance with default settings. 
""" - return _init_client(request, cls=StrictRedisCluster, **kwargs) + return _init_client(request, cls=RedisCluster, **kwargs) @pytest.fixture() def ro(request, *args, **kwargs): """ - Create a StrictRedisCluster instance with readonly mode + Create a RedisCluster instance with readonly mode """ params = {'readonly_mode': True} params.update(kwargs) - return _init_client(request, cls=StrictRedisCluster, **params) + return _init_client(request, cls=RedisCluster, **params) @pytest.fixture() def s(*args, **kwargs): """ - Create a StrictRedisCluster instance with 'init_slot_cache' set to false + Create a RedisCluster instance with 'init_slot_cache' set to false """ s = _get_client(init_slot_cache=False, **kwargs) assert s.connection_pool.nodes.slots == {} @@ -137,14 +137,14 @@ def s(*args, **kwargs): @pytest.fixture() def t(*args, **kwargs): """ - Create a regular StrictRedis object instance + Create a regular Redis object instance """ - return StrictRedis(*args, **kwargs) + return Redis(*args, **kwargs) @pytest.fixture() def sr(request, *args, **kwargs): """ - Returns a instance of StrictRedisCluster + Returns a instance of RedisCluster """ - return _init_client(request, reinitialize_steps=1, cls=StrictRedisCluster, **kwargs) + return _init_client(request, reinitialize_steps=1, cls=RedisCluster, **kwargs) diff --git a/tests/test_cluster_connection_pool.py b/tests/test_cluster_connection_pool.py index 45efe007..5a432c3e 100644 --- a/tests/test_cluster_connection_pool.py +++ b/tests/test_cluster_connection_pool.py @@ -446,7 +446,7 @@ def test_calling_from_subclass_returns_correct_instance(self): assert isinstance(pool, redis.BlockingConnectionPool) def test_client_creates_connection_pool(self): - r = redis.StrictRedis.from_url('redis://myhost') + r = redis.Redis.from_url('redis://myhost') assert r.connection_pool.connection_class == redis.Connection assert r.connection_pool.connection_kwargs == { 'host': 'myhost', diff --git a/tests/test_cluster_obj.py b/tests/test_cluster_obj.py index db560bf3..34bf0c5b 100644 --- a/tests/test_cluster_obj.py +++ b/tests/test_cluster_obj.py @@ -6,7 +6,7 @@ import time # rediscluster imports -from rediscluster import StrictRedisCluster +from rediscluster import RedisCluster from rediscluster.connection import ClusterConnectionPool, ClusterReadOnlyConnectionPool from rediscluster.exceptions import ( RedisClusterException, MovedError, AskError, ClusterDownError, @@ -17,7 +17,7 @@ # 3rd party imports from mock import patch, Mock, MagicMock from redis._compat import b, unicode -from redis import StrictRedis +from redis import Redis import pytest pytestmark = skip_if_server_version_lt('2.9.0') @@ -32,7 +32,7 @@ class DummyConnection(object): def test_representation(r): - assert re.search('^StrictRedisCluster<[0-9\.\:\,].+>$', str(r)) + assert re.search('^RedisCluster<[0-9\.\:\,].+>$', str(r)) def test_blocked_strict_redis_args(): @@ -40,7 +40,7 @@ def test_blocked_strict_redis_args(): Some arguments should explicitly be blocked because they will not work in a cluster setup """ params = {'startup_nodes': [{'host': '127.0.0.1', 'port': 7000}]} - c = StrictRedisCluster(**params) + c = RedisCluster(**params) assert c.connection_pool.connection_kwargs["socket_timeout"] == ClusterConnectionPool.RedisClusterDefaultTimeout with pytest.raises(RedisClusterException) as ex: @@ -72,7 +72,7 @@ def test_host_port_startup_node(): """ h = "192.168.0.1" p = 7000 - c = StrictRedisCluster(host=h, port=p, init_slot_cache=False) + c = RedisCluster(host=h, port=p, init_slot_cache=False) 
assert {"host": h, "port": p} in c.connection_pool.nodes.startup_nodes @@ -95,25 +95,25 @@ def test_readonly_instance(ro): def test_custom_connectionpool(): """ - Test that a custom connection pool will be used by StrictRedisCluster + Test that a custom connection pool will be used by RedisCluster """ h = "192.168.0.1" p = 7001 pool = DummyConnectionPool(host=h, port=p, connection_class=DummyConnection, startup_nodes=[{'host': h, 'port': p}], init_slot_cache=False) - c = StrictRedisCluster(connection_pool=pool, init_slot_cache=False) + c = RedisCluster(connection_pool=pool, init_slot_cache=False) assert c.connection_pool is pool assert c.connection_pool.connection_class == DummyConnection assert {"host": h, "port": p} in c.connection_pool.nodes.startup_nodes -@patch('rediscluster.nodemanager.StrictRedis', new=MagicMock()) +@patch('rediscluster.nodemanager.Redis', new=MagicMock()) def test_skip_full_coverage_check(): """ Test if the cluster_require_full_coverage NodeManager method was not called with the flag activated """ - c = StrictRedisCluster("192.168.0.1", 7001, init_slot_cache=False, skip_full_coverage_check=True) + c = RedisCluster("192.168.0.1", 7001, init_slot_cache=False, skip_full_coverage_check=True) c.connection_pool.nodes.cluster_require_full_coverage = MagicMock() c.connection_pool.nodes.initialize() assert not c.connection_pool.nodes.cluster_require_full_coverage.called @@ -144,7 +144,7 @@ def test_blocked_transaction(r): """ with pytest.raises(RedisClusterException) as ex: r.transaction(None) - assert unicode(ex.value).startswith("method StrictRedisCluster.transaction() is not implemented"), unicode(ex.value) + assert unicode(ex.value).startswith("method RedisCluster.transaction() is not implemented"), unicode(ex.value) def test_cluster_of_one_instance(): @@ -153,10 +153,10 @@ def test_cluster_of_one_instance(): one server. There is another redis server joining the cluster, hold slot 0, and - eventually quit the cluster. The StrictRedisCluster instance may get confused + eventually quit the cluster. The RedisCluster instance may get confused when slots mapping and nodes change during the test. """ - with patch.object(StrictRedisCluster, 'parse_response') as parse_response_mock: + with patch.object(RedisCluster, 'parse_response') as parse_response_mock: with patch.object(NodeManager, 'initialize', autospec=True) as init_mock: def side_effect(self, *args, **kwargs): def ok_call(self, *args, **kwargs): @@ -198,7 +198,7 @@ def map_7007(self): parse_response_mock.side_effect = side_effect init_mock.side_effect = side_effect_rebuild_slots_cache - rc = StrictRedisCluster(host='127.0.0.1', port=7006) + rc = RedisCluster(host='127.0.0.1', port=7006) rc.set("foo", "bar") @@ -226,12 +226,12 @@ def test_refresh_table_asap(): mock_initialize.return_value = None # Patch parse_response to avoid issues when the cluster sometimes return MOVED - with patch.object(StrictRedisCluster, 'parse_response') as mock_parse_response: + with patch.object(RedisCluster, 'parse_response') as mock_parse_response: def side_effect(self, *args, **kwargs): return None mock_parse_response.side_effect = side_effect - r = StrictRedisCluster(host="127.0.0.1", port=7000) + r = RedisCluster(host="127.0.0.1", port=7000) r.connection_pool.nodes.slots[12182] = [{ "host": "127.0.0.1", "port": 7002, @@ -261,14 +261,14 @@ def test_ask_redirection(): Important thing to verify is that it tries to talk to the second node. 
""" - r = StrictRedisCluster(host="127.0.0.1", port=7000) + r = RedisCluster(host="127.0.0.1", port=7000) r.connection_pool.nodes.nodes['127.0.0.1:7001'] = { 'host': u'127.0.0.1', 'server_type': 'master', 'port': 7001, 'name': '127.0.0.1:7001' } - with patch.object(StrictRedisCluster, + with patch.object(RedisCluster, 'parse_response') as parse_response: host_ip = find_node_ip_based_on_port(r, '7001') @@ -296,8 +296,8 @@ def test_pipeline_ask_redirection(): Important thing to verify is that it tries to talk to the second node. """ - r = StrictRedisCluster(host="127.0.0.1", port=7000) - with patch.object(StrictRedisCluster, + r = RedisCluster(host="127.0.0.1", port=7000) + with patch.object(RedisCluster, 'parse_response') as parse_response: def response(connection, *args, **options): @@ -329,7 +329,7 @@ def test_moved_redirection(): Important thing to verify is that it tries to talk to the second node. """ - r = StrictRedisCluster(host="127.0.0.1", port=7000) + r = RedisCluster(host="127.0.0.1", port=7000) m = Mock(autospec=True) def ask_redirect_effect(connection, *args, **options): @@ -356,7 +356,7 @@ def test_moved_redirection_pipeline(): Important thing to verify is that it tries to talk to the second node. """ - with patch.object(StrictRedisCluster, 'parse_response') as parse_response: + with patch.object(RedisCluster, 'parse_response') as parse_response: def moved_redirect_effect(connection, *args, **options): def ok_response(connection, *args, **options): assert connection.host == "127.0.0.1" @@ -368,7 +368,7 @@ def ok_response(connection, *args, **options): parse_response.side_effect = moved_redirect_effect - r = StrictRedisCluster(host="127.0.0.1", port=7000) + r = RedisCluster(host="127.0.0.1", port=7000) p = r.pipeline() p.set("foo", "bar") assert p.execute() == ["MOCK_OK"] @@ -404,7 +404,7 @@ def test_moved_redirection_on_slave_with_default_client(sr): assert_moved_redirection_on_slave( sr, ClusterConnectionPool, - StrictRedisCluster(host="127.0.0.1", port=7000, reinitialize_steps=1) + RedisCluster(host="127.0.0.1", port=7000, reinitialize_steps=1) ) @@ -415,7 +415,7 @@ def test_moved_redirection_on_slave_with_readonly_mode_client(sr): assert_moved_redirection_on_slave( sr, ClusterReadOnlyConnectionPool, - StrictRedisCluster(host="127.0.0.1", port=7000, readonly_mode=True, reinitialize_steps=1) + RedisCluster(host="127.0.0.1", port=7000, readonly_mode=True, reinitialize_steps=1) ) @@ -443,10 +443,10 @@ def test_access_correct_slave_with_readonly_mode_client(sr): ClusterConnectionPool, 'get_master_node_by_slot', return_value=master_value) as return_master_mock: - readonly_client = StrictRedisCluster(host="127.0.0.1", port=7000, readonly_mode=True) + readonly_client = RedisCluster(host="127.0.0.1", port=7000, readonly_mode=True) assert b('foo') == readonly_client.get('foo16706') - readonly_client = StrictRedisCluster.from_url(url="redis://127.0.0.1:7000/0", readonly_mode=True) + readonly_client = RedisCluster.from_url(url="redis://127.0.0.1:7000/0", readonly_mode=True) assert b('foo') == readonly_client.get('foo16706') @@ -455,7 +455,7 @@ def test_refresh_using_specific_nodes(r): Test making calls on specific nodes when the cluster has failed over to another node """ - with patch.object(StrictRedisCluster, 'parse_response') as parse_response_mock: + with patch.object(RedisCluster, 'parse_response') as parse_response_mock: with patch.object(NodeManager, 'initialize', autospec=True) as init_mock: # simulate 7006 as a failed node def side_effect(self, *args, **kwargs): @@ -498,7 
+498,7 @@ def map_7007(self): init_mock.side_effect = side_effect_rebuild_slots_cache - rc = StrictRedisCluster(host='127.0.0.1', port=7006) + rc = RedisCluster(host='127.0.0.1', port=7006) assert len(rc.connection_pool.nodes.nodes) == 1 assert '127.0.0.1:7006' in rc.connection_pool.nodes.nodes diff --git a/tests/test_node_manager.py b/tests/test_node_manager.py index 52bfb367..5972d59d 100644 --- a/tests/test_node_manager.py +++ b/tests/test_node_manager.py @@ -5,14 +5,14 @@ # rediscluster imports from tests.conftest import skip_if_server_version_lt -from rediscluster import StrictRedisCluster +from rediscluster import RedisCluster from rediscluster.exceptions import RedisClusterException from rediscluster.nodemanager import NodeManager # 3rd party imports import pytest from mock import patch, Mock -from redis import StrictRedis +from redis import Redis from redis._compat import unicode from redis import ConnectionError @@ -57,7 +57,7 @@ def test_init_slots_cache_not_all_slots(s): """ # Create wrapper function so we can inject custom 'CLUSTER SLOTS' command result def get_redis_link_wrapper(*args, **kwargs): - link = StrictRedis(host="127.0.0.1", port=7000, decode_responses=True) + link = Redis(host="127.0.0.1", port=7000, decode_responses=True) orig_exec_method = link.execute_command @@ -91,7 +91,7 @@ def test_init_slots_cache_not_all_slots_not_require_full_coverage(s): """ # Create wrapper function so we can inject custom 'CLUSTER SLOTS' command result def get_redis_link_wrapper(*args, **kwargs): - link = StrictRedis(host="127.0.0.1", port=7000, decode_responses=True) + link = Redis(host="127.0.0.1", port=7000, decode_responses=True) orig_exec_method = link.execute_command @@ -130,7 +130,7 @@ def test_init_slots_cache(s): [10923, 16383, [b'127.0.0.1', 7002], [b'127.0.0.2', 7005]], ] - with patch.object(StrictRedis, 'execute_command') as execute_command_mock: + with patch.object(Redis, 'execute_command') as execute_command_mock: def patch_execute_command(*args, **kwargs): if args == ('CONFIG GET', 'cluster-require-full-coverage'): return {'cluster-require-full-coverage': 'yes'} @@ -202,7 +202,7 @@ def monkey_link(host=None, port=None, *args, **kwargs): else: result = [] - r = StrictRedisCluster(host=host, port=port, decode_responses=True) + r = RedisCluster(host=host, port=port, decode_responses=True) orig_execute_command = r.execute_command def execute_command(*args, **kwargs): @@ -279,7 +279,7 @@ def test_cluster_slots_error(): Check that exception is raised if initialize can't execute 'CLUSTER SLOTS' command. """ - with patch.object(StrictRedisCluster, 'execute_command') as execute_command_mock: + with patch.object(RedisCluster, 'execute_command') as execute_command_mock: execute_command_mock.side_effect = Exception("foobar") n = NodeManager(startup_nodes=[{}]) @@ -321,7 +321,7 @@ def test_cluster_one_instance(): If the cluster exists of only 1 node then there is some hacks that must be validated they work. 
""" - with patch.object(StrictRedis, 'execute_command') as mock_execute_command: + with patch.object(Redis, 'execute_command') as mock_execute_command: return_data = [[0, 16383, ['', 7006]]] def patch_execute_command(*args, **kwargs): @@ -367,7 +367,7 @@ def test_init_with_down_node(): def get_redis_link(host, port, decode_responses=False): if port == 7000: raise ConnectionError('mock connection error for 7000') - return StrictRedis(host=host, port=port, decode_responses=decode_responses) + return Redis(host=host, port=port, decode_responses=decode_responses) with patch.object(NodeManager, 'get_redis_link', side_effect=get_redis_link): n = NodeManager(startup_nodes=[{"host": "127.0.0.1", "port": 7000}]) diff --git a/tests/test_pipeline.py b/tests/test_pipeline.py index 2f034d6b..acbc6172 100644 --- a/tests/test_pipeline.py +++ b/tests/test_pipeline.py @@ -5,7 +5,7 @@ import re # rediscluster imports -from rediscluster.client import StrictRedisCluster +from rediscluster.client import RedisCluster from rediscluster.connection import ClusterConnectionPool, ClusterReadOnlyConnectionPool from rediscluster.exceptions import RedisClusterException from tests.conftest import _get_client @@ -527,7 +527,7 @@ def test_moved_redirection_on_slave_with_default(self): """ self.assert_moved_redirection_on_slave( ClusterConnectionPool, - StrictRedisCluster(host="127.0.0.1", port=7000, reinitialize_steps=1) + RedisCluster(host="127.0.0.1", port=7000, reinitialize_steps=1) ) def test_moved_redirection_on_slave_with_readonly_mode_client(self): @@ -536,7 +536,7 @@ def test_moved_redirection_on_slave_with_readonly_mode_client(self): """ self.assert_moved_redirection_on_slave( ClusterReadOnlyConnectionPool, - StrictRedisCluster(host="127.0.0.1", port=7000, readonly_mode=True, reinitialize_steps=1) + RedisCluster(host="127.0.0.1", port=7000, readonly_mode=True, reinitialize_steps=1) ) def test_access_correct_slave_with_readonly_mode_client(self, sr): @@ -564,6 +564,6 @@ def test_access_correct_slave_with_readonly_mode_client(self, sr): ClusterConnectionPool, 'get_master_node_by_slot', return_value=master_value) as return_master_mock: - readonly_client = StrictRedisCluster(host="127.0.0.1", port=7000, readonly_mode=True) + readonly_client = RedisCluster(host="127.0.0.1", port=7000, readonly_mode=True) with readonly_client.pipeline() as readonly_pipe: assert readonly_pipe.get('foo88').get('foo87').execute() == [b('bar'), b('foo')] diff --git a/tests/test_pubsub.py b/tests/test_pubsub.py index 686b55a3..4bbecfdc 100644 --- a/tests/test_pubsub.py +++ b/tests/test_pubsub.py @@ -6,13 +6,13 @@ import time # rediscluster imports -from rediscluster.client import StrictRedisCluster +from rediscluster.client import RedisCluster # 3rd party imports import pytest # import redis -from redis import StrictRedis, Redis +from redis import Redis from redis.exceptions import ConnectionError from redis._compat import basestring, u, unichr, b @@ -221,12 +221,12 @@ class TestPubSubMessages(object): Bug: Currently in cluster mode publish command will behave different then in standard/non cluster mode. See (docs/Pubsub.md) for details. - Currently StrictRedis instances will be used to test pubsub because they + Currently Redis instances will be used to test pubsub because they are easier to work with. 
""" def get_strict_redis_node(self, port, host="127.0.0.1"): - return StrictRedis(port=port, host=host) + return Redis(port=port, host=host) def setup_method(self, *args): self.message = None @@ -444,7 +444,7 @@ def test_pubsub_thread_publish(): """ startup_nodes = [{"host": "127.0.0.1", "port": "7000"}] - r = StrictRedisCluster( + r = RedisCluster( startup_nodes=startup_nodes, decode_responses=True, max_connections=16, From f73676d010601e02bf25ef7cd4e95ec80b831fe5 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Mon, 26 Nov 2018 23:23:29 +0100 Subject: [PATCH 04/65] Update __init__ to use new helper method to build VERSION attribute --- rediscluster/__init__.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/rediscluster/__init__.py b/rediscluster/__init__.py index 5024086d..bd9c5437 100644 --- a/rediscluster/__init__.py +++ b/rediscluster/__init__.py @@ -17,5 +17,15 @@ # Major, Minor, Fix version __version__ = (2, 0, 0) +def int_or_str(value): + try: + return int(value) + except ValueError: + return value + + +__version__ = '2.0.0' +VERSION = tuple(map(int_or_str, __version__.split('.'))) + if sys.version_info[0:3] == (3, 4, 0): raise RuntimeError("CRITICAL: rediscluster do not work with python 3.4.0. Please use 3.4.1 or higher.") From 82c650151d31a0e8c6a9a2f1c3ce6059c6b8578a Mon Sep 17 00:00:00 2001 From: Grokzen Date: Mon, 26 Nov 2018 23:23:55 +0100 Subject: [PATCH 05/65] Update test_scripting.py to match redis-py code --- tests/test_scripting.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/tests/test_scripting.py b/tests/test_scripting.py index 1dd14d50..968fdc61 100644 --- a/tests/test_scripting.py +++ b/tests/test_scripting.py @@ -1,14 +1,13 @@ # -*- coding: utf-8 -*- # python std lib -from __future__ import with_statement +from __future__ import unicode_literals # rediscluster imports from rediscluster.exceptions import RedisClusterException # 3rd party imports from redis import exceptions -from redis._compat import b import pytest @@ -114,7 +113,7 @@ def test_script_object_in_pipeline(self, r): assert multiply.sha assert r.script_exists(multiply.sha) == [True] # [SET worked, GET 'a', result of multiple script] - assert pipe.execute() == [True, b('2'), 6] + assert pipe.execute() == [True, b'2', 6] # purge the script from redis's cache and re-run the pipeline # the multiply script object knows it's sha, so it shouldn't get @@ -127,7 +126,7 @@ def test_script_object_in_pipeline(self, r): multiply(keys=['a'], args=[3], client=pipe) assert r.script_exists(multiply.sha) == [False] # [SET worked, GET 'a', result of multiple script] - assert pipe.execute() == [True, b('2'), 6] + assert pipe.execute() == [True, b'2', 6] @pytest.mark.xfail(reason="Not Yet Implemented") def test_eval_msgpack_pipeline_error_in_lua(self, r): From 081b3ac37ba0d02bfac11ed0e588924e14658c2d Mon Sep 17 00:00:00 2001 From: Grokzen Date: Mon, 26 Nov 2018 23:32:12 +0100 Subject: [PATCH 06/65] Update test_pubsub.py to match redis-py code --- tests/test_pubsub.py | 31 ++++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/tests/test_pubsub.py b/tests/test_pubsub.py index 4bbecfdc..e0ea2837 100644 --- a/tests/test_pubsub.py +++ b/tests/test_pubsub.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # python std lib -from __future__ import with_statement +from __future__ import unicode_literals import threading import time @@ -14,8 +14,9 @@ # import redis from redis import Redis from redis.exceptions import ConnectionError -from redis._compat import 
basestring, u, unichr, b +from redis._compat import basestring, unichr +from .conftest import _get_client from .conftest import skip_if_server_version_lt, skip_if_redis_py_version_lt def wait_for_message(pubsub, timeout=0.5, ignore_subscribe_messages=False): @@ -35,7 +36,7 @@ def make_message(type, channel, data, pattern=None): return { 'type': type, 'pattern': pattern and pattern.encode('utf-8') or None, - 'channel': channel.encode('utf-8'), + 'channel': channel and channel.encode('utf-8') or None, 'data': data.encode('utf-8') if isinstance(data, basestring) else data } @@ -48,7 +49,7 @@ def make_subscribe_test_data(pubsub, type): 'unsub_type': 'unsubscribe', 'sub_func': pubsub.subscribe, 'unsub_func': pubsub.unsubscribe, - 'keys': ['foo', 'bar', u('uni') + unichr(4456) + u('code')] + 'keys': ['foo', 'bar', 'uni' + unichr(4456) + 'code'] } elif type == 'pattern': return { @@ -57,7 +58,7 @@ def make_subscribe_test_data(pubsub, type): 'unsub_type': 'punsubscribe', 'sub_func': pubsub.psubscribe, 'unsub_func': pubsub.punsubscribe, - 'keys': ['f*', 'b*', u('uni') + unichr(4456) + u('*')] + 'keys': ['f*', 'b*', 'uni' + unichr(4456) + '*'] } assert False, 'invalid subscribe type: {0}'.format(type) @@ -309,7 +310,7 @@ def test_pattern_message_handler(self, r): @pytest.mark.xfail(reason="Pattern pubsub do not work currently") def test_unicode_channel_message_handler(self, r): p = r.pubsub(ignore_subscribe_messages=True) - channel = u('uni') + unichr(4456) + u('code') + channel = 'uni' + unichr(4456) + 'code' channels = {channel: self.message_handler} print(channels) p.subscribe(**channels) @@ -320,8 +321,8 @@ def test_unicode_channel_message_handler(self, r): @pytest.mark.xfail(reason="Pattern pubsub do not work currently") def test_unicode_pattern_message_handler(self, r): p = r.pubsub(ignore_subscribe_messages=True) - pattern = u('uni') + unichr(4456) + u('*') - channel = u('uni') + unichr(4456) + u('code') + pattern = 'uni' + unichr(4456) + '*' + channel = 'uni' + unichr(4456) + 'code' p.psubscribe(**{pattern: self.message_handler}) assert r.publish(channel, 'test message') == 1 assert wait_for_message(p) is None @@ -332,9 +333,9 @@ def test_unicode_pattern_message_handler(self, r): class TestPubSubAutoDecoding(object): "These tests only validate that we get unicode values back" - channel = u('uni') + unichr(4456) + u('code') - pattern = u('uni') + unichr(4456) + u('*') - data = u('abc') + unichr(4458) + u('123') + channel = 'uni' + unichr(4456) + 'code' + pattern = 'uni' + unichr(4456) + '*' + data = 'abc' + unichr(4458) + '123' def make_message(self, type, channel, data, pattern=None): return { @@ -400,7 +401,7 @@ def test_channel_message_handler(self, o): # test that we reconnected to the correct channel p.connection.disconnect() assert wait_for_message(p) is None # should reconnect - new_data = self.data + u('new data') + new_data = self.data + 'new data' o.publish(self.channel, new_data) assert wait_for_message(p) is None assert self.message == self.make_message('message', self.channel, @@ -419,7 +420,7 @@ def test_pattern_message_handler(self, o): # test that we reconnected to the correct pattern p.connection.disconnect() assert wait_for_message(p) is None # should reconnect - new_data = self.data + u('new data') + new_data = self.data + 'new data' o.publish(self.channel, new_data) assert wait_for_message(p) is None assert self.message == self.make_message('pmessage', self.channel, @@ -487,7 +488,7 @@ class TestPubSubPubSubSubcommands(object): def test_pubsub_channels(self, r): 
r.pubsub(ignore_subscribe_messages=True).subscribe('foo', 'bar', 'baz', 'quux') channels = sorted(r.pubsub_channels()) - assert channels == [b('bar'), b('baz'), b('foo'), b('quux')] + assert channels == [b'bar', b'baz', b'foo', b'quux'] @skip_if_redis_py_version_lt('2.10.6') def test_pubsub_numsub(self, r): @@ -495,7 +496,7 @@ def test_pubsub_numsub(self, r): r.pubsub(ignore_subscribe_messages=True).subscribe('bar', 'baz') r.pubsub(ignore_subscribe_messages=True).subscribe('baz') - channels = [(b('bar'), 2), (b('baz'), 3), (b('foo'), 1)] + channels = [(b'foo', 1), (b'bar', 2), (b'baz', 3)] assert channels == sorted(r.pubsub_numsub('foo', 'bar', 'baz')) @skip_if_redis_py_version_lt('2.10.6') From 6afa8ad1db1c70220677ba1dfc6707c3de0d05b4 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Mon, 26 Nov 2018 23:34:04 +0100 Subject: [PATCH 07/65] Add python_requires to define what redis version to include/exclude. Add extras_require to support installing hiredis as extras dependency --- setup.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/setup.py b/setup.py index 8215d9fa..4d8fcca0 100644 --- a/setup.py +++ b/setup.py @@ -34,6 +34,12 @@ install_requires=[ 'redis>=3.0.0,<3.1.0' ], + python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*", + extras_require={ + 'hiredis': [ + "hiredis>=0.1.3", + ], + }, keywords=[ 'redis', 'redis cluster', From f7b94b9196ee38de216b52d95fa19686c319fc47 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Mon, 26 Nov 2018 23:36:38 +0100 Subject: [PATCH 08/65] Update years on docs/License file --- docs/License.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/License.txt b/docs/License.txt index 9c297fec..bf0afb13 100644 --- a/docs/License.txt +++ b/docs/License.txt @@ -1,4 +1,4 @@ -Copyright (c) 2014-2016 Johan Andersson +Copyright (c) 2014-2018 Johan Andersson Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation From 1709e1f6044766a4723c671887405f619c766595 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Mon, 26 Nov 2018 23:37:14 +0100 Subject: [PATCH 09/65] Add LICENSE file to root of repo --- LICENSE | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) create mode 100644 LICENSE diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..66ccb488 --- /dev/null +++ b/LICENSE @@ -0,0 +1,23 @@ +Copyright (c) 2014-2018 Johan Andersson + + Permission is hereby granted, free of charge, to any person + obtaining a copy of this software and associated documentation + files (the "Software"), to deal in the Software without + restriction, including without limitation the rights to use, + copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following + conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. 
+ \ No newline at end of file From 6eff6b51a55f87d1a2cb986cf9fc0703856f638c Mon Sep 17 00:00:00 2001 From: Grokzen Date: Mon, 26 Nov 2018 23:43:30 +0100 Subject: [PATCH 10/65] Add [metadata] and [pycodestyle] to setup.cfg to configure tools for future use. --- setup.cfg | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/setup.cfg b/setup.cfg index 3c6e79cf..85215dcb 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,2 +1,9 @@ [bdist_wheel] universal=1 + +[metadata] +license_file = LICENSE + +[pycodestyle] +show-source = 1 +exclude = .venv,.tox,dist,docs,build,*.egg From 7334ac8c6efa597281521e186196519ef506b44c Mon Sep 17 00:00:00 2001 From: Grokzen Date: Mon, 26 Nov 2018 23:54:04 +0100 Subject: [PATCH 11/65] Fix byte and unicode issues in test_pipeline.py --- tests/test_pipeline.py | 70 ++++++++++++++++++++++-------------------- 1 file changed, 37 insertions(+), 33 deletions(-) diff --git a/tests/test_pipeline.py b/tests/test_pipeline.py index acbc6172..e8750439 100644 --- a/tests/test_pipeline.py +++ b/tests/test_pipeline.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # python std lib -from __future__ import with_statement +from __future__ import unicode_literals import re # rediscluster imports @@ -13,7 +13,7 @@ # 3rd party imports import pytest from mock import patch -from redis._compat import b, u, unichr, unicode +from redis._compat import unichr, unicode from redis.exceptions import WatchError, ResponseError, ConnectionError @@ -23,15 +23,19 @@ class TestPipeline(object): def test_pipeline(self, r): with r.pipeline() as pipe: - pipe.set('a', 'a1').get('a').zadd('z', z1=1).zadd('z', z2=4) - pipe.zincrby('z', 'z1').zrange('z', 0, 5, withscores=True) + (pipe.set('a', 'a1') + .get('a') + .zadd('z', {'z1': 1}) + .zadd('z', {'z2': 4}) + .zincrby('z', 1, 'z1') + .zrange('z', 0, 5, withscores=True)) assert pipe.execute() == [ True, - b('a1'), + b'a1', True, True, 2.0, - [(b('z1'), 2.0), (b('z2'), 4)], + [(b'z1', 2.0), (b'z2', 4)], ] def test_pipeline_length(self, r): @@ -54,18 +58,18 @@ def test_pipeline_no_transaction(self, r): with r.pipeline(transaction=False) as pipe: pipe.set('a', 'a1').set('b', 'b1').set('c', 'c1') assert pipe.execute() == [True, True, True] - assert r['a'] == b('a1') - assert r['b'] == b('b1') - assert r['c'] == b('c1') + assert r['a'] == b'a1' + assert r['b'] == b'b1' + assert r['c'] == b'c1' def test_pipeline_eval(self, r): with r.pipeline(transaction=False) as pipe: pipe.eval("return {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}", 2, "A{foo}", "B{foo}", "first", "second") res = pipe.execute()[0] - assert res[0] == b('A{foo}') - assert res[1] == b('B{foo}') - assert res[2] == b('first') - assert res[3] == b('second') + assert res[0] == b'A{foo}' + assert res[1] == b'B{foo}' + assert res[2] == b'first' + assert res[3] == b'second' @pytest.mark.xfail(reason="unsupported command: watch") def test_pipeline_no_transaction_watch(self, r): @@ -95,7 +99,7 @@ def test_pipeline_no_transaction_watch_failure(self, r): with pytest.raises(WatchError): pipe.execute() - assert r['a'] == b('bad') + assert r['a'] == b'bad' def test_exec_error_in_response(self, r): """ @@ -108,23 +112,23 @@ def test_exec_error_in_response(self, r): result = pipe.execute(raise_on_error=False) assert result[0] - assert r['a'] == b('1') + assert r['a'] == b'1' assert result[1] - assert r['b'] == b('2') + assert r['b'] == b'2' # we can't lpush to a key that's a string value, so this should # be a ResponseError exception assert isinstance(result[2], ResponseError) - assert r['c'] == b('a') + assert r['c'] == 'a' # since 
this isn't a transaction, the other commands after the # error are still executed assert result[3] - assert r['d'] == b('4') + assert r['d'] == b'4' # make sure the pipe was restored to a working state assert pipe.set('z', 'zzz').execute() == [True] - assert r['z'] == b('zzz') + assert r['z'] == b'zzz' def test_exec_error_raised(self, r): r['c'] = 'a' @@ -137,7 +141,7 @@ def test_exec_error_raised(self, r): # make sure the pipe was restored to a working state assert pipe.set('z', 'zzz').execute() == [True] - assert r['z'] == b('zzz') + assert r['z'] == b'zzz' def test_parse_error_raised(self, r): with r.pipeline() as pipe: @@ -151,7 +155,7 @@ def test_parse_error_raised(self, r): # make sure the pipe was restored to a working state assert pipe.set('z', 'zzz').execute() == [True] - assert r['z'] == b('zzz') + assert r['z'] == b'zzz' @pytest.mark.xfail(reason="unsupported command: watch") def test_watch_succeed(self, r): @@ -163,8 +167,8 @@ def test_watch_succeed(self, r): assert pipe.watching a_value = pipe.get('a') b_value = pipe.get('b') - assert a_value == b('1') - assert b_value == b('2') + assert a_value == b'1' + assert b_value == b'2' pipe.multi() pipe.set('c', 3) @@ -197,7 +201,7 @@ def test_unwatch(self, r): pipe.unwatch() assert not pipe.watching pipe.get('a') - assert pipe.execute() == [b('1')] + assert pipe.execute() == [b'1'] @pytest.mark.xfail(reason="unsupported command: watch") def test_transaction_callable(self, r): @@ -207,9 +211,9 @@ def test_transaction_callable(self, r): def my_transaction(pipe): a_value = pipe.get('a') - assert a_value in (b('1'), b('2')) + assert a_value in (b'1', b'2') b_value = pipe.get('b') - assert b_value == b('2') + assert b_value == b'2' # silly run-once code... incr's "a" so WatchError should be raised # forcing this all to run again. 
this should incr "a" once to "2" @@ -222,7 +226,7 @@ def my_transaction(pipe): result = r.transaction(my_transaction, 'a', 'b') assert result == [True] - assert r['c'] == b('4') + assert r['c'] == b'4' def test_exec_error_in_no_transaction_pipeline(self, r): r['a'] = 1 @@ -236,10 +240,10 @@ def test_exec_error_in_no_transaction_pipeline(self, r): assert unicode(ex.value).startswith('Command # 1 (LLEN a) of ' 'pipeline caused error: ') - assert r['a'] == b('1') + assert r['a'] == b'1' def test_exec_error_in_no_transaction_pipeline_unicode_command(self, r): - key = unichr(3456) + u('abcd') + unichr(3421) + key = unichr(3456) + u'abcd' + unichr(3421) r[key] = 1 with r.pipeline(transaction=False) as pipe: pipe.llen(key) @@ -251,7 +255,7 @@ def test_exec_error_in_no_transaction_pipeline_unicode_command(self, r): expected = unicode('Command # 1 (LLEN {0}) of pipeline caused error: ').format(key) assert unicode(ex.value).startswith(expected) - assert r[key] == b('1') + assert r[key] == b'1' def test_blocked_methods(self, r): """ @@ -498,8 +502,8 @@ def test_pipeline_readonly(self, r, ro): with ro.pipeline() as readonly_pipe: readonly_pipe.get('foo71').zrange('foo88', 0, 5, withscores=True) assert readonly_pipe.execute() == [ - b('a1'), - [(b('z1'), 1.0), (b('z2'), 4)], + b'a1', + [(b'z1', 1.0), (b'z2', 4)], ] def assert_moved_redirection_on_slave(self, connection_pool_cls, cluster_obj): @@ -566,4 +570,4 @@ def test_access_correct_slave_with_readonly_mode_client(self, sr): return_value=master_value) as return_master_mock: readonly_client = RedisCluster(host="127.0.0.1", port=7000, readonly_mode=True) with readonly_client.pipeline() as readonly_pipe: - assert readonly_pipe.get('foo88').get('foo87').execute() == [b('bar'), b('foo')] + assert readonly_pipe.get('foo88').get('foo87').execute() == [b'bar', b'foo'] From 647b1ab6d00a1e9716e714c6ac41b405820fd3a7 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 16 Dec 2018 11:07:29 +0100 Subject: [PATCH 12/65] Fix all unicode and binary litterals in test_commands.py --- tests/test_commands.py | 451 ++++++++++++++++++++--------------------- 1 file changed, 224 insertions(+), 227 deletions(-) diff --git a/tests/test_commands.py b/tests/test_commands.py index 2e349f6b..fe251b14 100644 --- a/tests/test_commands.py +++ b/tests/test_commands.py @@ -31,13 +31,13 @@ class TestRedisCommands(object): @skip_if_server_version_lt('2.9.9') def test_zrevrangebylex(self, r): r.zadd('a', a=0, b=0, c=0, d=0, e=0, f=0, g=0) - assert r.zrevrangebylex('a', '[c', '-') == [b('c'), b('b'), b('a')] - assert r.zrevrangebylex('a', '(c', '-') == [b('b'), b('a')] + assert r.zrevrangebylex('a', '[c', '-') == [b'c', b'b', b'a'] + assert r.zrevrangebylex('a', '(c', '-') == [b'b', b'a'] assert r.zrevrangebylex('a', '(g', '[aaa') == \ - [b('f'), b('e'), b('d'), b('c'), b('b')] - assert r.zrevrangebylex('a', '+', '[f') == [b('g'), b('f')] + [b'f', b'e', b'd', b'c', b'b'] + assert r.zrevrangebylex('a', '+', '[f') == [b'g', b'f'] assert r.zrevrangebylex('a', '+', '-', start=3, num=2) == \ - [b('d'), b('c')] + [b'd', b'c'] def test_command_on_invalid_key_type(self, r): r.lpush('a', '1') @@ -80,13 +80,13 @@ def test_config_set(self, r): def test_echo(self, r): for server, res in r.echo('foo bar').items(): - assert res == b('foo bar') + assert res == b'foo bar' def test_object(self, r): r['a'] = 'foo' assert isinstance(r.object('refcount', 'a'), int) assert isinstance(r.object('idletime', 'a'), int) - assert r.object('encoding', 'a') in (b('raw'), b('embstr')) + assert r.object('encoding', 'a') 
in (b'raw', b'embstr') assert r.object('idletime', 'invalid-key') is None def test_ping(self, r): @@ -101,9 +101,9 @@ def test_time(self, r): # BASIC KEY COMMANDS def test_append(self, r): assert r.append('a', 'a1') == 2 - assert r['a'] == b('a1') + assert r['a'] == b'a1' assert r.append('a', 'a2') == 4 - assert r['a'] == b('a1a2') + assert r['a'] == b'a1a2' def test_bitcount(self, r): r.setbit('a', 5, True) @@ -133,16 +133,13 @@ def test_bitop_not_supported(self, r): def test_bitpos(self, r): """ Bitpos was added in redis-py in version 2.10.2 - - # TODO: Added b() around keys but i think they should not have to be - there for this command to work properly. """ key = 'key:bitpos' - r.set(key, b('\xff\xf0\x00')) + r.set(key, b'\xff\xf0\x00') assert r.bitpos(key, 0) == 12 assert r.bitpos(key, 0, 2, -1) == 16 assert r.bitpos(key, 0, -2, -1) == 12 - r.set(key, b('\x00\xff\xf0')) + r.set(key, b'\x00\xff\xf0') assert r.bitpos(key, 1, 0) == 8 assert r.bitpos(key, 1, 1) == 8 r.set(key, '\x00\x00\x00') @@ -155,7 +152,7 @@ def test_bitpos_wrong_arguments(self, r): Bitpos was added in redis-py in version 2.10.2 """ key = 'key:bitpos:wrong:args' - r.set(key, b('\xff\xf0\x00')) + r.set(key, b'\xff\xf0\x00') with pytest.raises(RedisError): r.bitpos(key, 0, end=1) == 12 with pytest.raises(RedisError): @@ -163,11 +160,11 @@ def test_bitpos_wrong_arguments(self, r): def test_decr(self, r): assert r.decr('a') == -1 - assert r['a'] == b('-1') + assert r['a'] == b'-1' assert r.decr('a') == -2 - assert r['a'] == b('-2') + assert r['a'] == b'-2' assert r.decr('a', amount=5) == -7 - assert r['a'] == b('-7') + assert r['a'] == b'-7' def test_delete(self, r): assert r.delete('a') == 0 @@ -191,7 +188,7 @@ def test_dump_and_restore(self, r): dumped = r.dump('a') del r['a'] r.restore('a', 0, dumped) - assert r['a'] == b('foo') + assert r['a'] == b'foo' def test_exists(self, r): assert not r.exists('a') @@ -232,19 +229,19 @@ def test_expireat_unixtime(self, r): def test_get_and_set(self, r): # get and set can't be tested independently of each other assert r.get('a') is None - byte_string = b('value') + byte_string = b'value' integer = 5 - unicode_string = unichr(3456) + u('abcd') + unichr(3421) + unicode_string = unichr(3456) + u'abcd' + unichr(3421) assert r.set('byte_string', byte_string) assert r.set('integer', 5) assert r.set('unicode_string', unicode_string) assert r.get('byte_string') == byte_string - assert r.get('integer') == b(str(integer)) + assert r.get('integer') == bstr(integer) assert r.get('unicode_string').decode('utf-8') == unicode_string def test_getitem_and_setitem(self, r): r['a'] = 'bar' - assert r['a'] == b('bar') + assert r['a'] == b'bar' def test_getitem_raises_keyerror_for_missing_key(self, r): with pytest.raises(KeyError): @@ -268,31 +265,31 @@ def test_get_set_bit(self, r): def test_getrange(self, r): r['a'] = 'foo' - assert r.getrange('a', 0, 0) == b('f') - assert r.getrange('a', 0, 2) == b('foo') - assert r.getrange('a', 3, 4) == b('') + assert r.getrange('a', 0, 0) == b'f' + assert r.getrange('a', 0, 2) == b'foo' + assert r.getrange('a', 3, 4) == b'' def test_getset(self, r): assert r.getset('a', 'foo') is None - assert r.getset('a', 'bar') == b('foo') - assert r.get('a') == b('bar') + assert r.getset('a', 'bar') == b'foo' + assert r.get('a') == b'bar' def test_incr(self, r): assert r.incr('a') == 1 - assert r['a'] == b('1') + assert r['a'] == b'1' assert r.incr('a') == 2 - assert r['a'] == b('2') + assert r['a'] == b'2' assert r.incr('a', amount=5) == 7 - assert r['a'] == b('7') + assert 
r['a'] == b'7' def test_incrby(self, r): assert r.incrby('a') == 1 assert r.incrby('a', 4) == 5 - assert r['a'] == b('5') + assert r['a'] == b'5' def test_incrbyfloat(self, r): assert r.incrbyfloat('a') == 1.0 - assert r['a'] == b('1') + assert r['a'] == b'1' assert r.incrbyfloat('a', 1.1) == 2.1 assert float(r['a']) == float(2.1) @@ -303,41 +300,41 @@ def test_keys(self, r): keys = keys_with_underscores.union(set(['testc'])) for key in keys: r[key] = 1 - assert set(r.keys(pattern='test_*')) == {b(k) for k in keys_with_underscores} - assert set(r.keys(pattern='test*')) == {b(k) for k in keys} + assert set(r.keys(pattern='test_*')) == {b"{0}".format(k) for k in keys_with_underscores} + assert set(r.keys(pattern='test*')) == {b"{0}".format(k) for k in keys} def test_mget(self, r): assert r.mget(['a', 'b']) == [None, None] r['a'] = '1' r['b'] = '2' r['c'] = '3' - assert r.mget('a', 'other', 'b', 'c') == [b('1'), None, b('2'), b('3')] + assert r.mget('a', 'other', 'b', 'c') == [b'1', None, b'2', b'3'] def test_mset(self, r): - d = {'a': b('1'), 'b': b('2'), 'c': b('3')} + d = {'a': b'1', 'b': b'2', 'c': b'3'} assert r.mset(d) for k, v in iteritems(d): assert r[k] == v def test_mset_kwargs(self, r): - d = {'a': b('1'), 'b': b('2'), 'c': b('3')} + d = {'a': b'1', 'b': b'2', 'c': b'3'} assert r.mset(**d) for k, v in iteritems(d): assert r[k] == v def test_msetnx(self, r): - d = {'a': b('1'), 'b': b('2'), 'c': b('3')} + d = {'a': b'1', 'b': b'2', 'c': b'3'} assert r.msetnx(d) - d2 = {'a': b('x'), 'd': b('4')} + d2 = {'a': b'x', 'd': b'4'} assert not r.msetnx(d2) for k, v in iteritems(d): assert r[k] == v assert r.get('d') is None def test_msetnx_kwargs(self, r): - d = {'a': b('1'), 'b': b('2'), 'c': b('3')} + d = {'a': b'1', 'b': b'2', 'c': b'3'} assert r.msetnx(**d) - d2 = {'a': b('x'), 'd': b('4')} + d2 = {'a': b'x', 'd': b'4'} assert not r.msetnx(**d2) for k, v in iteritems(d): assert r[k] == v @@ -372,26 +369,26 @@ def test_pexpireat_unixtime(self, r): def test_psetex(self, r): assert r.psetex('a', 1000, 'value') - assert r['a'] == b('value') + assert r['a'] == b'value' assert 0 < r.pttl('a') <= 1000 def test_psetex_timedelta(self, r): expire_at = datetime.timedelta(milliseconds=1000) assert r.psetex('a', expire_at, 'value') - assert r['a'] == b('value') + assert r['a'] == b'value' assert 0 < r.pttl('a') <= 1000 def test_randomkey(self, r): assert r.randomkey() is None for key in ('a', 'b', 'c'): r[key] = 1 - assert r.randomkey() in (b('a'), b('b'), b('c')) + assert r.randomkey() in (b'a', b'b', b'c') def test_rename(self, r): r['a'] = '1' assert r.rename('a', 'b') assert r.get('a') is None - assert r['b'] == b('1') + assert r['b'] == b'1' with pytest.raises(ResponseError) as ex: r.rename("foo", "foo") @@ -406,27 +403,27 @@ def test_renamenx(self, r): r['a'] = '1' r['b'] = '2' assert not r.renamenx('a', 'b') - assert r['a'] == b('1') - assert r['b'] == b('2') + assert r['a'] == b'1' + assert r['b'] == b'2' assert r.renamenx('a', 'c') - assert r['c'] == b('1') + assert r['c'] == b'1' def test_set_nx(self, r): assert r.set('a', '1', nx=True) assert not r.set('a', '2', nx=True) - assert r['a'] == b('1') + assert r['a'] == b'1' def test_set_xx(self, r): assert not r.set('a', '1', xx=True) assert r.get('a') is None r['a'] = 'bar' assert r.set('a', '2', xx=True) - assert r.get('a') == b('2') + assert r.get('a') == b'2' def test_set_px(self, r): assert r.set('a', '1', px=10000) - assert r['a'] == b('1') + assert r['a'] == b'1' assert 0 < r.pttl('a') <= 10000 assert 0 < r.ttl('a') <= 10 @@ -452,21 
+449,21 @@ def test_set_multipleoptions(self, r): def test_setex(self, r): assert r.setex('a', 60, '1') - assert r['a'] == b('1') + assert r['a'] == b'1' assert 0 < r.ttl('a') <= 60 def test_setnx(self, r): assert r.setnx('a', '1') - assert r['a'] == b('1') + assert r['a'] == b'1' assert not r.setnx('a', '2') - assert r['a'] == b('1') + assert r['a'] == b'1' def test_setrange(self, r): assert r.setrange('a', 5, 'foo') == 8 - assert r['a'] == b('\0\0\0\0\0foo') + assert r['a'] == b'\0\0\0\0\0foo' r['a'] = 'abcdefghijh' assert r.setrange('a', 6, '12345') == 11 - assert r['a'] == b('abcdef12345') + assert r['a'] == b'abcdef12345' def test_strlen(self, r): r['a'] = 'foo' @@ -474,74 +471,74 @@ def test_strlen(self, r): def test_substr(self, r): r['a'] = '0123456789' - assert r.substr('a', 0) == b('0123456789') - assert r.substr('a', 2) == b('23456789') - assert r.substr('a', 3, 5) == b('345') - assert r.substr('a', 3, -2) == b('345678') + assert r.substr('a', 0) == b'0123456789' + assert r.substr('a', 2) == b'23456789' + assert r.substr('a', 3, 5) == b'345' + assert r.substr('a', 3, -2) == b'345678' def test_type(self, r): - assert r.type('a') == b('none') + assert r.type('a') == b'none' r['a'] = '1' - assert r.type('a') == b('string') + assert r.type('a') == b'string' del r['a'] r.lpush('a', '1') - assert r.type('a') == b('list') + assert r.type('a') == b'list' del r['a'] r.sadd('a', '1') - assert r.type('a') == b('set') + assert r.type('a') == b'set' del r['a'] r.zadd('a', **{'1': 1}) - assert r.type('a') == b('zset') + assert r.type('a') == b'zset' # LIST COMMANDS def test_blpop(self, r): r.rpush('a{foo}', '1', '2') r.rpush('b{foo}', '3', '4') - assert r.blpop(['b{foo}', 'a{foo}'], timeout=1) == (b('b{foo}'), b('3')) - assert r.blpop(['b{foo}', 'a{foo}'], timeout=1) == (b('b{foo}'), b('4')) - assert r.blpop(['b{foo}', 'a{foo}'], timeout=1) == (b('a{foo}'), b('1')) - assert r.blpop(['b{foo}', 'a{foo}'], timeout=1) == (b('a{foo}'), b('2')) + assert r.blpop(['b{foo}', 'a{foo}'], timeout=1) == (b'b{foo}', b'3') + assert r.blpop(['b{foo}', 'a{foo}'], timeout=1) == (b'b{foo}', b'4') + assert r.blpop(['b{foo}', 'a{foo}'], timeout=1) == (b'a{foo}', b'1') + assert r.blpop(['b{foo}', 'a{foo}'], timeout=1) == (b'a{foo}', b'2') assert r.blpop(['b{foo}', 'a{foo}'], timeout=1) is None r.rpush('c{foo}', '1') - assert r.blpop('c{foo}', timeout=1) == (b('c{foo}'), b('1')) + assert r.blpop('c{foo}', timeout=1) == (b'c{foo}', b'1') def test_brpop(self, r): r.rpush('a{foo}', '1', '2') r.rpush('b{foo}', '3', '4') - assert r.brpop(['b{foo}', 'a{foo}'], timeout=1) == (b('b{foo}'), b('4')) - assert r.brpop(['b{foo}', 'a{foo}'], timeout=1) == (b('b{foo}'), b('3')) - assert r.brpop(['b{foo}', 'a{foo}'], timeout=1) == (b('a{foo}'), b('2')) - assert r.brpop(['b{foo}', 'a{foo}'], timeout=1) == (b('a{foo}'), b('1')) + assert r.brpop(['b{foo}', 'a{foo}'], timeout=1) == (b'b{foo}', b'4') + assert r.brpop(['b{foo}', 'a{foo}'], timeout=1) == (b'b{foo}', b'3') + assert r.brpop(['b{foo}', 'a{foo}'], timeout=1) == (b'a{foo}', b'2') + assert r.brpop(['b{foo}', 'a{foo}'], timeout=1) == (b'a{foo}', b'1') assert r.brpop(['b{foo}', 'a{foo}'], timeout=1) is None r.rpush('c{foo}', '1') - assert r.brpop('c{foo}', timeout=1) == (b('c{foo}'), b('1')) + assert r.brpop('c{foo}', timeout=1) == (b'c{foo}', b'1') def test_brpoplpush(self, r): r.rpush('a{foo}', '1', '2') r.rpush('b{foo}', '3', '4') - assert r.brpoplpush('a{foo}', 'b{foo}') == b('2') - assert r.brpoplpush('a{foo}', 'b{foo}') == b('1') + assert r.brpoplpush('a{foo}', 'b{foo}') 
== b'2' + assert r.brpoplpush('a{foo}', 'b{foo}') == b'1' assert r.brpoplpush('a{foo}', 'b{foo}', timeout=1) is None assert r.lrange('a{foo}', 0, -1) == [] - assert r.lrange('b{foo}', 0, -1) == [b('1'), b('2'), b('3'), b('4')] + assert r.lrange('b{foo}', 0, -1) == [b'1', b'2', b'3', b'4'] def test_brpoplpush_empty_string(self, r): r.rpush('a', '') - assert r.brpoplpush('a', 'b') == b('') + assert r.brpoplpush('a', 'b') == b'' def test_lindex(self, r): r.rpush('a', '1', '2', '3') - assert r.lindex('a', '0') == b('1') - assert r.lindex('a', '1') == b('2') - assert r.lindex('a', '2') == b('3') + assert r.lindex('a', '0') == b'1' + assert r.lindex('a', '1') == b'2' + assert r.lindex('a', '2') == b'3' def test_linsert(self, r): r.rpush('a', '1', '2', '3') assert r.linsert('a', 'after', '2', '2.5') == 4 - assert r.lrange('a', 0, -1) == [b('1'), b('2'), b('2.5'), b('3')] + assert r.lrange('a', 0, -1) == [b'1', b'2', b'2.5', b'3'] assert r.linsert('a', 'before', '2', '1.5') == 5 assert r.lrange('a', 0, -1) == \ - [b('1'), b('1.5'), b('2'), b('2.5'), b('3')] + [b'1', b'1.5', b'2', b'2.5', b'3'] def test_llen(self, r): r.rpush('a', '1', '2', '3') @@ -549,74 +546,74 @@ def test_llen(self, r): def test_lpop(self, r): r.rpush('a', '1', '2', '3') - assert r.lpop('a') == b('1') - assert r.lpop('a') == b('2') - assert r.lpop('a') == b('3') + assert r.lpop('a') == b'1' + assert r.lpop('a') == b'2' + assert r.lpop('a') == b'3' assert r.lpop('a') is None def test_lpush(self, r): assert r.lpush('a', '1') == 1 assert r.lpush('a', '2') == 2 assert r.lpush('a', '3', '4') == 4 - assert r.lrange('a', 0, -1) == [b('4'), b('3'), b('2'), b('1')] + assert r.lrange('a', 0, -1) == [b'4', b'3', b'2', b'1'] def test_lpushx(self, r): assert r.lpushx('a', '1') == 0 assert r.lrange('a', 0, -1) == [] r.rpush('a', '1', '2', '3') assert r.lpushx('a', '4') == 4 - assert r.lrange('a', 0, -1) == [b('4'), b('1'), b('2'), b('3')] + assert r.lrange('a', 0, -1) == [4'4', b'1', b'2', b'3'] def test_lrange(self, r): r.rpush('a', '1', '2', '3', '4', '5') - assert r.lrange('a', 0, 2) == [b('1'), b('2'), b('3')] - assert r.lrange('a', 2, 10) == [b('3'), b('4'), b('5')] - assert r.lrange('a', 0, -1) == [b('1'), b('2'), b('3'), b('4'), b('5')] + assert r.lrange('a', 0, 2) == [b'1', b'2', b'3'] + assert r.lrange('a', 2, 10) == [b'3', b'4', b'5'] + assert r.lrange('a', 0, -1) == [b'1', b'2', b'3', b'4', b'5'] def test_lrem(self, r): r.rpush('a', '1', '1', '1', '1') assert r.lrem('a', '1', 1) == 1 - assert r.lrange('a', 0, -1) == [b('1'), b('1'), b('1')] + assert r.lrange('a', 0, -1) == [b'1', b'1', b'1'] assert r.lrem('a', 0, '1') == 3 assert r.lrange('a', 0, -1) == [] def test_lset(self, r): r.rpush('a', '1', '2', '3') - assert r.lrange('a', 0, -1) == [b('1'), b('2'), b('3')] + assert r.lrange('a', 0, -1) == [b'1', b'2', b'3'] assert r.lset('a', 1, '4') - assert r.lrange('a', 0, 2) == [b('1'), b('4'), b('3')] + assert r.lrange('a', 0, 2) == [b'1', b'4', b'3'] def test_ltrim(self, r): r.rpush('a', '1', '2', '3') assert r.ltrim('a', 0, 1) - assert r.lrange('a', 0, -1) == [b('1'), b('2')] + assert r.lrange('a', 0, -1) == [b'1', b'2'] def test_rpop(self, r): r.rpush('a', '1', '2', '3') - assert r.rpop('a') == b('3') - assert r.rpop('a') == b('2') - assert r.rpop('a') == b('1') + assert r.rpop('a') == b'3' + assert r.rpop('a') == b'2' + assert r.rpop('a') == b'1' assert r.rpop('a') is None def test_rpoplpush(self, r): r.rpush('a', 'a1', 'a2', 'a3') r.rpush('b', 'b1', 'b2', 'b3') - assert r.rpoplpush('a', 'b') == b('a3') - assert r.lrange('a', 0, 
-1) == [b('a1'), b('a2')] - assert r.lrange('b', 0, -1) == [b('a3'), b('b1'), b('b2'), b('b3')] + assert r.rpoplpush('a', 'b') == b'a3' + assert r.lrange('a', 0, -1) == [b'a1', b'a2'] + assert r.lrange('b', 0, -1) == [b'a3', b'b1', b'b2', b'b3'] def test_rpush(self, r): assert r.rpush('a', '1') == 1 assert r.rpush('a', '2') == 2 assert r.rpush('a', '3', '4') == 4 - assert r.lrange('a', 0, -1) == [b('1'), b('2'), b('3'), b('4')] + assert r.lrange('a', 0, -1) == [b'1', b'2', b'3', b'4'] def test_rpushx(self, r): assert r.rpushx('a', 'b') == 0 assert r.lrange('a', 0, -1) == [] r.rpush('a', '1', '2', '3') assert r.rpushx('a', '4') == 4 - assert r.lrange('a', 0, -1) == [b('1'), b('2'), b('3'), b('4')] + assert r.lrange('a', 0, -1) == [b'1', b'2', b'3', b'4'] # SCAN COMMANDS def test_scan(self, r): @@ -629,81 +626,81 @@ def test_scan(self, r): assert cursor == 0 keys += partial_keys - assert set(keys) == set([b('a'), b('b'), b('c')]) + assert set(keys) == set([b'a', b'b', b'c']) keys = [] for result in r.scan(match='a').values(): cursor, partial_keys = result assert cursor == 0 keys += partial_keys - assert set(keys) == set([b('a')]) + assert set(keys) == set([b'a']) def test_scan_iter(self, r): alphabet = 'abcdefghijklmnopqrstuvwABCDEFGHIJKLMNOPQRSTUVW' for i, c in enumerate(alphabet): r.set(c, i) keys = list(r.scan_iter()) - expected_result = [b(c) for c in alphabet] + expected_result = [b"{0}".format(c) for c in alphabet] assert set(keys) == set(expected_result) keys = list(r.scan_iter(match='a')) - assert set(keys) == set([b('a')]) + assert set(keys) == set([b'a']) r.set('Xa', 1) r.set('Xb', 2) r.set('Xc', 3) keys = list(r.scan_iter('X*', count=1000)) assert len(keys) == 3 - assert set(keys) == set([b('Xa'), b('Xb'), b('Xc')]) + assert set(keys) == set([b'Xa', b'Xb', b'Xc']) def test_sscan(self, r): r.sadd('a', 1, 2, 3) cursor, members = r.sscan('a') assert cursor == 0 - assert set(members) == set([b('1'), b('2'), b('3')]) - _, members = r.sscan('a', match=b('1')) - assert set(members) == set([b('1')]) + assert set(members) == set([b'1', b'2', b'3']) + _, members = r.sscan('a', match=b'1') + assert set(members) == set([b'1']) def test_sscan_iter(self, r): r.sadd('a', 1, 2, 3) members = list(r.sscan_iter('a')) - assert set(members) == set([b('1'), b('2'), b('3')]) - members = list(r.sscan_iter('a', match=b('1'))) - assert set(members) == set([b('1')]) + assert set(members) == set([b'1', b'2', b'3']) + members = list(r.sscan_iter('a', match=b'1')) + assert set(members) == set([b'1']) def test_hscan(self, r): r.hmset('a', {'a': 1, 'b': 2, 'c': 3}) cursor, dic = r.hscan('a') assert cursor == 0 - assert dic == {b('a'): b('1'), b('b'): b('2'), b('c'): b('3')} + assert dic == {b'a': b'1', b'b': b'2', b'c': b'3'} _, dic = r.hscan('a', match='a') - assert dic == {b('a'): b('1')} + assert dic == {b'a': b'1'} def test_hscan_iter(self, r): r.hmset('a', {'a': 1, 'b': 2, 'c': 3}) dic = dict(r.hscan_iter('a')) - assert dic == {b('a'): b('1'), b('b'): b('2'), b('c'): b('3')} + assert dic == {b'a': b'1', b'b': b'2', b'c': b'3'} dic = dict(r.hscan_iter('a', match='a')) - assert dic == {b('a'): b('1')} + assert dic == {b'a': b'1'} def test_zscan(self, r): r.zadd('a', 1, 'a', 2, 'b', 3, 'c') cursor, pairs = r.zscan('a') assert cursor == 0 - assert set(pairs) == set([(b('a'), 1), (b('b'), 2), (b('c'), 3)]) + assert set(pairs) == set([(b'a', 1), (b'b', 2), (b'c', 3)]) _, pairs = r.zscan('a', match='a') - assert set(pairs) == set([(b('a'), 1)]) + assert set(pairs) == set([(b'a', 1)]) def test_zscan_iter(self, r): 
r.zadd('a', 1, 'a', 2, 'b', 3, 'c') pairs = list(r.zscan_iter('a')) - assert set(pairs) == set([(b('a'), 1), (b('b'), 2), (b('c'), 3)]) + assert set(pairs) == set([(b'a', 1), (b'b', 2), (b'c', 3)]) pairs = list(r.zscan_iter('a', match='a')) - assert set(pairs) == set([(b('a'), 1)]) + assert set(pairs) == set([(b'a', 1)]) # SET COMMANDS def test_sadd(self, r): - members = set([b('1'), b('2'), b('3')]) + members = set([b'1', b'2', b'3']) r.sadd('a', *members) assert r.smembers('a') == members @@ -713,17 +710,17 @@ def test_scard(self, r): def test_sdiff(self, r): r.sadd('a{foo}', '1', '2', '3') - assert r.sdiff('a{foo}', 'b{foo}') == set([b('1'), b('2'), b('3')]) + assert r.sdiff('a{foo}', 'b{foo}') == set([b'1', b'2', b'3']) r.sadd('b{foo}', '2', '3') - assert r.sdiff('a{foo}', 'b{foo}') == set([b('1')]) + assert r.sdiff('a{foo}', 'b{foo}') == set([b'1']) def test_sdiffstore(self, r): r.sadd('a{foo}', '1', '2', '3') assert r.sdiffstore('c{foo}', 'a{foo}', 'b{foo}') == 3 - assert r.smembers('c{foo}') == set([b('1'), b('2'), b('3')]) + assert r.smembers('c{foo}') == set([b'1', b'2', b'3']) r.sadd('b{foo}', '2', '3') assert r.sdiffstore('c{foo}', 'a{foo}', 'b{foo}') == 1 - assert r.smembers('c{foo}') == set([b('1')]) + assert r.smembers('c{foo}') == set([b'1']) # Diff:s that return empty set should not fail r.sdiffstore('d{foo}', 'e{foo}') == 0 @@ -732,7 +729,7 @@ def test_sinter(self, r): r.sadd('a{foo}', '1', '2', '3') assert r.sinter('a{foo}', 'b{foo}') == set() r.sadd('b{foo}', '2', '3') - assert r.sinter('a{foo}', 'b{foo}') == set([b('2'), b('3')]) + assert r.sinter('a{foo}', 'b{foo}') == set([b'2', b'3']) def test_sinterstore(self, r): r.sadd('a{foo}', '1', '2', '3') @@ -740,7 +737,7 @@ def test_sinterstore(self, r): assert r.smembers('c{foo}') == set() r.sadd('b{foo}', '2', '3') assert r.sinterstore('c{foo}', 'a{foo}', 'b{foo}') == 2 - assert r.smembers('c{foo}') == set([b('2'), b('3')]) + assert r.smembers('c{foo}') == set([b'2', b'3']) def test_sismember(self, r): r.sadd('a', '1', '2', '3') @@ -751,29 +748,29 @@ def test_sismember(self, r): def test_smembers(self, r): r.sadd('a', '1', '2', '3') - assert r.smembers('a') == set([b('1'), b('2'), b('3')]) + assert r.smembers('a') == set([b'1', b'2', b'3']) def test_smove(self, r): r.sadd('a{foo}', 'a1', 'a2') r.sadd('b{foo}', 'b1', 'b2') assert r.smove('a{foo}', 'b{foo}', 'a1') - assert r.smembers('a{foo}') == set([b('a2')]) - assert r.smembers('b{foo}') == set([b('b1'), b('b2'), b('a1')]) + assert r.smembers('a{foo}') == set([b'a2']) + assert r.smembers('b{foo}') == set([b'b1', b'b2', b'a1']) def test_spop(self, r): - s = [b('1'), b('2'), b('3')] + s = [b'1', b'2', b'3'] r.sadd('a', *s) value = r.spop('a') assert value in s assert r.smembers('a') == set(s) - set([value]) def test_srandmember(self, r): - s = [b('1'), b('2'), b('3')] + s = [b'1', b'2', b'3'] r.sadd('a', *s) assert r.srandmember('a') in s def test_srandmember_multi_value(self, r): - s = [b('1'), b('2'), b('3')] + s = [b'1', b'2', b'3'] r.sadd('a', *s) randoms = r.srandmember('a', number=2) assert len(randoms) == 2 @@ -783,23 +780,23 @@ def test_srem(self, r): r.sadd('a', '1', '2', '3', '4') assert r.srem('a', '5') == 0 assert r.srem('a', '2', '4') == 2 - assert r.smembers('a') == set([b('1'), b('3')]) + assert r.smembers('a') == set([b'1', b'3']) def test_sunion(self, r): r.sadd('a{foo}', '1', '2') r.sadd('b{foo}', '2', '3') - assert r.sunion('a{foo}', 'b{foo}') == set([b('1'), b('2'), b('3')]) + assert r.sunion('a{foo}', 'b{foo}') == set([b'1', b'2', b'3']) def 
test_sunionstore(self, r): r.sadd('a{foo}', '1', '2') r.sadd('b{foo}', '2', '3') assert r.sunionstore('c{foo}', 'a{foo}', 'b{foo}') == 3 - assert r.smembers('c{foo}') == set([b('1'), b('2'), b('3')]) + assert r.smembers('c{foo}') == set([b'1', b'2', b'3']) # SORTED SET COMMANDS def test_zadd(self, r): r.zadd('a', a1=1, a2=2, a3=3) - assert r.zrange('a', 0, -1) == [b('a1'), b('a2'), b('a3')] + assert r.zrange('a', 0, -1) == [b'a1', b'a2'g, b'a3'] def test_zcard(self, r): r.zadd('a', a1=1, a2=2, a3=3) @@ -837,7 +834,7 @@ def test_zinterstore_sum(self, r): r.zadd('c{foo}', a1=6, a3=5, a4=4) assert r.zinterstore('d{foo}', ['a{foo}', 'b{foo}', 'c{foo}']) == 2 assert r.zrange('d{foo}', 0, -1, withscores=True) == \ - [(b('a3'), 8), (b('a1'), 9)] + [(b'a3', 8), (b'a1'), 9] def test_zinterstore_max(self, r): r.zadd('a{foo}', a1=1, a2=1, a3=1) @@ -845,7 +842,7 @@ def test_zinterstore_max(self, r): r.zadd('c{foo}', a1=6, a3=5, a4=4) assert r.zinterstore('d{foo}', ['a{foo}', 'b{foo}', 'c{foo}'], aggregate='MAX') == 2 assert r.zrange('d{foo}', 0, -1, withscores=True) == \ - [(b('a3'), 5), (b('a1'), 6)] + [(b'a3', 5), (b'a1', 6)] def test_zinterstore_min(self, r): r.zadd('a{foo}', a1=1, a2=2, a3=3) @@ -853,7 +850,7 @@ def test_zinterstore_min(self, r): r.zadd('c{foo}', a1=6, a3=5, a4=4) assert r.zinterstore('d{foo}', ['a{foo}', 'b{foo}', 'c{foo}'], aggregate='MIN') == 2 assert r.zrange('d{foo}', 0, -1, withscores=True) == \ - [(b('a1'), 1), (b('a3'), 3)] + [(b'a1', 1), (b'a3', 3)] def test_zinterstore_with_weight(self, r): r.zadd('a{foo}', a1=1, a2=1, a3=1) @@ -861,48 +858,48 @@ def test_zinterstore_with_weight(self, r): r.zadd('c{foo}', a1=6, a3=5, a4=4) assert r.zinterstore('d{foo}', {'a{foo}': 1, 'b{foo}': 2, 'c{foo}': 3}) == 2 assert r.zrange('d{foo}', 0, -1, withscores=True) == \ - [(b('a3'), 20), (b('a1'), 23)] + [(b'a3', 20), (b'a1', 23)] def test_zrange(self, r): r.zadd('a', a1=1, a2=2, a3=3) - assert r.zrange('a', 0, 1) == [b('a1'), b('a2')] - assert r.zrange('a', 1, 2) == [b('a2'), b('a3')] + assert r.zrange('a', 0, 1) == [b'a1', b'a2'] + assert r.zrange('a', 1, 2) == [b'a2', b'a3'] # withscores assert r.zrange('a', 0, 1, withscores=True) == \ - [(b('a1'), 1.0), (b('a2'), 2.0)] + [(b'a1', 1.0), (b'a2', 2.0)] assert r.zrange('a', 1, 2, withscores=True) == \ - [(b('a2'), 2.0), (b('a3'), 3.0)] + [(b'a2', 2.0), (b'a3', 3.0)] # custom score function assert r.zrange('a', 0, 1, withscores=True, score_cast_func=int) == \ - [(b('a1'), 1), (b('a2'), 2)] + [(b'a1', 1), (b'a2', 2)] def test_zrangebylex(self, r): r.zadd('a', a=0, b=0, c=0, d=0, e=0, f=0, g=0) - assert r.zrangebylex('a', '-', '[c') == [b('a'), b('b'), b('c')] - assert r.zrangebylex('a', '-', '(c') == [b('a'), b('b')] + assert r.zrangebylex('a', '-', '[c') == [b'a', b'b', b'c'] + assert r.zrangebylex('a', '-', '(c') == [b'a', b'b'] assert r.zrangebylex('a', '[aaa', '(g') == \ - [b('b'), b('c'), b('d'), b('e'), b('f')] - assert r.zrangebylex('a', '[f', '+') == [b('f'), b('g')] - assert r.zrangebylex('a', '-', '+', start=3, num=2) == [b('d'), b('e')] + [b'b', b'c', b'd', b'e', b'f'] + assert r.zrangebylex('a', '[f', '+') == [b'f', b'g'] + assert r.zrangebylex('a', '-', '+', start=3, num=2) == [b'd', b'e'] def test_zrangebyscore(self, r): r.zadd('a', a1=1, a2=2, a3=3, a4=4, a5=5) - assert r.zrangebyscore('a', 2, 4) == [b('a2'), b('a3'), b('a4')] + assert r.zrangebyscore('a', 2, 4) == [b'a2', b'a3', b'a4'] # slicing with start/num assert r.zrangebyscore('a', 2, 4, start=1, num=2) == \ - [b('a3'), b('a4')] + [b'a3', b'a4'] # withscores assert 
r.zrangebyscore('a', 2, 4, withscores=True) == \ - [(b('a2'), 2.0), (b('a3'), 3.0), (b('a4'), 4.0)] + [(b'a2', 2.0), (b'a3', 3.0), (b'a4', 4.0)] # custom score function assert r.zrangebyscore('a', 2, 4, withscores=True, score_cast_func=int) == \ - [(b('a2'), 2), (b('a3'), 3), (b('a4'), 4)] + [(b'a2', 2), (b'a3', 3), (b'a4', 4)] def test_zrank(self, r): r.zadd('a', a1=1, a2=2, a3=3, a4=4, a5=5) @@ -913,68 +910,68 @@ def test_zrank(self, r): def test_zrem(self, r): r.zadd('a', a1=1, a2=2, a3=3) assert r.zrem('a', 'a2') == 1 - assert r.zrange('a', 0, -1) == [b('a1'), b('a3')] + assert r.zrange('a', 0, -1) == [b'a1', b'a3'] assert r.zrem('a', 'b') == 0 - assert r.zrange('a', 0, -1) == [b('a1'), b('a3')] + assert r.zrange('a', 0, -1) == [b'a1', b'a3'] def test_zrem_multiple_keys(self, r): r.zadd('a', a1=1, a2=2, a3=3) assert r.zrem('a', 'a1', 'a2') == 2 - assert r.zrange('a', 0, 5) == [b('a3')] + assert r.zrange('a', 0, 5) == [b'a3'] def test_zremrangebylex(self, r): r.zadd('a', a=0, b=0, c=0, d=0, e=0, f=0, g=0) assert r.zremrangebylex('a', '-', '[c') == 3 - assert r.zrange('a', 0, -1) == [b('d'), b('e'), b('f'), b('g')] + assert r.zrange('a', 0, -1) == [b'd', b'e', b'f', b'g'] assert r.zremrangebylex('a', '[f', '+') == 2 - assert r.zrange('a', 0, -1) == [b('d'), b('e')] + assert r.zrange('a', 0, -1) == [b'd', b'e'] assert r.zremrangebylex('a', '[h', '+') == 0 - assert r.zrange('a', 0, -1) == [b('d'), b('e')] + assert r.zrange('a', 0, -1) == [b'd', b'e'] def test_zremrangebyrank(self, r): r.zadd('a', a1=1, a2=2, a3=3, a4=4, a5=5) assert r.zremrangebyrank('a', 1, 3) == 3 - assert r.zrange('a', 0, 5) == [b('a1'), b('a5')] + assert r.zrange('a', 0, 5) == [b'a1', b'a5'] def test_zremrangebyscore(self, r): r.zadd('a', a1=1, a2=2, a3=3, a4=4, a5=5) assert r.zremrangebyscore('a', 2, 4) == 3 - assert r.zrange('a', 0, -1) == [b('a1'), b('a5')] + assert r.zrange('a', 0, -1) == [b'a1', b'a5'] assert r.zremrangebyscore('a', 2, 4) == 0 - assert r.zrange('a', 0, -1) == [b('a1'), b('a5')] + assert r.zrange('a', 0, -1) == [b'a1', b'a5'] def test_zrevrange(self, r): r.zadd('a', a1=1, a2=2, a3=3) - assert r.zrevrange('a', 0, 1) == [b('a3'), b('a2')] - assert r.zrevrange('a', 1, 2) == [b('a2'), b('a1')] + assert r.zrevrange('a', 0, 1) == [b'a3', b'a2'] + assert r.zrevrange('a', 1, 2) == [b'a2', b'a1'] # withscores assert r.zrevrange('a', 0, 1, withscores=True) == \ - [(b('a3'), 3.0), (b('a2'), 2.0)] + [(b'a3', 3.0), (b'a2', 2.0)] assert r.zrevrange('a', 1, 2, withscores=True) == \ - [(b('a2'), 2.0), (b('a1'), 1.0)] + [(b'a2', 2.0), (b'a1', 1.0)] # custom score function assert r.zrevrange('a', 0, 1, withscores=True, score_cast_func=int) == \ - [(b('a3'), 3.0), (b('a2'), 2.0)] + [(b'a3', 3.0), (b'a2', 2.0)] def test_zrevrangebyscore(self, r): r.zadd('a', a1=1, a2=2, a3=3, a4=4, a5=5) - assert r.zrevrangebyscore('a', 4, 2) == [b('a4'), b('a3'), b('a2')] + assert r.zrevrangebyscore('a', 4, 2) == [b'a4', b'a3', b'a2'] # slicing with start/num assert r.zrevrangebyscore('a', 4, 2, start=1, num=2) == \ - [b('a3'), b('a2')] + [b'a3', b'a2'] # withscores assert r.zrevrangebyscore('a', 4, 2, withscores=True) == \ - [(b('a4'), 4.0), (b('a3'), 3.0), (b('a2'), 2.0)] + [(b'a4', 4.0), (b'a3', 3.0), (b'a2', 2.0)] # custom score function assert r.zrevrangebyscore('a', 4, 2, withscores=True, score_cast_func=int) == \ - [(b('a4'), 4), (b('a3'), 3), (b('a2'), 2)] + [(b'a4', 4), (b'a3', 3), (b'a2', 2)] def test_zrevrank(self, r): r.zadd('a', a1=1, a2=2, a3=3, a4=4, a5=5) @@ -1002,7 +999,7 @@ def test_zunionstore_sum(self, r): 
r.zadd('c{foo}', a1=6, a3=5, a4=4) assert r.zunionstore('d{foo}', ['a{foo}', 'b{foo}', 'c{foo}']) == 4 assert r.zrange('d{foo}', 0, -1, withscores=True) == \ - [(b('a2'), 3), (b('a4'), 4), (b('a3'), 8), (b('a1'), 9)] + [(b'a2', 3), (b'a4', 4), (b'a3', 8), (b'a1', 9)] def test_zunionstore_max(self, r): r.zadd('a{foo}', a1=1, a2=1, a3=1) @@ -1010,7 +1007,7 @@ def test_zunionstore_max(self, r): r.zadd('c{foo}', a1=6, a3=5, a4=4) assert r.zunionstore('d{foo}', ['a{foo}', 'b{foo}', 'c{foo}'], aggregate='MAX') == 4 assert r.zrange('d{foo}', 0, -1, withscores=True) == \ - [(b('a2'), 2), (b('a4'), 4), (b('a3'), 5), (b('a1'), 6)] + [(b'a2', 2), (b'a4', 4), (b'a3', 5), (b'a1', 6)] def test_zunionstore_min(self, r): r.zadd('a{foo}', a1=1, a2=2, a3=3) @@ -1018,7 +1015,7 @@ def test_zunionstore_min(self, r): r.zadd('c{foo}', a1=6, a3=5, a4=4) assert r.zunionstore('d{foo}', ['a{foo}', 'b{foo}', 'c{foo}'], aggregate='MIN') == 4 assert r.zrange('d{foo}', 0, -1, withscores=True) == \ - [(b('a1'), 1), (b('a2'), 2), (b('a3'), 3), (b('a4'), 4)] + [(b'a1', 1), (b'a2', 2), (b'a3', 3), (b'a4', 4)] def test_zunionstore_with_weight(self, r): r.zadd('a{foo}', a1=1, a2=1, a3=1) @@ -1026,11 +1023,11 @@ def test_zunionstore_with_weight(self, r): r.zadd('c{foo}', a1=6, a3=5, a4=4) assert r.zunionstore('d{foo}', {'a{foo}': 1, 'b{foo}': 2, 'c{foo}': 3}) == 4 assert r.zrange('d{foo}', 0, -1, withscores=True) == \ - [(b('a2'), 5), (b('a4'), 12), (b('a3'), 20), (b('a1'), 23)] + [(b'a2', 5), (b'a4', 12), (b'a3', 20), (b'a1', 23)] # # HYPERLOGLOG TESTS def test_pfadd(self, r): - members = set([b('1'), b('2'), b('3')]) + members = set([b'1', b'2', b'3']) assert r.pfadd('a', *members) == 1 assert r.pfadd('a', *members) == 0 assert r.pfcount('a') == len(members) @@ -1038,18 +1035,18 @@ def test_pfadd(self, r): @pytest.mark.xfail(reason="New pfcount in 2.10.5 currently breaks in cluster") @skip_if_server_version_lt('2.8.9') def test_pfcount(self, r): - members = set([b('1'), b('2'), b('3')]) + members = set([b'1', b'2', b'3']) r.pfadd('a', *members) assert r.pfcount('a') == len(members) - members_b = set([b('2'), b('3'), b('4')]) + members_b = set([b'2', b'3', b'4']) r.pfadd('b', *members_b) assert r.pfcount('b') == len(members_b) assert r.pfcount('a', 'b') == len(members_b.union(members)) def test_pfmerge(self, r): - mema = set([b('1'), b('2'), b('3')]) - memb = set([b('2'), b('3'), b('4')]) - memc = set([b('5'), b('6'), b('7')]) + mema = set([b'1', b'2', b'3']) + memb = set([b'2', b'3', b'4']) + memc = set([b'5', b'6', b'7']) r.pfadd('a', *mema) r.pfadd('b', *memb) r.pfadd('c', *memc) @@ -1061,17 +1058,17 @@ def test_pfmerge(self, r): # HASH COMMANDS def test_hget_and_hset(self, r): r.hmset('a', {'1': 1, '2': 2, '3': 3}) - assert r.hget('a', '1') == b('1') - assert r.hget('a', '2') == b('2') - assert r.hget('a', '3') == b('3') + assert r.hget('a', '1') == b'1' + assert r.hget('a', '2') == b'2' + assert r.hget('a', '3') == b'3' # field was updated, redis returns 0 assert r.hset('a', '2', 5) == 0 - assert r.hget('a', '2') == b('5') + assert r.hget('a', '2') == b'5' # field is new, redis returns 1 assert r.hset('a', '4', 4) == 1 - assert r.hget('a', '4') == b('4') + assert r.hget('a', '4') == b'4' # key inside of hash that doesn't exist returns null value assert r.hget('a', 'b') is None @@ -1089,7 +1086,7 @@ def test_hexists(self, r): assert not r.hexists('a', '4') def test_hgetall(self, r): - h = {b('a1'): b('1'), b('a2'): b('2'), b('a3'): b('3')} + h = {b'a1': b'1', b'a2': b'2', b'a3': b'3'} r.hmset('a', h) assert r.hgetall('a') 
== h @@ -1104,7 +1101,7 @@ def test_hincrbyfloat(self, r): assert r.hincrbyfloat('a', '1', 1.2) == 3.2 def test_hkeys(self, r): - h = {b('a1'): b('1'), b('a2'): b('2'), b('a3'): b('3')} + h = {b'a1': b'1', b'a2': b'2', b'a3': b'3'} r.hmset('a', h) local_keys = list(iterkeys(h)) remote_keys = r.hkeys('a') @@ -1116,22 +1113,22 @@ def test_hlen(self, r): def test_hmget(self, r): assert r.hmset('a', {'a': 1, 'b': 2, 'c': 3}) - assert r.hmget('a', 'a', 'b', 'c') == [b('1'), b('2'), b('3')] + assert r.hmget('a', 'a', 'b', 'c') == [b'1', b'2', b'3'] def test_hmset(self, r): - h = {b('a'): b('1'), b('b'): b('2'), b('c'): b('3')} + h = {b'a': b'1', b'b': b'2', b'c': b'3'} assert r.hmset('a', h) assert r.hgetall('a') == h def test_hsetnx(self, r): # Initially set the hash field assert r.hsetnx('a', '1', 1) - assert r.hget('a', '1') == b('1') + assert r.hget('a', '1') == b'1' assert not r.hsetnx('a', '1', 2) - assert r.hget('a', '1') == b('1') + assert r.hget('a', '1') == b'1' def test_hvals(self, r): - h = {b('a1'): b('1'), b('a2'): b('2'), b('a3'): b('3')} + h = {b'a1': b'1', b'a2': b'2', b'a3': b'3'} r.hmset('a', h) local_vals = list(itervalues(h)) remote_vals = r.hvals('a') @@ -1140,25 +1137,25 @@ def test_hvals(self, r): # SORT def test_sort_basic(self, r): r.rpush('a', '3', '2', '1', '4') - assert r.sort('a') == [b('1'), b('2'), b('3'), b('4')] + assert r.sort('a') == [b'1', b'2', b'3', b'4'] def test_sort_limited(self, r): r.rpush('a', '3', '2', '1', '4') - assert r.sort('a', start=1, num=2) == [b('2'), b('3')] + assert r.sort('a', start=1, num=2) == [b'2', b'3'] def test_sort_by(self, r): r['score:1'] = 8 r['score:2'] = 3 r['score:3'] = 5 r.rpush('a', '3', '2', '1') - assert r.sort('a', by='score:*') == [b('2'), b('3'), b('1')] + assert r.sort('a', by='score:*') == [b'2', b'3', b'1'] def test_sort_get(self, r): r['user:1'] = 'u1' r['user:2'] = 'u2' r['user:3'] = 'u3' r.rpush('a', '2', '3', '1') - assert r.sort('a', get='user:*') == [b('u1'), b('u2'), b('u3')] + assert r.sort('a', get='user:*') == [b'u1', b'u2', b'u3'] def test_sort_get_multi(self, r): r['user:1'] = 'u1' @@ -1166,7 +1163,7 @@ def test_sort_get_multi(self, r): r['user:3'] = 'u3' r.rpush('a', '2', '3', '1') assert r.sort('a', get=('user:*', '#')) == \ - [b('u1'), b('1'), b('u2'), b('2'), b('u3'), b('3')] + [b'u1', b'1', b'u2', b'2', b'u3', b'3'] def test_sort_get_groups_two(self, r): r['user:1'] = 'u1' @@ -1174,7 +1171,7 @@ def test_sort_get_groups_two(self, r): r['user:3'] = 'u3' r.rpush('a', '2', '3', '1') assert r.sort('a', get=('user:*', '#'), groups=True) == \ - [(b('u1'), b('1')), (b('u2'), b('2')), (b('u3'), b('3'))] + [(b'u1', b'1'), (b'u2', b'2'), (b'u3', b'3')] def test_sort_groups_string_get(self, r): r['user:1'] = 'u1' @@ -1209,24 +1206,24 @@ def test_sort_groups_three_gets(self, r): r['door:3'] = 'd3' r.rpush('a', '2', '3', '1') assert r.sort('a', get=('user:*', 'door:*', '#'), groups=True) == [ - (b('u1'), b('d1'), b('1')), - (b('u2'), b('d2'), b('2')), - (b('u3'), b('d3'), b('3')) + (b'u1', b'd1', b'1'), + (b'u2', b'd2', b'2'), + (b'u3', b'd3', b'3') ] def test_sort_desc(self, r): r.rpush('a', '2', '3', '1') - assert r.sort('a', desc=True) == [b('3'), b('2'), b('1')] + assert r.sort('a', desc=True) == [b'3', b'2', b'1'] def test_sort_alpha(self, r): r.rpush('a', 'e', 'c', 'b', 'd', 'a') assert r.sort('a', alpha=True) == \ - [b('a'), b('b'), b('c'), b('d'), b('e')] + [b'a', b'b', b'c', b'd', b'e'] def test_sort_store(self, r): r.rpush('a', '2', '3', '1') assert r.sort('a', store='sorted_values') == 3 - assert 
r.lrange('sorted_values', 0, -1) == [b('1'), b('2'), b('3')] + assert r.lrange('sorted_values', 0, -1) == [b'1', b'2', b'3'] def test_sort_all_options(self, r): r['user:1:username'] = 'zeus' @@ -1253,7 +1250,7 @@ def test_sort_all_options(self, r): store='sorted') assert num == 4 assert r.lrange('sorted', 0, 10) == \ - [b('vodka'), b('milk'), b('gin'), b('apple juice')] + [b'vodka', b'milk', b'gin', b'apple juice'] class TestStrictCommands(object): @@ -1261,16 +1258,16 @@ class TestStrictCommands(object): def test_strict_zadd(self, sr): sr.zadd('a', 1.0, 'a1', 2.0, 'a2', a3=3.0) assert sr.zrange('a', 0, -1, withscores=True) == \ - [(b('a1'), 1.0), (b('a2'), 2.0), (b('a3'), 3.0)] + [(b'a1', 1.0), (b'a2', 2.0), (b'a3', 3.0)] def test_strict_lrem(self, sr): sr.rpush('a', 'a1', 'a2', 'a3', 'a1') sr.lrem('a', 0, 'a1') - assert sr.lrange('a', 0, -1) == [b('a2'), b('a3')] + assert sr.lrange('a', 0, -1) == [b'a2', b'a3'] def test_strict_setex(self, sr): assert sr.setex('a', 60, '1') - assert sr['a'] == b('1') + assert sr['a'] == b'1' assert 0 < sr.ttl('a') <= 60 def test_strict_ttl(self, sr): @@ -1291,25 +1288,25 @@ def test_strict_pttl(self, sr): def test_eval(self, sr): res = sr.eval("return {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}", 2, "A{foo}", "B{foo}", "first", "second") - assert res[0] == b('A{foo}') - assert res[1] == b('B{foo}') - assert res[2] == b('first') - assert res[3] == b('second') + assert res[0] == b'A{foo}' + assert res[1] == b'B{foo}' + assert res[2] == b'first' + assert res[3] == b'second' class TestBinarySave(object): def test_binary_get_set(self, r): assert r.set(' foo bar ', '123') - assert r.get(' foo bar ') == b('123') + assert r.get(' foo bar ') == b'123' assert r.set(' foo\r\nbar\r\n ', '456') - assert r.get(' foo\r\nbar\r\n ') == b('456') + assert r.get(' foo\r\nbar\r\n ') == b'456' assert r.set(' \r\n\t\x07\x13 ', '789') - assert r.get(' \r\n\t\x07\x13 ') == b('789') + assert r.get(' \r\n\t\x07\x13 ') == b'789' assert sorted(r.keys('*')) == \ - [b(' \r\n\t\x07\x13 '), b(' foo\r\nbar\r\n '), b(' foo bar ')] + [b' \r\n\t\x07\x13 ', b' foo\r\nbar\r\n ', b' foo bar '] assert r.delete(' foo bar ') assert r.delete(' foo\r\nbar\r\n ') @@ -1317,9 +1314,9 @@ def test_binary_get_set(self, r): def test_binary_lists(self, r): mapping = { - b('foo bar'): [b('1'), b('2'), b('3')], - b('foo\r\nbar\r\n'): [b('4'), b('5'), b('6')], - b('foo\tbar\x07'): [b('7'), b('8'), b('9')], + b'foo bar': [b'1', b'2', b'3'], + b'foo\r\nbar\r\n': [b'4', b'5', b'6'], + b'foo\tbar\x07': [b'7', b'8', b'9'], } # fill in lists for key, value in iteritems(mapping): @@ -1371,7 +1368,7 @@ def test_large_responses(self, r): # load up 100K of data into a key data = ''.join([ascii_letters] * (100000 // len(ascii_letters))) r['a'] = data - assert r['a'] == b(data) + assert r['a'] == bdata def test_floating_point_encoding(self, r): """ From c87f792f3f697d8c864492e6a5fcd8b0ccb4c6fa Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 16 Dec 2018 15:56:19 +0100 Subject: [PATCH 13/65] Ported in a few new testcases that was not present in the test code --- rediscluster/connection.py | 1 + tests/test_cluster_connection_pool.py | 31 +++++++++++++++++- tests/test_pipeline.py | 45 +++++++++++++++++++++++++++ tests/test_pubsub.py | 21 +++++++++++++ 4 files changed, 97 insertions(+), 1 deletion(-) diff --git a/rediscluster/connection.py b/rediscluster/connection.py index ac6abc74..62a89c91 100644 --- a/rediscluster/connection.py +++ b/rediscluster/connection.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- # python std lib +from 
__future__ import unicode_literals import os import random import threading diff --git a/tests/test_cluster_connection_pool.py b/tests/test_cluster_connection_pool.py index 5a432c3e..f403e684 100644 --- a/tests/test_cluster_connection_pool.py +++ b/tests/test_cluster_connection_pool.py @@ -1,7 +1,6 @@ # -*- coding: utf-8 -*- # python std lib -from __future__ import with_statement import os import re import time @@ -429,6 +428,36 @@ def test_db_in_querystring(self): 'password': None, } + def test_extra_typed_querystring_options(self): + pool = redis.ConnectionPool.from_url( + 'redis://localhost/2?socket_timeout=20&socket_connect_timeout=10' + '&socket_keepalive=&retry_on_timeout=Yes&max_connections=10' + ) + + assert pool.connection_class == redis.Connection + assert pool.connection_kwargs == { + 'host': 'localhost', + 'port': 6379, + 'db': 2, + 'socket_timeout': 20.0, + 'socket_connect_timeout': 10.0, + 'retry_on_timeout': True, + 'password': None, + } + assert pool.max_connections == 10 + + def test_boolean_parsing(self): + for expected, value in ( + (None, None), + (None, ''), + (False, 0), (False, '0'), + (False, 'f'), (False, 'F'), (False, 'False'), + (False, 'n'), (False, 'N'), (False, 'No'), + (True, 1), (True, '1'), + (True, 'y'), (True, 'Y'), (True, 'Yes'), + ): + assert expected is to_bool(value) + def test_extra_querystring_options(self): pool = redis.ConnectionPool.from_url('redis://localhost?a=1&b=2') assert pool.connection_class == redis.Connection diff --git a/tests/test_pipeline.py b/tests/test_pipeline.py index e8750439..b49fadeb 100644 --- a/tests/test_pipeline.py +++ b/tests/test_pipeline.py @@ -143,6 +143,34 @@ def test_exec_error_raised(self, r): assert pipe.set('z', 'zzz').execute() == [True] assert r['z'] == b'zzz' + def test_transaction_with_empty_error_command(self, r): + """ + Commands with custom EMPTY_ERROR functionality return their default + values in the pipeline no matter the raise_on_error preference + """ + for error_switch in (True, False): + with r.pipeline() as pipe: + pipe.set('a', 1).mget([]).set('c', 3) + result = pipe.execute(raise_on_error=error_switch) + + assert result[0] + assert result[1] == [] + assert result[2] + + def test_pipeline_with_empty_error_command(self, r): + """ + Commands with custom EMPTY_ERROR functionality return their default + values in the pipeline no matter the raise_on_error preference + """ + for error_switch in (True, False): + with r.pipeline(transaction=False) as pipe: + pipe.set('a', 1).mget([]).set('c', 3) + result = pipe.execute(raise_on_error=error_switch) + + assert result[0] + assert result[1] == [] + assert result[2] + def test_parse_error_raised(self, r): with r.pipeline() as pipe: # the zrem is invalid because we don't pass any keys to it @@ -257,6 +285,23 @@ def test_exec_error_in_no_transaction_pipeline_unicode_command(self, r): assert r[key] == b'1' + def test_pipeline_with_bitfield(self, r): + with r.pipeline() as pipe: + pipe.set('a', '1') + bf = pipe.bitfield('b') + pipe2 = (bf + .set('u8', 8, 255) + .get('u8', 0) + .get('u4', 8) # 1111 + .get('u4', 12) # 1111 + .get('u4', 13) # 1110 + .execute()) + pipe.get('a') + response = pipe.execute() + + assert pipe == pipe2 + assert response == [True, [0, 0, 15, 15, 14], b'1'] + def test_blocked_methods(self, r): """ Currently some method calls on a Cluster pipeline diff --git a/tests/test_pubsub.py b/tests/test_pubsub.py index e0ea2837..556f584f 100644 --- a/tests/test_pubsub.py +++ b/tests/test_pubsub.py @@ -504,3 +504,24 @@ def test_pubsub_numpat(self, r): 
r.pubsub(ignore_subscribe_messages=True).psubscribe('*oo', '*ar', 'b*z')
     assert r.pubsub_numpat() == 3
+
+class TestPubSubPings(object):
+
+    @skip_if_server_version_lt('3.0.0')
+    def test_send_pubsub_ping(self, r):
+        p = r.pubsub(ignore_subscribe_messages=True)
+        p.subscribe('foo')
+        p.ping()
+        assert wait_for_message(p) == make_message(type='pong', channel=None,
+                                                   data='',
+                                                   pattern=None)
+
+    @skip_if_server_version_lt('3.0.0')
+    @pytest.mark.xfail(reason="Pattern pubsub do not work currently")
+    def test_send_pubsub_ping_message(self, r):
+        p = r.pubsub(ignore_subscribe_messages=True)
+        p.subscribe('foo')
+        p.ping(message='hello world')
+        assert wait_for_message(p) == make_message(type='pong', channel=None,
+                                                   data='hello world',
+                                                   pattern=None)

From 11f315713a30f26a1fda32278cc114851c0acb4e Mon Sep 17 00:00:00 2001
From: Grokzen 
Date: Wed, 26 Dec 2018 14:09:59 +0100
Subject: [PATCH 14/65] More unicode, bytestring fixes. Fixed so testing
 actually works by fixing StrictClusterPipeline imports everywhere. Lots of
 old set syntax cleanup

---
 rediscluster/__init__.py    |   4 +-
 rediscluster/client.py      |   6 +-
 rediscluster/nodemanager.py |   2 +-
 rediscluster/pipeline.py    | 122 +++++++++++++------------
 tests/test_cluster_obj.py   |   2 +-
 tests/test_commands.py      | 167 ++++++++++++++++++------------------
 tests/test_utils.py         |   2 +-
 7 files changed, 152 insertions(+), 153 deletions(-)

diff --git a/rediscluster/__init__.py b/rediscluster/__init__.py
index bd9c5437..7bcc442a 100644
--- a/rediscluster/__init__.py
+++ b/rediscluster/__init__.py
@@ -5,14 +5,14 @@

 # Import shortcut
 from .client import RedisCluster
-from .pipeline import StrictClusterPipeline
+from .pipeline import ClusterPipeline
 from .pubsub import ClusterPubSub

 # Monkey patch RedisCluster class into redis for easy access
 import redis
 setattr(redis, "RedisCluster", RedisCluster)
 setattr(redis, "ClusterPubSub", ClusterPubSub)
-setattr(redis, "StrictClusterPipeline", StrictClusterPipeline)
+setattr(redis, "ClusterPipeline", ClusterPipeline)

 # Major, Minor, Fix version
 __version__ = (2, 0, 0)
diff --git a/rediscluster/client.py b/rediscluster/client.py
index e37178eb..27ced430 100644
--- a/rediscluster/client.py
+++ b/rediscluster/client.py
@@ -36,7 +36,7 @@
 from redis import Redis
 from redis.client import list_or_args, parse_info
 from redis.connection import Token
-from redis._compat import iteritems, basestring, b, izip, nativestr, long
+from redis._compat import iteritems, basestring, izip, nativestr, long
 from redis.exceptions import RedisError, ResponseError, TimeoutError, DataError, ConnectionError, BusyLoadingError


@@ -282,7 +282,7 @@ def pipeline(self, transaction=None, shard_hint=None):
         if transaction:
             raise RedisClusterException("transaction is deprecated in cluster mode")

-        return StrictClusterPipeline(
+        return ClusterPipeline(
             connection_pool=self.connection_pool,
             startup_nodes=self.connection_pool.nodes.startup_nodes,
             result_callbacks=self.result_callbacks,
@@ -1245,4 +1245,4 @@ def _random_id(self, size=16, chars=string.ascii_uppercase + string.digits):
         return ''.join(random.choice(chars) for _ in range(size))


-from rediscluster.pipeline import StrictClusterPipeline
+from rediscluster.pipeline import ClusterPipeline
diff --git a/rediscluster/nodemanager.py b/rediscluster/nodemanager.py
index ed936c94..6644e9ea 100644
--- a/rediscluster/nodemanager.py
+++ b/rediscluster/nodemanager.py
@@ -9,7 +9,7 @@

 # 3rd party imports
 from redis import Redis
-from redis._compat import b, unicode, bytes, long, basestring
+from redis._compat import 
unicode, bytes, long, basestring from redis import ConnectionError, TimeoutError, ResponseError diff --git a/rediscluster/pipeline.py b/rediscluster/pipeline.py index 2e16dac7..29ec8793 100644 --- a/rediscluster/pipeline.py +++ b/rediscluster/pipeline.py @@ -221,7 +221,7 @@ def send_cluster_commands(self, stack, raise_on_error=True, allow_redirections=T for c in attempt: try: # send each command individually like we do in the main client. - c.result = super(StrictClusterPipeline, self).execute_command(*c.args, **c.options) + c.result = super(ClusterPipeline, self).execute_command(*c.args, **c.options) except RedisError as e: c.result = e @@ -296,66 +296,66 @@ def inner(*args, **kwargs): # Blocked pipeline commands -StrictClusterPipeline.bgrewriteaof = block_pipeline_command(Redis.bgrewriteaof) -StrictClusterPipeline.bgsave = block_pipeline_command(Redis.bgsave) -StrictClusterPipeline.bitop = block_pipeline_command(Redis.bitop) -StrictClusterPipeline.brpoplpush = block_pipeline_command(Redis.brpoplpush) -StrictClusterPipeline.client_getname = block_pipeline_command(Redis.client_getname) -StrictClusterPipeline.client_kill = block_pipeline_command(Redis.client_kill) -StrictClusterPipeline.client_list = block_pipeline_command(Redis.client_list) -StrictClusterPipeline.client_setname = block_pipeline_command(Redis.client_setname) -StrictClusterPipeline.config_get = block_pipeline_command(Redis.config_get) -StrictClusterPipeline.config_resetstat = block_pipeline_command(Redis.config_resetstat) -StrictClusterPipeline.config_rewrite = block_pipeline_command(Redis.config_rewrite) -StrictClusterPipeline.config_set = block_pipeline_command(Redis.config_set) -StrictClusterPipeline.dbsize = block_pipeline_command(Redis.dbsize) -StrictClusterPipeline.echo = block_pipeline_command(Redis.echo) -StrictClusterPipeline.evalsha = block_pipeline_command(Redis.evalsha) -StrictClusterPipeline.flushall = block_pipeline_command(Redis.flushall) -StrictClusterPipeline.flushdb = block_pipeline_command(Redis.flushdb) -StrictClusterPipeline.info = block_pipeline_command(Redis.info) -StrictClusterPipeline.keys = block_pipeline_command(Redis.keys) -StrictClusterPipeline.lastsave = block_pipeline_command(Redis.lastsave) -StrictClusterPipeline.mget = block_pipeline_command(Redis.mget) -StrictClusterPipeline.move = block_pipeline_command(Redis.move) -StrictClusterPipeline.mset = block_pipeline_command(Redis.mset) -StrictClusterPipeline.msetnx = block_pipeline_command(Redis.msetnx) -StrictClusterPipeline.pfmerge = block_pipeline_command(Redis.pfmerge) -StrictClusterPipeline.pfcount = block_pipeline_command(Redis.pfcount) -StrictClusterPipeline.ping = block_pipeline_command(Redis.ping) -StrictClusterPipeline.publish = block_pipeline_command(Redis.publish) -StrictClusterPipeline.randomkey = block_pipeline_command(Redis.randomkey) -StrictClusterPipeline.rename = block_pipeline_command(Redis.rename) -StrictClusterPipeline.renamenx = block_pipeline_command(Redis.renamenx) -StrictClusterPipeline.rpoplpush = block_pipeline_command(Redis.rpoplpush) -StrictClusterPipeline.save = block_pipeline_command(Redis.save) -StrictClusterPipeline.scan = block_pipeline_command(Redis.scan) -StrictClusterPipeline.script_exists = block_pipeline_command(Redis.script_exists) -StrictClusterPipeline.script_flush = block_pipeline_command(Redis.script_flush) -StrictClusterPipeline.script_kill = block_pipeline_command(Redis.script_kill) -StrictClusterPipeline.script_load = block_pipeline_command(Redis.script_load) -StrictClusterPipeline.sdiff = 
block_pipeline_command(Redis.sdiff) -StrictClusterPipeline.sdiffstore = block_pipeline_command(Redis.sdiffstore) -StrictClusterPipeline.sentinel_get_master_addr_by_name = block_pipeline_command(Redis.sentinel_get_master_addr_by_name) -StrictClusterPipeline.sentinel_master = block_pipeline_command(Redis.sentinel_master) -StrictClusterPipeline.sentinel_masters = block_pipeline_command(Redis.sentinel_masters) -StrictClusterPipeline.sentinel_monitor = block_pipeline_command(Redis.sentinel_monitor) -StrictClusterPipeline.sentinel_remove = block_pipeline_command(Redis.sentinel_remove) -StrictClusterPipeline.sentinel_sentinels = block_pipeline_command(Redis.sentinel_sentinels) -StrictClusterPipeline.sentinel_set = block_pipeline_command(Redis.sentinel_set) -StrictClusterPipeline.sentinel_slaves = block_pipeline_command(Redis.sentinel_slaves) -StrictClusterPipeline.shutdown = block_pipeline_command(Redis.shutdown) -StrictClusterPipeline.sinter = block_pipeline_command(Redis.sinter) -StrictClusterPipeline.sinterstore = block_pipeline_command(Redis.sinterstore) -StrictClusterPipeline.slaveof = block_pipeline_command(Redis.slaveof) -StrictClusterPipeline.slowlog_get = block_pipeline_command(Redis.slowlog_get) -StrictClusterPipeline.slowlog_len = block_pipeline_command(Redis.slowlog_len) -StrictClusterPipeline.slowlog_reset = block_pipeline_command(Redis.slowlog_reset) -StrictClusterPipeline.smove = block_pipeline_command(Redis.smove) -StrictClusterPipeline.sort = block_pipeline_command(Redis.sort) -StrictClusterPipeline.sunion = block_pipeline_command(Redis.sunion) -StrictClusterPipeline.sunionstore = block_pipeline_command(Redis.sunionstore) -StrictClusterPipeline.time = block_pipeline_command(Redis.time) +ClusterPipeline.bgrewriteaof = block_pipeline_command(Redis.bgrewriteaof) +ClusterPipeline.bgsave = block_pipeline_command(Redis.bgsave) +ClusterPipeline.bitop = block_pipeline_command(Redis.bitop) +ClusterPipeline.brpoplpush = block_pipeline_command(Redis.brpoplpush) +ClusterPipeline.client_getname = block_pipeline_command(Redis.client_getname) +ClusterPipeline.client_kill = block_pipeline_command(Redis.client_kill) +ClusterPipeline.client_list = block_pipeline_command(Redis.client_list) +ClusterPipeline.client_setname = block_pipeline_command(Redis.client_setname) +ClusterPipeline.config_get = block_pipeline_command(Redis.config_get) +ClusterPipeline.config_resetstat = block_pipeline_command(Redis.config_resetstat) +ClusterPipeline.config_rewrite = block_pipeline_command(Redis.config_rewrite) +ClusterPipeline.config_set = block_pipeline_command(Redis.config_set) +ClusterPipeline.dbsize = block_pipeline_command(Redis.dbsize) +ClusterPipeline.echo = block_pipeline_command(Redis.echo) +ClusterPipeline.evalsha = block_pipeline_command(Redis.evalsha) +ClusterPipeline.flushall = block_pipeline_command(Redis.flushall) +ClusterPipeline.flushdb = block_pipeline_command(Redis.flushdb) +ClusterPipeline.info = block_pipeline_command(Redis.info) +ClusterPipeline.keys = block_pipeline_command(Redis.keys) +ClusterPipeline.lastsave = block_pipeline_command(Redis.lastsave) +ClusterPipeline.mget = block_pipeline_command(Redis.mget) +ClusterPipeline.move = block_pipeline_command(Redis.move) +ClusterPipeline.mset = block_pipeline_command(Redis.mset) +ClusterPipeline.msetnx = block_pipeline_command(Redis.msetnx) +ClusterPipeline.pfmerge = block_pipeline_command(Redis.pfmerge) +ClusterPipeline.pfcount = block_pipeline_command(Redis.pfcount) +ClusterPipeline.ping = block_pipeline_command(Redis.ping) 
+ClusterPipeline.publish = block_pipeline_command(Redis.publish) +ClusterPipeline.randomkey = block_pipeline_command(Redis.randomkey) +ClusterPipeline.rename = block_pipeline_command(Redis.rename) +ClusterPipeline.renamenx = block_pipeline_command(Redis.renamenx) +ClusterPipeline.rpoplpush = block_pipeline_command(Redis.rpoplpush) +ClusterPipeline.save = block_pipeline_command(Redis.save) +ClusterPipeline.scan = block_pipeline_command(Redis.scan) +ClusterPipeline.script_exists = block_pipeline_command(Redis.script_exists) +ClusterPipeline.script_flush = block_pipeline_command(Redis.script_flush) +ClusterPipeline.script_kill = block_pipeline_command(Redis.script_kill) +ClusterPipeline.script_load = block_pipeline_command(Redis.script_load) +ClusterPipeline.sdiff = block_pipeline_command(Redis.sdiff) +ClusterPipeline.sdiffstore = block_pipeline_command(Redis.sdiffstore) +ClusterPipeline.sentinel_get_master_addr_by_name = block_pipeline_command(Redis.sentinel_get_master_addr_by_name) +ClusterPipeline.sentinel_master = block_pipeline_command(Redis.sentinel_master) +ClusterPipeline.sentinel_masters = block_pipeline_command(Redis.sentinel_masters) +ClusterPipeline.sentinel_monitor = block_pipeline_command(Redis.sentinel_monitor) +ClusterPipeline.sentinel_remove = block_pipeline_command(Redis.sentinel_remove) +ClusterPipeline.sentinel_sentinels = block_pipeline_command(Redis.sentinel_sentinels) +ClusterPipeline.sentinel_set = block_pipeline_command(Redis.sentinel_set) +ClusterPipeline.sentinel_slaves = block_pipeline_command(Redis.sentinel_slaves) +ClusterPipeline.shutdown = block_pipeline_command(Redis.shutdown) +ClusterPipeline.sinter = block_pipeline_command(Redis.sinter) +ClusterPipeline.sinterstore = block_pipeline_command(Redis.sinterstore) +ClusterPipeline.slaveof = block_pipeline_command(Redis.slaveof) +ClusterPipeline.slowlog_get = block_pipeline_command(Redis.slowlog_get) +ClusterPipeline.slowlog_len = block_pipeline_command(Redis.slowlog_len) +ClusterPipeline.slowlog_reset = block_pipeline_command(Redis.slowlog_reset) +ClusterPipeline.smove = block_pipeline_command(Redis.smove) +ClusterPipeline.sort = block_pipeline_command(Redis.sort) +ClusterPipeline.sunion = block_pipeline_command(Redis.sunion) +ClusterPipeline.sunionstore = block_pipeline_command(Redis.sunionstore) +ClusterPipeline.time = block_pipeline_command(Redis.time) class PipelineCommand(object): diff --git a/tests/test_cluster_obj.py b/tests/test_cluster_obj.py index 34bf0c5b..9728b0ec 100644 --- a/tests/test_cluster_obj.py +++ b/tests/test_cluster_obj.py @@ -16,7 +16,7 @@ # 3rd party imports from mock import patch, Mock, MagicMock -from redis._compat import b, unicode +from redis._compat import unicode from redis import Redis import pytest diff --git a/tests/test_commands.py b/tests/test_commands.py index fe251b14..9f195aff 100644 --- a/tests/test_commands.py +++ b/tests/test_commands.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # python std lib -from __future__ import with_statement +from __future__ import unicode_literals import datetime import re import time @@ -12,7 +12,7 @@ # 3rd party imports import pytest -from redis._compat import unichr, u, b, ascii_letters, iteritems, iterkeys, itervalues, unicode +from redis._compat import unichr, ascii_letters, iteritems, iterkeys, itervalues, unicode from redis.client import parse_info from redis.exceptions import ResponseError, DataError, RedisError @@ -166,6 +166,11 @@ def test_decr(self, r): assert r.decr('a', amount=5) == -7 assert r['a'] == b'-7' + def test_decrby(self, r): 
+ assert r.decrby('a', amount=2) == -2 + assert r.decrby('a', amount=3) == -5 + assert r['a'] == b'-5' + def test_delete(self, r): assert r.delete('a') == 0 r['a'] = 'foo' @@ -193,7 +198,10 @@ def test_dump_and_restore(self, r): def test_exists(self, r): assert not r.exists('a') r['a'] = 'foo' - assert r.exists('a') + assert r.exists('a') == 0 + r['b'] = 'bar' + assert r.exists('a') == 1 + assert r.exists('a', 'b') == 2 def test_exists_contains(self, r): assert 'a' not in r @@ -296,14 +304,15 @@ def test_incrbyfloat(self, r): def test_keys(self, r): keys = r.keys() assert keys == [] - keys_with_underscores = set(['test_a', 'test_b']) - keys = keys_with_underscores.union(set(['testc'])) + keys_with_underscores = {b'test_a', b'test_b'} + keys = keys_with_underscores.union({b'testc'}) for key in keys: r[key] = 1 assert set(r.keys(pattern='test_*')) == {b"{0}".format(k) for k in keys_with_underscores} assert set(r.keys(pattern='test*')) == {b"{0}".format(k) for k in keys} def test_mget(self, r): + assert r.mget([]) == [] assert r.mget(['a', 'b']) == [None, None] r['a'] = '1' r['b'] = '2' @@ -316,12 +325,6 @@ def test_mset(self, r): for k, v in iteritems(d): assert r[k] == v - def test_mset_kwargs(self, r): - d = {'a': b'1', 'b': b'2', 'c': b'3'} - assert r.mset(**d) - for k, v in iteritems(d): - assert r[k] == v - def test_msetnx(self, r): d = {'a': b'1', 'b': b'2', 'c': b'3'} assert r.msetnx(d) @@ -331,15 +334,6 @@ def test_msetnx(self, r): assert r[k] == v assert r.get('d') is None - def test_msetnx_kwargs(self, r): - d = {'a': b'1', 'b': b'2', 'c': b'3'} - assert r.msetnx(**d) - d2 = {'a': b'x', 'd': b'4'} - assert not r.msetnx(**d2) - for k, v in iteritems(d): - assert r[k] == v - assert r.get('d') is None - def test_pexpire(self, r): assert not r.pexpire('a', 60000) r['a'] = 'foo' @@ -562,7 +556,7 @@ def test_lpushx(self, r): assert r.lrange('a', 0, -1) == [] r.rpush('a', '1', '2', '3') assert r.lpushx('a', '4') == 4 - assert r.lrange('a', 0, -1) == [4'4', b'1', b'2', b'3'] + assert r.lrange('a', 0, -1) == [b'4', b'1', b'2', b'3'] def test_lrange(self, r): r.rpush('a', '1', '2', '3', '4', '5') @@ -571,11 +565,16 @@ def test_lrange(self, r): assert r.lrange('a', 0, -1) == [b'1', b'2', b'3', b'4', b'5'] def test_lrem(self, r): - r.rpush('a', '1', '1', '1', '1') - assert r.lrem('a', '1', 1) == 1 - assert r.lrange('a', 0, -1) == [b'1', b'1', b'1'] - assert r.lrem('a', 0, '1') == 3 - assert r.lrange('a', 0, -1) == [] + r.rpush('a', 'Z', 'b', 'Z', 'Z', 'c', 'Z', 'Z') + # remove the first 'Z' item + assert r.lrem('a', 1, 'Z') == 1 + assert r.lrange('a', 0, -1) == [b'b', b'Z', b'Z', b'c', b'Z', b'Z'] + # remove the last 2 'Z' items + assert r.lrem('a', -2, 'Z') == 2 + assert r.lrange('a', 0, -1) == [b'b', b'Z', b'Z', b'c'] + # remove all 'Z' items + assert r.lrem('a', 0, 'Z') == 2 + assert r.lrange('a', 0, -1) == [b'b', b'c'] def test_lset(self, r): r.rpush('a', '1', '2', '3') @@ -626,14 +625,14 @@ def test_scan(self, r): assert cursor == 0 keys += partial_keys - assert set(keys) == set([b'a', b'b', b'c']) + assert set(keys) == {b'a', b'b', b'c'} keys = [] for result in r.scan(match='a').values(): cursor, partial_keys = result assert cursor == 0 keys += partial_keys - assert set(keys) == set([b'a']) + assert set(keys) == {b'a'} def test_scan_iter(self, r): alphabet = 'abcdefghijklmnopqrstuvwABCDEFGHIJKLMNOPQRSTUVW' @@ -644,29 +643,29 @@ def test_scan_iter(self, r): assert set(keys) == set(expected_result) keys = list(r.scan_iter(match='a')) - assert set(keys) == set([b'a']) + assert set(keys) == 
{b'a'} r.set('Xa', 1) r.set('Xb', 2) r.set('Xc', 3) keys = list(r.scan_iter('X*', count=1000)) assert len(keys) == 3 - assert set(keys) == set([b'Xa', b'Xb', b'Xc']) + assert set(keys) == {b'Xa', b'Xb', b'Xc'} def test_sscan(self, r): r.sadd('a', 1, 2, 3) cursor, members = r.sscan('a') assert cursor == 0 - assert set(members) == set([b'1', b'2', b'3']) + assert set(members) == {b'a', b'2', b'3'} _, members = r.sscan('a', match=b'1') - assert set(members) == set([b'1']) + assert set(members) == {b'1'} def test_sscan_iter(self, r): r.sadd('a', 1, 2, 3) members = list(r.sscan_iter('a')) - assert set(members) == set([b'1', b'2', b'3']) + assert set(members) == {b'1', b'2', b'3'} members = list(r.sscan_iter('a', match=b'1')) - assert set(members) == set([b'1']) + assert set(members) == {b'1'} def test_hscan(self, r): r.hmset('a', {'a': 1, 'b': 2, 'c': 3}) @@ -687,16 +686,16 @@ def test_zscan(self, r): r.zadd('a', 1, 'a', 2, 'b', 3, 'c') cursor, pairs = r.zscan('a') assert cursor == 0 - assert set(pairs) == set([(b'a', 1), (b'b', 2), (b'c', 3)]) + assert set(pairs) == {(b'a', 1), (b'b, 2'), (b'c', 3)} _, pairs = r.zscan('a', match='a') - assert set(pairs) == set([(b'a', 1)]) + assert set(pairs == {(b'a', 1)}) def test_zscan_iter(self, r): r.zadd('a', 1, 'a', 2, 'b', 3, 'c') pairs = list(r.zscan_iter('a')) - assert set(pairs) == set([(b'a', 1), (b'b', 2), (b'c', 3)]) + assert set(pairs) == {(b'a', 1), (b'b', 2), (b'c', 3)} pairs = list(r.zscan_iter('a', match='a')) - assert set(pairs) == set([(b'a', 1)]) + assert set(pairs) == {(b'a', 1)} # SET COMMANDS def test_sadd(self, r): @@ -748,21 +747,21 @@ def test_sismember(self, r): def test_smembers(self, r): r.sadd('a', '1', '2', '3') - assert r.smembers('a') == set([b'1', b'2', b'3']) + assert r.smembers('a') == {b'1', b'2', b'3'} def test_smove(self, r): r.sadd('a{foo}', 'a1', 'a2') r.sadd('b{foo}', 'b1', 'b2') assert r.smove('a{foo}', 'b{foo}', 'a1') - assert r.smembers('a{foo}') == set([b'a2']) - assert r.smembers('b{foo}') == set([b'b1', b'b2', b'a1']) + assert r.smembers('a{foo}') == {b'a2'} + assert r.smembers('b{foo}') == {b'b1', b'b2', b'a1'} def test_spop(self, r): s = [b'1', b'2', b'3'] r.sadd('a', *s) value = r.spop('a') assert value in s - assert r.smembers('a') == set(s) - set([value]) + assert r.smembers('a') == set(s) - {value} def test_srandmember(self, r): s = [b'1', b'2', b'3'] @@ -796,14 +795,14 @@ def test_sunionstore(self, r): # SORTED SET COMMANDS def test_zadd(self, r): r.zadd('a', a1=1, a2=2, a3=3) - assert r.zrange('a', 0, -1) == [b'a1', b'a2'g, b'a3'] + assert r.zrange('a', 0, -1) == [b'a1', b'a2', b'a3'] def test_zcard(self, r): - r.zadd('a', a1=1, a2=2, a3=3) + r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) assert r.zcard('a') == 3 def test_zcount(self, r): - r.zadd('a', a1=1, a2=2, a3=3) + r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) assert r.zcount('a', '-inf', '+inf') == 3 assert r.zcount('a', 1, 2) == 2 assert r.zcount('a', 10, 20) == 0 @@ -816,7 +815,7 @@ def test_zincrby(self, r): assert r.zscore('a', 'a3') == 8.0 def test_zlexcount(self, r): - r.zadd('a', a=0, b=0, c=0, d=0, e=0, f=0, g=0) + r.zadd('a', {'a': 0, 'b': 0, 'c': 0, 'd': 0, 'e': 0, 'f': 0, 'g': 0}) assert r.zlexcount('a', '-', '+') == 7 assert r.zlexcount('a', '[b', '[f') == 5 @@ -829,33 +828,33 @@ def test_zinterstore_fail_cross_slot(self, r): assert re.search('ClusterCrossSlotError', str(excinfo)) def test_zinterstore_sum(self, r): - r.zadd('a{foo}', a1=1, a2=1, a3=1) - r.zadd('b{foo}', a1=2, a2=2, a3=2) - r.zadd('c{foo}', a1=6, a3=5, a4=4) + r.zadd('a', {'a1': 
1, 'a2': 1, 'a3': 1}) + r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) + r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) assert r.zinterstore('d{foo}', ['a{foo}', 'b{foo}', 'c{foo}']) == 2 assert r.zrange('d{foo}', 0, -1, withscores=True) == \ [(b'a3', 8), (b'a1'), 9] def test_zinterstore_max(self, r): - r.zadd('a{foo}', a1=1, a2=1, a3=1) - r.zadd('b{foo}', a1=2, a2=2, a3=2) - r.zadd('c{foo}', a1=6, a3=5, a4=4) + r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) + r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) + r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) assert r.zinterstore('d{foo}', ['a{foo}', 'b{foo}', 'c{foo}'], aggregate='MAX') == 2 assert r.zrange('d{foo}', 0, -1, withscores=True) == \ [(b'a3', 5), (b'a1', 6)] def test_zinterstore_min(self, r): - r.zadd('a{foo}', a1=1, a2=2, a3=3) - r.zadd('b{foo}', a1=2, a2=3, a3=5) - r.zadd('c{foo}', a1=6, a3=5, a4=4) + r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) + r.zadd('b', {'a1': 2, 'a2': 3, 'a3': 5}) + r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) assert r.zinterstore('d{foo}', ['a{foo}', 'b{foo}', 'c{foo}'], aggregate='MIN') == 2 assert r.zrange('d{foo}', 0, -1, withscores=True) == \ [(b'a1', 1), (b'a3', 3)] def test_zinterstore_with_weight(self, r): - r.zadd('a{foo}', a1=1, a2=1, a3=1) - r.zadd('b{foo}', a1=2, a2=2, a3=2) - r.zadd('c{foo}', a1=6, a3=5, a4=4) + r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) + r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) + r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) assert r.zinterstore('d{foo}', {'a{foo}': 1, 'b{foo}': 2, 'c{foo}': 3}) == 2 assert r.zrange('d{foo}', 0, -1, withscores=True) == \ [(b'a3', 20), (b'a1', 23)] @@ -902,25 +901,25 @@ def test_zrangebyscore(self, r): [(b'a2', 2), (b'a3', 3), (b'a4', 4)] def test_zrank(self, r): - r.zadd('a', a1=1, a2=2, a3=3, a4=4, a5=5) + r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3, 'a4': 4, 'a5': 5}) assert r.zrank('a', 'a1') == 0 assert r.zrank('a', 'a2') == 1 assert r.zrank('a', 'a6') is None def test_zrem(self, r): - r.zadd('a', a1=1, a2=2, a3=3) + r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) assert r.zrem('a', 'a2') == 1 assert r.zrange('a', 0, -1) == [b'a1', b'a3'] assert r.zrem('a', 'b') == 0 assert r.zrange('a', 0, -1) == [b'a1', b'a3'] def test_zrem_multiple_keys(self, r): - r.zadd('a', a1=1, a2=2, a3=3) + r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) assert r.zrem('a', 'a1', 'a2') == 2 assert r.zrange('a', 0, 5) == [b'a3'] def test_zremrangebylex(self, r): - r.zadd('a', a=0, b=0, c=0, d=0, e=0, f=0, g=0) + r.zadd('a', {'a': 0, 'b': 0, 'c': 0, 'd': 0, 'e': 0, 'f': 0, 'g': 0}) assert r.zremrangebylex('a', '-', '[c') == 3 assert r.zrange('a', 0, -1) == [b'd', b'e', b'f', b'g'] assert r.zremrangebylex('a', '[f', '+') == 2 @@ -929,7 +928,7 @@ def test_zremrangebylex(self, r): assert r.zrange('a', 0, -1) == [b'd', b'e'] def test_zremrangebyrank(self, r): - r.zadd('a', a1=1, a2=2, a3=3, a4=4, a5=5) + r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3, 'a4': 4, 'a5': 5}) assert r.zremrangebyrank('a', 1, 3) == 3 assert r.zrange('a', 0, 5) == [b'a1', b'a5'] @@ -974,13 +973,13 @@ def test_zrevrangebyscore(self, r): [(b'a4', 4), (b'a3', 3), (b'a2', 2)] def test_zrevrank(self, r): - r.zadd('a', a1=1, a2=2, a3=3, a4=4, a5=5) + r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3, 'a4': 4, 'a5': 5}) assert r.zrevrank('a', 'a1') == 4 assert r.zrevrank('a', 'a2') == 3 assert r.zrevrank('a', 'a6') is None def test_zscore(self, r): - r.zadd('a', a1=1, a2=2, a3=3) + r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) assert r.zscore('a', 'a1') == 1.0 assert r.zscore('a', 'a2') == 2.0 assert r.zscore('a', 'a4') is None @@ -994,40 +993,40 @@ def 
test_zunionstore_fail_crossslot(self, r): assert re.search('ClusterCrossSlotError', str(excinfo)) def test_zunionstore_sum(self, r): - r.zadd('a{foo}', a1=1, a2=1, a3=1) - r.zadd('b{foo}', a1=2, a2=2, a3=2) - r.zadd('c{foo}', a1=6, a3=5, a4=4) + r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) + r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) + r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) assert r.zunionstore('d{foo}', ['a{foo}', 'b{foo}', 'c{foo}']) == 4 assert r.zrange('d{foo}', 0, -1, withscores=True) == \ [(b'a2', 3), (b'a4', 4), (b'a3', 8), (b'a1', 9)] def test_zunionstore_max(self, r): - r.zadd('a{foo}', a1=1, a2=1, a3=1) - r.zadd('b{foo}', a1=2, a2=2, a3=2) - r.zadd('c{foo}', a1=6, a3=5, a4=4) + r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) + r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) + r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) assert r.zunionstore('d{foo}', ['a{foo}', 'b{foo}', 'c{foo}'], aggregate='MAX') == 4 assert r.zrange('d{foo}', 0, -1, withscores=True) == \ [(b'a2', 2), (b'a4', 4), (b'a3', 5), (b'a1', 6)] def test_zunionstore_min(self, r): - r.zadd('a{foo}', a1=1, a2=2, a3=3) - r.zadd('b{foo}', a1=2, a2=2, a3=4) - r.zadd('c{foo}', a1=6, a3=5, a4=4) + r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) + r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 4}) + r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) assert r.zunionstore('d{foo}', ['a{foo}', 'b{foo}', 'c{foo}'], aggregate='MIN') == 4 assert r.zrange('d{foo}', 0, -1, withscores=True) == \ [(b'a1', 1), (b'a2', 2), (b'a3', 3), (b'a4', 4)] def test_zunionstore_with_weight(self, r): - r.zadd('a{foo}', a1=1, a2=1, a3=1) - r.zadd('b{foo}', a1=2, a2=2, a3=2) - r.zadd('c{foo}', a1=6, a3=5, a4=4) + r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) + r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) + r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) assert r.zunionstore('d{foo}', {'a{foo}': 1, 'b{foo}': 2, 'c{foo}': 3}) == 4 assert r.zrange('d{foo}', 0, -1, withscores=True) == \ [(b'a2', 5), (b'a4', 12), (b'a3', 20), (b'a1', 23)] # # HYPERLOGLOG TESTS def test_pfadd(self, r): - members = set([b'1', b'2', b'3']) + members = {b'1', b'2', b'3'} assert r.pfadd('a', *members) == 1 assert r.pfadd('a', *members) == 0 assert r.pfcount('a') == len(members) @@ -1035,18 +1034,18 @@ def test_pfadd(self, r): @pytest.mark.xfail(reason="New pfcount in 2.10.5 currently breaks in cluster") @skip_if_server_version_lt('2.8.9') def test_pfcount(self, r): - members = set([b'1', b'2', b'3']) + members = {b'1', b'2', b'3'} r.pfadd('a', *members) assert r.pfcount('a') == len(members) - members_b = set([b'2', b'3', b'4']) + members_b = {b'2', b'3', b'4'} r.pfadd('b', *members_b) assert r.pfcount('b') == len(members_b) assert r.pfcount('a', 'b') == len(members_b.union(members)) def test_pfmerge(self, r): - mema = set([b'1', b'2', b'3']) - memb = set([b'2', b'3', b'4']) - memc = set([b'5', b'6', b'7']) + mema = {b'1', b'2', b'3'} + memb = {b'2', b'3', b'4'} + memc = {b'5', b'6', b'7'} r.pfadd('a', *mema) r.pfadd('b', *memb) r.pfadd('c', *memc) @@ -1376,5 +1375,5 @@ def test_floating_point_encoding(self, r): precision. 
""" timestamp = 1349673917.939762 - r.zadd('a', timestamp, 'a1') + r.zadd('a', {'a1': timestamp}) assert r.zscore('a', 'a1') == timestamp diff --git a/tests/test_utils.py b/tests/test_utils.py index 7ee9278e..32c21422 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -19,7 +19,7 @@ # 3rd party imports import pytest -from redis._compat import unicode, b +from redis._compat import unicode def test_parse_cluster_slots(): From a0fbc7c1ef1f8450743e819b1f6ca9d3cab6d3cc Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 10 Feb 2019 11:00:53 +0100 Subject: [PATCH 15/65] Binary string fixes and updates of tests --- tests/test_commands.py | 79 +++++++++++++++++++++++++++++++++--------- tests/test_utils.py | 12 +++---- 2 files changed, 68 insertions(+), 23 deletions(-) diff --git a/tests/test_commands.py b/tests/test_commands.py index 9f195aff..136ea112 100644 --- a/tests/test_commands.py +++ b/tests/test_commands.py @@ -7,14 +7,14 @@ import time # rediscluster imports -from rediscluster.exceptions import RedisClusterException +from rediscluster.exceptions import RedisClusterException, ClusterCrossSlotError from tests.conftest import skip_if_server_version_lt, skip_if_redis_py_version_lt # 3rd party imports import pytest from redis._compat import unichr, ascii_letters, iteritems, iterkeys, itervalues, unicode from redis.client import parse_info -from redis.exceptions import ResponseError, DataError, RedisError +from redis.exceptions import ResponseError, DataError, RedisError, DataError pytestmark = skip_if_server_version_lt('2.9.0') @@ -30,7 +30,7 @@ class TestRedisCommands(object): @skip_if_server_version_lt('2.9.9') def test_zrevrangebylex(self, r): - r.zadd('a', a=0, b=0, c=0, d=0, e=0, f=0, g=0) + r.zadd('a', {'a': 0, 'b': 0, 'c': 0, 'd': 0, 'e': 0, 'f': 0, 'g': 0}) assert r.zrevrangebylex('a', '[c', '-') == [b'c', b'b', b'a'] assert r.zrevrangebylex('a', '(c', '-') == [b'b', b'a'] assert r.zrevrangebylex('a', '(g', '[aaa') == \ @@ -196,12 +196,27 @@ def test_dump_and_restore(self, r): assert r['a'] == b'foo' def test_exists(self, r): - assert not r.exists('a') - r['a'] = 'foo' assert r.exists('a') == 0 + r['a'] = 'foo' r['b'] = 'bar' assert r.exists('a') == 1 - assert r.exists('a', 'b') == 2 + assert r.exists('b') == 1 + # This no longer works in cluster. 
See test_exists_fail_not_same_slots() for failing test + # assert r.exists('a', 'b') == 2 + + def test_exists_fail_not_same_slots(self, r): + """ + This test is conditioned on that the 2 keys will be in different slots + """ + key_a = 'a' + key_b = 'b' + assert r.cluster_keyslot(key_a) != r.cluster_keyslot(key_b) + r[key_a] = 'foo' + r[key_b] = 'bar' + assert r.exists('a') == 1 + assert r.exists('b') == 1 + with pytest.raises(ClusterCrossSlotError): + r.exists('a', 'b') def test_exists_contains(self, r): assert 'a' not in r @@ -239,12 +254,12 @@ def test_get_and_set(self, r): assert r.get('a') is None byte_string = b'value' integer = 5 - unicode_string = unichr(3456) + u'abcd' + unichr(3421) + unicode_string = unichr(3456) + 'abcd' + unichr(3421) assert r.set('byte_string', byte_string) assert r.set('integer', 5) assert r.set('unicode_string', unicode_string) assert r.get('byte_string') == byte_string - assert r.get('integer') == bstr(integer) + assert r.get('integer') == str(integer).encode() assert r.get('unicode_string').decode('utf-8') == unicode_string def test_getitem_and_setitem(self, r): @@ -481,7 +496,7 @@ def test_type(self, r): r.sadd('a', '1') assert r.type('a') == b'set' del r['a'] - r.zadd('a', **{'1': 1}) + r.zadd('a', {'1': 1}) assert r.type('a') == b'zset' # LIST COMMANDS @@ -615,6 +630,7 @@ def test_rpushx(self, r): assert r.lrange('a', 0, -1) == [b'1', b'2', b'3', b'4'] # SCAN COMMANDS + @pytest.mark.skip(reason="WIP") def test_scan(self, r): r.set('a', 1) r.set('b', 2) @@ -634,6 +650,7 @@ def test_scan(self, r): keys += partial_keys assert set(keys) == {b'a'} + @pytest.mark.skip(reason="WIP") def test_scan_iter(self, r): alphabet = 'abcdefghijklmnopqrstuvwABCDEFGHIJKLMNOPQRSTUVW' for i, c in enumerate(alphabet): @@ -652,6 +669,7 @@ def test_scan_iter(self, r): assert len(keys) == 3 assert set(keys) == {b'Xa', b'Xb', b'Xc'} + @pytest.mark.skip(reason="WIP") def test_sscan(self, r): r.sadd('a', 1, 2, 3) cursor, members = r.sscan('a') @@ -660,6 +678,7 @@ def test_sscan(self, r): _, members = r.sscan('a', match=b'1') assert set(members) == {b'1'} + @pytest.mark.skip(reason="WIP") def test_sscan_iter(self, r): r.sadd('a', 1, 2, 3) members = list(r.sscan_iter('a')) @@ -667,6 +686,7 @@ def test_sscan_iter(self, r): members = list(r.sscan_iter('a', match=b'1')) assert set(members) == {b'1'} + @pytest.mark.skip(reason="WIP") def test_hscan(self, r): r.hmset('a', {'a': 1, 'b': 2, 'c': 3}) cursor, dic = r.hscan('a') @@ -675,6 +695,7 @@ def test_hscan(self, r): _, dic = r.hscan('a', match='a') assert dic == {b'a': b'1'} + @pytest.mark.skip(reason="WIP") def test_hscan_iter(self, r): r.hmset('a', {'a': 1, 'b': 2, 'c': 3}) dic = dict(r.hscan_iter('a')) @@ -682,6 +703,7 @@ def test_hscan_iter(self, r): dic = dict(r.hscan_iter('a', match='a')) assert dic == {b'a': b'1'} + @pytest.mark.skip(reason="WIP") def test_zscan(self, r): r.zadd('a', 1, 'a', 2, 'b', 3, 'c') cursor, pairs = r.zscan('a') @@ -690,6 +712,7 @@ def test_zscan(self, r): _, pairs = r.zscan('a', match='a') assert set(pairs == {(b'a', 1)}) + @pytest.mark.skip(reason="WIP") def test_zscan_iter(self, r): r.zadd('a', 1, 'a', 2, 'b', 3, 'c') pairs = list(r.zscan_iter('a')) @@ -794,8 +817,22 @@ def test_sunionstore(self, r): # SORTED SET COMMANDS def test_zadd(self, r): - r.zadd('a', a1=1, a2=2, a3=3) - assert r.zrange('a', 0, -1) == [b'a1', b'a2', b'a3'] + mapping = {'a1': 1.0, 'a2': 2.0, 'a3': 3.0} + r.zadd('a', mapping) + assert r.zrange('a', 0, -1, withscores=True) == \ + [(b'a1', 1.0), (b'a2', 2.0), (b'a3', 3.0)] + + # 
error cases + with pytest.raises(DataError): + r.zadd('a', {}) + + # cannot use both nx and xx options + with pytest.raises(DataError): + r.zadd('a', mapping, nx=True, xx=True) + + # cannot use the incr options with more than one value + with pytest.raises(DataError): + r.zadd('a', mapping, incr=True) def test_zcard(self, r): r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) @@ -808,9 +845,9 @@ def test_zcount(self, r): assert r.zcount('a', 10, 20) == 0 def test_zincrby(self, r): - r.zadd('a', a1=1, a2=2, a3=3) - assert r.zincrby('a', 'a2') == 3.0 - assert r.zincrby('a', 'a3', amount=5) == 8.0 + r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) + assert r.zincrby('a', 1, 'a2') == 3.0 + assert r.zincrby('a', 5, 'a3') == 8.0 assert r.zscore('a', 'a2') == 3.0 assert r.zscore('a', 'a3') == 8.0 @@ -820,9 +857,9 @@ def test_zlexcount(self, r): assert r.zlexcount('a', '[b', '[f') == 5 def test_zinterstore_fail_cross_slot(self, r): - r.zadd('a', a1=1, a2=1, a3=1) - r.zadd('b', a1=2, a2=2, a3=2) - r.zadd('c', a1=6, a3=5, a4=4) + r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) + r.zadd('a', {'a1': 2, 'a2': 2, 'a3': 2}) + r.zadd('a', {'a1': 6, 'a2': 5, 'a3': 4}) with pytest.raises(ResponseError) as excinfo: r.zinterstore('d', ['a', 'b', 'c']) assert re.search('ClusterCrossSlotError', str(excinfo)) @@ -835,6 +872,14 @@ def test_zinterstore_sum(self, r): assert r.zrange('d{foo}', 0, -1, withscores=True) == \ [(b'a3', 8), (b'a1'), 9] + def test_zinterstore_sum(self, r): + r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) + r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) + r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) + assert r.zinterstore('d', ['a', 'b', 'c']) == 2 + assert r.zrange('d', 0, -1, withscores=True) == \ + [(b'a3', 8), (b'a1', 9)] + def test_zinterstore_max(self, r): r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) diff --git a/tests/test_utils.py b/tests/test_utils.py index 32c21422..31dccf18 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -71,16 +71,16 @@ def test_parse_cluster_slots(): parse_cluster_slots(extended_mock_response) mock_binary_response = [ - [0, 5460, [b('172.17.0.2'), 7000], [b('172.17.0.2'), 7003]], - [5461, 10922, [b('172.17.0.2'), 7001], [b('172.17.0.2'), 7004]], - [10923, 16383, [b('172.17.0.2'), 7002], [b('172.17.0.2'), 7005]] + [0, 5460, [b'172.17.0.2', 7000], [b'172.17.0.2', 7003]], + [5461, 10922, [b'172.17.0.2', 7001], [b'172.17.0.2', 7004]], + [10923, 16383, [b'172.17.0.2', 7002], [b'172.17.0.2', 7005]] ] parse_cluster_slots(mock_binary_response) extended_mock_binary_response = [ - [0, 5460, [b('172.17.0.2'), 7000, b('ffd36d8d7cb10d813f81f9662a835f6beea72677')], [b('172.17.0.2'), 7003, b('5c15b69186017ddc25ebfac81e74694fc0c1a160')]], - [5461, 10922, [b('172.17.0.2'), 7001, b('069cda388c7c41c62abe892d9e0a2d55fbf5ffd5')], [b('172.17.0.2'), 7004, b('dc152a08b4cf1f2a0baf775fb86ad0938cb907dc')]], - [10923, 16383, [b('172.17.0.2'), 7002, b('3588b4cf9fc72d57bb262a024747797ead0cf7ea')], [b('172.17.0.2'), 7005, b('a72c02c7d85f4ec3145ab2c411eefc0812aa96b0')]] + [0, 5460, [b'172.17.0.2', 7000, b'ffd36d8d7cb10d813f81f9662a835f6beea72677'], [b'172.17.0.2', 7003, b'5c15b69186017ddc25ebfac81e74694fc0c1a160']], + [5461, 10922, [b'172.17.0.2', 7001, b'069cda388c7c41c62abe892d9e0a2d55fbf5ffd5'], [b'172.17.0.2', 7004, b'dc152a08b4cf1f2a0baf775fb86ad0938cb907dc']], + [10923, 16383, [b'172.17.0.2', 7002, b'3588b4cf9fc72d57bb262a024747797ead0cf7ea'], [b'172.17.0.2', 7005, b'a72c02c7d85f4ec3145ab2c411eefc0812aa96b0']] ] extended_mock_parsed = { From 
aa195ac29ae657fd57c26ff5ca08f701c4301e08 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 10 Feb 2019 11:05:27 +0100 Subject: [PATCH 16/65] Update .travis.yml --- .travis.yml | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 1b8ad1d1..376c0df0 100644 --- a/.travis.yml +++ b/.travis.yml @@ -16,6 +16,7 @@ install: - "if [[ $REDIS_VERSION == '3.2' ]]; then REDIS_VERSION=3.2 make redis-install; fi" - "if [[ $REDIS_VERSION == '4.0' ]]; then REDIS_VERSION=4.0 make redis-install; fi" - "if [[ $REDIS_VERSION == '5.0' ]]; then REDIS_VERSION=5.0 make redis-install; fi" + - "if [[ $TEST_PYCODESTYLE == '1' ]]; then pip install pycodestyle; fi" - pip install -r dev-requirements.txt - pip install -e . - "if [[ $HIREDIS == '1' ]]; then pip install hiredis; fi" @@ -32,7 +33,7 @@ env: # Redis 5.0 & HIREDIS - HIREDIS=0 REDIS_VERSION=5.0 - HIREDIS=1 REDIS_VERSION=5.0 -script: +script: - make start - coverage erase - coverage run --source rediscluster -p -m py.test @@ -41,6 +42,21 @@ script: after_success: - coverage combine - coveralls + - "if [[ $TEST_PYCODESTYLE == '1' ]]; then pycodestyle --repeat --show-source --exclude=.venv,.tox,dist,docs,build,*.egg,redis_install .; fi" matrix: allow_failures: - python: "nightly" + - python: 2.7 + env: TEST_PYCODESTYLE=1 + - python: 3.6 + env: TEST_PYCODESTYLE=1 + # python 3.7 has to be specified manually in the matrix + # https://github.com/travis-ci/travis-ci/issues/9815 + - python: 3.7 + dist: xenial + sudo: true + env: TEST_HIREDIS=0 + - python: 3.7 + dist: xenial + sudo: true + env: TEST_HIREDIS=1 From 47bacfd656f407c80e3f8a9794ad43e7df3cef01 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Wed, 13 Feb 2019 09:52:54 +0100 Subject: [PATCH 17/65] Additional updates and compat fixes --- rediscluster/client.py | 21 +++++++++++---------- tests/test_pipeline.py | 8 ++++---- 2 files changed, 15 insertions(+), 14 deletions(-) diff --git a/rediscluster/client.py b/rediscluster/client.py index 27ced430..034e0d10 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +from __future__ import unicode_literals # python std lib import datetime @@ -15,7 +16,7 @@ ) from .exceptions import ( RedisClusterException, AskError, MovedError, ClusterDownError, - ClusterError, TryAgainError, + ClusterError, TryAgainError ) from .pubsub import ClusterPubSub from .utils import ( @@ -942,13 +943,13 @@ def sort(self, name, start=None, num=None, by=None, get=None, desc=False, alpha= (start is not None and num is None): raise RedisError("RedisError: ``start`` and ``num`` must both be specified") try: - data_type = b(self.type(name)) + data_type = b"{0}".format(self.type(name)) - if data_type == b("none"): + if data_type == b"none": return [] - elif data_type == b("set"): + elif data_type == b"set": data = list(self.smembers(name))[:] - elif data_type == b("list"): + elif data_type == b"list": data = self.lrange(name, 0, -1) else: raise RedisClusterException("Unable to sort data type : {0}".format(data_type)) @@ -969,10 +970,10 @@ def sort(self, name, start=None, num=None, by=None, get=None, desc=False, alpha= data = self._retrive_data_from_sort(data, get) if store is not None: - if data_type == b("set"): + if data_type == b"set": self.delete(store) self.rpush(store, *data) - elif data_type == b("list"): + elif data_type == b"list": self.delete(store) self.rpush(store, *data) else: @@ -981,7 +982,7 @@ def sort(self, name, start=None, num=None, by=None, get=None, desc=False, alpha= 
return len(data) if groups: - if not get or isinstance(get, basestring) or len(get) < 2: + if not get or isinstance(get, (bytes, basestring)) or len(get) < 2: raise DataError('when using "groups" the "get" argument ' 'must be specified and contain at least ' 'two keys') @@ -997,7 +998,7 @@ def _retrive_data_from_sort(self, data, get): Used by sort() """ if get is not None: - if isinstance(get, basestring): + if isinstance(get, (bytes, basestring)): get = [get] new_data = [] for k in data: @@ -1025,7 +1026,7 @@ def _get_single_item(self, k, g): single_item = k else: single_item = None - return b(single_item) + return b"{0}".format(single_item) def _strtod_key_func(self, arg): """ diff --git a/tests/test_pipeline.py b/tests/test_pipeline.py index b49fadeb..5f01b919 100644 --- a/tests/test_pipeline.py +++ b/tests/test_pipeline.py @@ -150,11 +150,11 @@ def test_transaction_with_empty_error_command(self, r): """ for error_switch in (True, False): with r.pipeline() as pipe: - pipe.set('a', 1).mget([]).set('c', 3) + pipe.set('a', 1).get("").set('c', 3) result = pipe.execute(raise_on_error=error_switch) assert result[0] - assert result[1] == [] + assert result[1] == None assert result[2] def test_pipeline_with_empty_error_command(self, r): @@ -164,11 +164,11 @@ def test_pipeline_with_empty_error_command(self, r): """ for error_switch in (True, False): with r.pipeline(transaction=False) as pipe: - pipe.set('a', 1).mget([]).set('c', 3) + pipe.set('a', 1).get("").set('c', 3) result = pipe.execute(raise_on_error=error_switch) assert result[0] - assert result[1] == [] + assert result[1] == None assert result[2] def test_parse_error_raised(self, r): From 727f6dd204b33347bf56a01d3a98dd882d5d5b24 Mon Sep 17 00:00:00 2001 From: Vinicius Souza Date: Thu, 10 Jan 2019 18:10:04 -0200 Subject: [PATCH 18/65] enable replace option when performs rename operation --- rediscluster/client.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rediscluster/client.py b/rediscluster/client.py index 034e0d10..a2d4742a 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -765,7 +765,7 @@ def msetnx(self, *args, **kwargs): return self.mset(**kwargs) - def rename(self, src, dst): + def rename(self, src, dst, replace=False): """ Rename key ``src`` to ``dst`` @@ -805,7 +805,7 @@ def rename(self, src, dst): ttl = 0 self.delete(dst) - self.restore(dst, ttl, data) + self.restore(dst, ttl, data, replace) self.delete(src) return True From 68077ff8c95c589872cc2f10998ea29ac8ed4f1e Mon Sep 17 00:00:00 2001 From: Grokzen Date: Tue, 5 Mar 2019 10:33:36 +0100 Subject: [PATCH 19/65] Use the new Encoder class --- rediscluster/nodemanager.py | 26 +++++++------------------- 1 file changed, 7 insertions(+), 19 deletions(-) diff --git a/rediscluster/nodemanager.py b/rediscluster/nodemanager.py index 6644e9ea..2c69e38d 100644 --- a/rediscluster/nodemanager.py +++ b/rediscluster/nodemanager.py @@ -10,6 +10,7 @@ # 3rd party imports from redis import Redis from redis._compat import unicode, bytes, long, basestring +from redis.connection import Encoder from redis import ConnectionError, TimeoutError, ResponseError @@ -37,34 +38,21 @@ def __init__(self, startup_nodes=None, reinitialize_steps=None, skip_full_covera self.reinitialize_steps = reinitialize_steps or 25 self._skip_full_coverage_check = skip_full_coverage_check self.nodemanager_follow_cluster = nodemanager_follow_cluster + self.encoder = Encoder( + connection_kwargs.get('encoding', 'utf-8'), + connection_kwargs.get('encoding_errors', 'strict'), + 
connection_kwargs.get('decode_responses', False) + ) if not self.startup_nodes: raise RedisClusterException("No startup nodes provided") - def encode(self, value): - """ - Return a bytestring representation of the value. - This method is copied from Redis' connection.py:Connection.encode - """ - if isinstance(value, bytes): - return value - elif isinstance(value, (int, long)): - value = b(str(value)) - elif isinstance(value, float): - value = b(repr(value)) - elif not isinstance(value, basestring): - value = unicode(value) - if isinstance(value, unicode): - # The encoding should be configurable as in connection.py:Connection.encode - value = value.encode('utf-8') - return value - def keyslot(self, key): """ Calculate keyslot for a given key. Tuned for compatibility with python 2.7.x """ - k = self.encode(key) + k = self.encoder.encode(key) start = k.find(b"{") From 35fe0d42ed1cb7e1e90946ba02071c60d0484904 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 24 Mar 2019 22:15:44 +0100 Subject: [PATCH 20/65] Fix more byte method conversion calls --- tests/test_cluster_obj.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/tests/test_cluster_obj.py b/tests/test_cluster_obj.py index 9728b0ec..dd96c252 100644 --- a/tests/test_cluster_obj.py +++ b/tests/test_cluster_obj.py @@ -392,8 +392,8 @@ def assert_moved_redirection_on_slave(sr, connection_pool_cls, cluster_obj): master_value = {'host': '127.0.0.1', 'name': '127.0.0.1:7000', 'port': 7000, 'server_type': 'master'} with patch.object(ClusterConnectionPool, 'get_master_node_by_slot') as return_master_mock: return_master_mock.return_value = master_value - assert cluster_obj.get('foo16706') == b('foo') - assert return_slave_mock.call_count == 1 + assert cluster_obj.get('foo16706') == b'foo' + assert return_master_mock.call_count == 1 def test_moved_redirection_on_slave_with_default_client(sr): @@ -444,10 +444,19 @@ def test_access_correct_slave_with_readonly_mode_client(sr): 'get_master_node_by_slot', return_value=master_value) as return_master_mock: readonly_client = RedisCluster(host="127.0.0.1", port=7000, readonly_mode=True) +<<<<<<< HEAD assert b('foo') == readonly_client.get('foo16706') readonly_client = RedisCluster.from_url(url="redis://127.0.0.1:7000/0", readonly_mode=True) assert b('foo') == readonly_client.get('foo16706') +======= + assert b'foo' == readonly_client.get('foo16706') + assert return_master_mock.call_count == 0 + + readonly_client = RedisCluster.from_url(url="redis://127.0.0.1:7000/0", readonly_mode=True) + assert b'foo' == readonly_client.get('foo16706') + assert return_master_mock.call_count == 0 +>>>>>>> Fix more byte method conversion calls def test_refresh_using_specific_nodes(r): From 3d14ef5229ffc3d98ac7fdb984327dde789badcb Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 24 Mar 2019 22:19:05 +0100 Subject: [PATCH 21/65] Fix test test_large_responses --- tests/test_commands.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/test_commands.py b/tests/test_commands.py index 136ea112..3795fd72 100644 --- a/tests/test_commands.py +++ b/tests/test_commands.py @@ -201,7 +201,7 @@ def test_exists(self, r): r['b'] = 'bar' assert r.exists('a') == 1 assert r.exists('b') == 1 - # This no longer works in cluster. See test_exists_fail_not_same_slots() for failing test + # This no longer works in cluster. 
See test_exists_fail_not_same_slots() for failing test # assert r.exists('a', 'b') == 2 def test_exists_fail_not_same_slots(self, r): @@ -891,7 +891,7 @@ def test_zinterstore_max(self, r): def test_zinterstore_min(self, r): r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) r.zadd('b', {'a1': 2, 'a2': 3, 'a3': 5}) - r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) + r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) assert r.zinterstore('d{foo}', ['a{foo}', 'b{foo}', 'c{foo}'], aggregate='MIN') == 2 assert r.zrange('d{foo}', 0, -1, withscores=True) == \ [(b'a1', 1), (b'a3', 3)] @@ -1412,7 +1412,7 @@ def test_large_responses(self, r): # load up 100K of data into a key data = ''.join([ascii_letters] * (100000 // len(ascii_letters))) r['a'] = data - assert r['a'] == bdata + assert r['a'] == data.encode() def test_floating_point_encoding(self, r): """ From 64fd6d565929c401e9cf5936c31e08cb13dc348b Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 24 Mar 2019 22:30:10 +0100 Subject: [PATCH 22/65] Remove test class that is no longer used. --- tests/test_commands.py | 41 ----------------------------------------- 1 file changed, 41 deletions(-) diff --git a/tests/test_commands.py b/tests/test_commands.py index 3795fd72..3135b47f 100644 --- a/tests/test_commands.py +++ b/tests/test_commands.py @@ -1297,47 +1297,6 @@ def test_sort_all_options(self, r): [b'vodka', b'milk', b'gin', b'apple juice'] -class TestStrictCommands(object): - - def test_strict_zadd(self, sr): - sr.zadd('a', 1.0, 'a1', 2.0, 'a2', a3=3.0) - assert sr.zrange('a', 0, -1, withscores=True) == \ - [(b'a1', 1.0), (b'a2', 2.0), (b'a3', 3.0)] - - def test_strict_lrem(self, sr): - sr.rpush('a', 'a1', 'a2', 'a3', 'a1') - sr.lrem('a', 0, 'a1') - assert sr.lrange('a', 0, -1) == [b'a2', b'a3'] - - def test_strict_setex(self, sr): - assert sr.setex('a', 60, '1') - assert sr['a'] == b'1' - assert 0 < sr.ttl('a') <= 60 - - def test_strict_ttl(self, sr): - assert not sr.expire('a', 10) - sr['a'] = '1' - assert sr.expire('a', 10) - assert 0 < sr.ttl('a') <= 10 - assert sr.persist('a') - assert sr.ttl('a') == -1 - - def test_strict_pttl(self, sr): - assert not sr.pexpire('a', 10000) - sr['a'] = '1' - assert sr.pexpire('a', 10000) - assert 0 < sr.pttl('a') <= 10000 - assert sr.persist('a') - assert sr.pttl('a') == -1 - - def test_eval(self, sr): - res = sr.eval("return {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}", 2, "A{foo}", "B{foo}", "first", "second") - assert res[0] == b'A{foo}' - assert res[1] == b'B{foo}' - assert res[2] == b'first' - assert res[3] == b'second' - - class TestBinarySave(object): def test_binary_get_set(self, r): assert r.set(' foo bar ', '123') From 022e4011d865b75851aaf43147931507cf9f3831 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 24 Mar 2019 22:34:51 +0100 Subject: [PATCH 23/65] Fix test test_get_node_by_slot_random --- tests/test_cluster_connection_pool.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tests/test_cluster_connection_pool.py b/tests/test_cluster_connection_pool.py index f403e684..d7af752b 100644 --- a/tests/test_cluster_connection_pool.py +++ b/tests/test_cluster_connection_pool.py @@ -242,6 +242,12 @@ def test_get_node_by_slot_random(self): """ pool = self.get_pool(connection_kwargs={}) + # Set the values that we expect to be set for the NodeManager. 
Represents 2 nodes for 1 specific slot
+        pool.nodes.slots[0] = [
+            {'host': '172.20.0.2', 'port': 7000, 'name': '172.20.0.2:7000', 'server_type': 'master'},
+            {'host': '172.20.0.2', 'port': 7003, 'name': '172.20.0.2:7003', 'server_type': 'slave'},
+        ]
+
         expected_ports = {7000, 7003}
         actual_ports = set()
         for _ in range(0, 100):

From b7454aec52d719c43b5bcd8e5bec59a9b8c5454d Mon Sep 17 00:00:00 2001
From: Grokzen 
Date: Sun, 24 Mar 2019 22:35:58 +0100
Subject: [PATCH 24/65] Fix test test_boolean_parsing

---
 tests/test_cluster_connection_pool.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/test_cluster_connection_pool.py b/tests/test_cluster_connection_pool.py
index d7af752b..c586d1c1 100644
--- a/tests/test_cluster_connection_pool.py
+++ b/tests/test_cluster_connection_pool.py
@@ -17,7 +17,7 @@
 import pytest
 import redis
 from mock import patch, Mock
-from redis.connection import ssl_available
+from redis.connection import ssl_available, to_bool
 from redis._compat import unicode


From 22b3ad7ee5f6789dd4461290a6a2eebb8fdfd0c5 Mon Sep 17 00:00:00 2001
From: Grokzen 
Date: Sun, 24 Mar 2019 23:04:13 +0100
Subject: [PATCH 25/65] Make a mocked RedisCluster object return a more stable
 and deterministic RedisCluster object by forcing it to think the nodes setup
 is always pointing to 127.0.0.1 to avoid issues with different ip addresses
 and node configurations returned from redis. Your real redis server that you
 test against still must be accessible on 127.0.0.1 for tests to work out.

---
 tests/test_cluster_obj.py | 37 ++++++++++++++++++++++++++++++++++++-
 1 file changed, 36 insertions(+), 1 deletion(-)

diff --git a/tests/test_cluster_obj.py b/tests/test_cluster_obj.py
index dd96c252..1e442be4 100644
--- a/tests/test_cluster_obj.py
+++ b/tests/test_cluster_obj.py
@@ -31,6 +31,41 @@ class DummyConnection(object):
     pass


+def get_mocked_redis_client(*args, **kwargs):
+    """
+    Return a stable RedisCluster object that have deterministic
+    nodes and slots setup to remove the problem of different IP addresses
+    on different installations and machines.
+    """
+    with patch.object(Redis, 'execute_command') as execute_command_mock:
+        def execute_command(self, *args, **kwargs):
+            if args[0] == 'slots':
+                mock_cluster_slots = [
+                    [
+                        0, 5460,
+                        ['127.0.0.1', 7000, 'node_0'],
+                        ['127.0.0.1', 7004, 'node_4']
+                    ],
+                    [
+                        5461, 10922,
+                        ['127.0.0.1', 7001, 'node_1'],
+                        ['127.0.0.1', 7005, 'node_5']
+                    ],
+                    [
+                        10923, 16383,
+                        ['127.0.0.1', 7002, 'node_2'],
+                        ['127.0.0.1', 7003, '2node_3']
+                    ]
+                ]
+                return mock_cluster_slots
+            elif args[0] == 'cluster-require-full-coverage':
+                return {'cluster-require-full-coverage': 'yes'}
+
+        execute_command_mock.side_effect = execute_command
+
+        return RedisCluster(*args, **kwargs)
+
+
 def test_representation(r):
     assert re.search('^RedisCluster<[0-9\.\:\,].+>$', str(r))


@@ -296,7 +331,7 @@ def test_pipeline_ask_redirection():
     Important thing to verify is that it tries to talk
     to the second node.
     """
-    r = RedisCluster(host="127.0.0.1", port=7000)
+    r = get_mocked_redis_client(host="127.0.0.1", port=7000)

     with patch.object(RedisCluster, 'parse_response') as parse_response:


From d5486f3be915f97d3986bf5d852d368c285b586f Mon Sep 17 00:00:00 2001
From: Grokzen 
Date: Sun, 24 Mar 2019 23:27:21 +0100
Subject: [PATCH 26/65] Update sorted set tests to the latest code. 
Some still fails but most of them now pass --- tests/test_commands.py | 141 ++++++++++++++++++++++++++++++++--------- 1 file changed, 110 insertions(+), 31 deletions(-) diff --git a/tests/test_commands.py b/tests/test_commands.py index 3135b47f..38a53a16 100644 --- a/tests/test_commands.py +++ b/tests/test_commands.py @@ -15,6 +15,7 @@ from redis._compat import unichr, ascii_letters, iteritems, iterkeys, itervalues, unicode from redis.client import parse_info from redis.exceptions import ResponseError, DataError, RedisError, DataError +from redis import exceptions pytestmark = skip_if_server_version_lt('2.9.0') @@ -823,17 +824,45 @@ def test_zadd(self, r): [(b'a1', 1.0), (b'a2', 2.0), (b'a3', 3.0)] # error cases - with pytest.raises(DataError): + with pytest.raises(exceptions.DataError): r.zadd('a', {}) # cannot use both nx and xx options - with pytest.raises(DataError): + with pytest.raises(exceptions.DataError): r.zadd('a', mapping, nx=True, xx=True) # cannot use the incr options with more than one value - with pytest.raises(DataError): + with pytest.raises(exceptions.DataError): r.zadd('a', mapping, incr=True) + def test_zadd_nx(self, r): + assert r.zadd('a', {'a1': 1}) == 1 + assert r.zadd('a', {'a1': 99, 'a2': 2}, nx=True) == 1 + assert r.zrange('a', 0, -1, withscores=True) == \ + [(b'a1', 1.0), (b'a2', 2.0)] + + def test_zadd_xx(self, r): + assert r.zadd('a', {'a1': 1}) == 1 + assert r.zadd('a', {'a1': 99, 'a2': 2}, xx=True) == 0 + assert r.zrange('a', 0, -1, withscores=True) == \ + [(b'a1', 99.0)] + + def test_zadd_ch(self, r): + assert r.zadd('a', {'a1': 1}) == 1 + assert r.zadd('a', {'a1': 99, 'a2': 2}, ch=True) == 2 + assert r.zrange('a', 0, -1, withscores=True) == \ + [(b'a2', 2.0), (b'a1', 99.0)] + + def test_zadd_incr(self, r): + assert r.zadd('a', {'a1': 1}) == 1 + assert r.zadd('a', {'a1': 4.5}, incr=True) == 5.5 + + def test_zadd_incr_with_xx(self, r): + # this asks zadd to incr 'a1' only if it exists, but it clearly + # doesn't. 
Redis returns a null value in this case and so should + # redis-py + assert r.zadd('a', {'a1': 1}, xx=True, incr=True) is None + def test_zcard(self, r): r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) assert r.zcard('a') == 3 @@ -842,6 +871,8 @@ def test_zcount(self, r): r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) assert r.zcount('a', '-inf', '+inf') == 3 assert r.zcount('a', 1, 2) == 2 + assert r.zcount('a', '(' + str(1), 2) == 1 + assert r.zcount('a', 1, '(' + str(2)) == 1 assert r.zcount('a', 10, 20) == 0 def test_zincrby(self, r): @@ -851,6 +882,7 @@ def test_zincrby(self, r): assert r.zscore('a', 'a2') == 3.0 assert r.zscore('a', 'a3') == 8.0 + @skip_if_server_version_lt('2.8.9') def test_zlexcount(self, r): r.zadd('a', {'a': 0, 'b': 0, 'c': 0, 'd': 0, 'e': 0, 'f': 0, 'g': 0}) assert r.zlexcount('a', '-', '+') == 7 @@ -864,14 +896,6 @@ def test_zinterstore_fail_cross_slot(self, r): r.zinterstore('d', ['a', 'b', 'c']) assert re.search('ClusterCrossSlotError', str(excinfo)) - def test_zinterstore_sum(self, r): - r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) - r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) - r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) - assert r.zinterstore('d{foo}', ['a{foo}', 'b{foo}', 'c{foo}']) == 2 - assert r.zrange('d{foo}', 0, -1, withscores=True) == \ - [(b'a3', 8), (b'a1'), 9] - def test_zinterstore_sum(self, r): r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) @@ -884,28 +908,70 @@ def test_zinterstore_max(self, r): r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) - assert r.zinterstore('d{foo}', ['a{foo}', 'b{foo}', 'c{foo}'], aggregate='MAX') == 2 - assert r.zrange('d{foo}', 0, -1, withscores=True) == \ + assert r.zinterstore('d', ['a', 'b', 'c'], aggregate='MAX') == 2 + assert r.zrange('d', 0, -1, withscores=True) == \ [(b'a3', 5), (b'a1', 6)] def test_zinterstore_min(self, r): r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) r.zadd('b', {'a1': 2, 'a2': 3, 'a3': 5}) r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) - assert r.zinterstore('d{foo}', ['a{foo}', 'b{foo}', 'c{foo}'], aggregate='MIN') == 2 - assert r.zrange('d{foo}', 0, -1, withscores=True) == \ + assert r.zinterstore('d', ['a', 'b', 'c'], aggregate='MIN') == 2 + assert r.zrange('d', 0, -1, withscores=True) == \ [(b'a1', 1), (b'a3', 3)] def test_zinterstore_with_weight(self, r): r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) - assert r.zinterstore('d{foo}', {'a{foo}': 1, 'b{foo}': 2, 'c{foo}': 3}) == 2 - assert r.zrange('d{foo}', 0, -1, withscores=True) == \ + assert r.zinterstore('d', {'a': 1, 'b': 2, 'c': 3}) == 2 + assert r.zrange('d', 0, -1, withscores=True) == \ [(b'a3', 20), (b'a1', 23)] + @skip_if_server_version_lt('4.9.0') + def test_zpopmax(self, r): + r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) + assert r.zpopmax('a') == [(b'a3', 3)] + + # with count + assert r.zpopmax('a', count=2) == \ + [(b'a2', 2), (b'a1', 1)] + + @skip_if_server_version_lt('4.9.0') + def test_zpopmin(self, r): + r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) + assert r.zpopmin('a') == [(b'a1', 1)] + + # with count + assert r.zpopmin('a', count=2) == \ + [(b'a2', 2), (b'a3', 3)] + + @skip_if_server_version_lt('4.9.0') + def test_bzpopmax(self, r): + r.zadd('a', {'a1': 1, 'a2': 2}) + r.zadd('b', {'b1': 10, 'b2': 20}) + assert r.bzpopmax(['b', 'a'], timeout=1) == (b'b', b'b2', 20) + assert r.bzpopmax(['b', 'a'], timeout=1) == (b'b', b'b1', 10) + assert r.bzpopmax(['b', 'a'], timeout=1) 
== (b'a', b'a2', 2) + assert r.bzpopmax(['b', 'a'], timeout=1) == (b'a', b'a1', 1) + assert r.bzpopmax(['b', 'a'], timeout=1) is None + r.zadd('c', {'c1': 100}) + assert r.bzpopmax('c', timeout=1) == (b'c', b'c1', 100) + + @skip_if_server_version_lt('4.9.0') + def test_bzpopmin(self, r): + r.zadd('a', {'a1': 1, 'a2': 2}) + r.zadd('b', {'b1': 10, 'b2': 20}) + assert r.bzpopmin(['b', 'a'], timeout=1) == (b'b', b'b1', 10) + assert r.bzpopmin(['b', 'a'], timeout=1) == (b'b', b'b2', 20) + assert r.bzpopmin(['b', 'a'], timeout=1) == (b'a', b'a1', 1) + assert r.bzpopmin(['b', 'a'], timeout=1) == (b'a', b'a2', 2) + assert r.bzpopmin(['b', 'a'], timeout=1) is None + r.zadd('c', {'c1': 100}) + assert r.bzpopmin('c', timeout=1) == (b'c', b'c1', 100) + def test_zrange(self, r): - r.zadd('a', a1=1, a2=2, a3=3) + r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) assert r.zrange('a', 0, 1) == [b'a1', b'a2'] assert r.zrange('a', 1, 2) == [b'a2', b'a3'] @@ -919,8 +985,9 @@ def test_zrange(self, r): assert r.zrange('a', 0, 1, withscores=True, score_cast_func=int) == \ [(b'a1', 1), (b'a2', 2)] + @skip_if_server_version_lt('2.8.9') def test_zrangebylex(self, r): - r.zadd('a', a=0, b=0, c=0, d=0, e=0, f=0, g=0) + r.zadd('a', {'a': 0, 'b': 0, 'c': 0, 'd': 0, 'e': 0, 'f': 0, 'g': 0}) assert r.zrangebylex('a', '-', '[c') == [b'a', b'b', b'c'] assert r.zrangebylex('a', '-', '(c') == [b'a', b'b'] assert r.zrangebylex('a', '[aaa', '(g') == \ @@ -928,8 +995,19 @@ def test_zrangebylex(self, r): assert r.zrangebylex('a', '[f', '+') == [b'f', b'g'] assert r.zrangebylex('a', '-', '+', start=3, num=2) == [b'd', b'e'] + @skip_if_server_version_lt('2.9.9') + def test_zrevrangebylex(self, r): + r.zadd('a', {'a': 0, 'b': 0, 'c': 0, 'd': 0, 'e': 0, 'f': 0, 'g': 0}) + assert r.zrevrangebylex('a', '[c', '-') == [b'c', b'b', b'a'] + assert r.zrevrangebylex('a', '(c', '-') == [b'b', b'a'] + assert r.zrevrangebylex('a', '(g', '[aaa') == \ + [b'f', b'e', b'd', b'c', b'b'] + assert r.zrevrangebylex('a', '+', '[f') == [b'g', b'f'] + assert r.zrevrangebylex('a', '+', '-', start=3, num=2) == \ + [b'd', b'c'] + def test_zrangebyscore(self, r): - r.zadd('a', a1=1, a2=2, a3=3, a4=4, a5=5) + r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3, 'a4': 4, 'a5': 5}) assert r.zrangebyscore('a', 2, 4) == [b'a2', b'a3', b'a4'] # slicing with start/num @@ -963,6 +1041,7 @@ def test_zrem_multiple_keys(self, r): assert r.zrem('a', 'a1', 'a2') == 2 assert r.zrange('a', 0, 5) == [b'a3'] + @skip_if_server_version_lt('2.8.9') def test_zremrangebylex(self, r): r.zadd('a', {'a': 0, 'b': 0, 'c': 0, 'd': 0, 'e': 0, 'f': 0, 'g': 0}) assert r.zremrangebylex('a', '-', '[c') == 3 @@ -978,14 +1057,14 @@ def test_zremrangebyrank(self, r): assert r.zrange('a', 0, 5) == [b'a1', b'a5'] def test_zremrangebyscore(self, r): - r.zadd('a', a1=1, a2=2, a3=3, a4=4, a5=5) + r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3, 'a4': 4, 'a5': 5}) assert r.zremrangebyscore('a', 2, 4) == 3 assert r.zrange('a', 0, -1) == [b'a1', b'a5'] assert r.zremrangebyscore('a', 2, 4) == 0 assert r.zrange('a', 0, -1) == [b'a1', b'a5'] def test_zrevrange(self, r): - r.zadd('a', a1=1, a2=2, a3=3) + r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) assert r.zrevrange('a', 0, 1) == [b'a3', b'a2'] assert r.zrevrange('a', 1, 2) == [b'a2', b'a1'] @@ -1001,7 +1080,7 @@ def test_zrevrange(self, r): [(b'a3', 3.0), (b'a2', 2.0)] def test_zrevrangebyscore(self, r): - r.zadd('a', a1=1, a2=2, a3=3, a4=4, a5=5) + r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3, 'a4': 4, 'a5': 5}) assert r.zrevrangebyscore('a', 4, 2) == [b'a4', b'a3', b'a2'] # slicing with 
start/num @@ -1041,32 +1120,32 @@ def test_zunionstore_sum(self, r): r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) - assert r.zunionstore('d{foo}', ['a{foo}', 'b{foo}', 'c{foo}']) == 4 - assert r.zrange('d{foo}', 0, -1, withscores=True) == \ + assert r.zunionstore('d', ['a', 'b', 'c']) == 4 + assert r.zrange('d', 0, -1, withscores=True) == \ [(b'a2', 3), (b'a4', 4), (b'a3', 8), (b'a1', 9)] def test_zunionstore_max(self, r): r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) - assert r.zunionstore('d{foo}', ['a{foo}', 'b{foo}', 'c{foo}'], aggregate='MAX') == 4 - assert r.zrange('d{foo}', 0, -1, withscores=True) == \ + assert r.zunionstore('d', ['a', 'b', 'c'], aggregate='MAX') == 4 + assert r.zrange('d', 0, -1, withscores=True) == \ [(b'a2', 2), (b'a4', 4), (b'a3', 5), (b'a1', 6)] def test_zunionstore_min(self, r): r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 4}) r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) - assert r.zunionstore('d{foo}', ['a{foo}', 'b{foo}', 'c{foo}'], aggregate='MIN') == 4 - assert r.zrange('d{foo}', 0, -1, withscores=True) == \ + assert r.zunionstore('d', ['a', 'b', 'c'], aggregate='MIN') == 4 + assert r.zrange('d', 0, -1, withscores=True) == \ [(b'a1', 1), (b'a2', 2), (b'a3', 3), (b'a4', 4)] def test_zunionstore_with_weight(self, r): r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) - assert r.zunionstore('d{foo}', {'a{foo}': 1, 'b{foo}': 2, 'c{foo}': 3}) == 4 - assert r.zrange('d{foo}', 0, -1, withscores=True) == \ + assert r.zunionstore('d', {'a': 1, 'b': 2, 'c': 3}) == 4 + assert r.zrange('d', 0, -1, withscores=True) == \ [(b'a2', 5), (b'a4', 12), (b'a3', 20), (b'a1', 23)] # # HYPERLOGLOG TESTS From 5834df81e31a6bf7306c9ce73378186d0303cf96 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 24 Mar 2019 23:28:07 +0100 Subject: [PATCH 27/65] Fix test_keys --- tests/test_commands.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/tests/test_commands.py b/tests/test_commands.py index 38a53a16..5c9aac6f 100644 --- a/tests/test_commands.py +++ b/tests/test_commands.py @@ -318,14 +318,13 @@ def test_incrbyfloat(self, r): assert float(r['a']) == float(2.1) def test_keys(self, r): - keys = r.keys() - assert keys == [] + assert r.keys() == [] keys_with_underscores = {b'test_a', b'test_b'} keys = keys_with_underscores.union({b'testc'}) for key in keys: r[key] = 1 - assert set(r.keys(pattern='test_*')) == {b"{0}".format(k) for k in keys_with_underscores} - assert set(r.keys(pattern='test*')) == {b"{0}".format(k) for k in keys} + assert set(r.keys(pattern='test_*')) == keys_with_underscores + assert set(r.keys(pattern='test*')) == keys def test_mget(self, r): assert r.mget([]) == [] From 2c1797658bdab311e8e99ceb6b713331c1f60c48 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 24 Mar 2019 23:52:41 +0100 Subject: [PATCH 28/65] Make all sort commands into own class to make em easier to run. Add test skip markmarks for 7 sort tests that fails based on how they are currently designed to work cross slot. If a good set of keys is found to work on the redis server, they will be reintegrated. 
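One way to reintegrate them later (an illustration only, not part of this diff, and assuming the server accepts SORT BY/GET patterns that carry an explicit hash tag) is to pin every key involved to one slot with a shared {tag}, for example:

    # Keys that share the same {tag} hash to the same slot, so SORT can
    # resolve the BY pattern against a single node.
    r['{sort}score:1'] = 8
    r['{sort}score:2'] = 3
    r['{sort}score:3'] = 5
    r.rpush('{sort}a', '3', '2', '1')
    assert r.sort('{sort}a', by='{sort}score:*') == [b'2', b'3', b'1']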
--- tests/test_commands.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tests/test_commands.py b/tests/test_commands.py index 5c9aac6f..3d041c9d 100644 --- a/tests/test_commands.py +++ b/tests/test_commands.py @@ -1256,6 +1256,8 @@ def test_hvals(self, r): remote_vals = r.hvals('a') assert sorted(local_vals) == sorted(remote_vals) + +class TestRedisCommandsSort(object): # SORT def test_sort_basic(self, r): r.rpush('a', '3', '2', '1', '4') @@ -1265,6 +1267,7 @@ def test_sort_limited(self, r): r.rpush('a', '3', '2', '1', '4') assert r.sort('a', start=1, num=2) == [b'2', b'3'] + @pytest.mark.skip(reason="Sort works if done against keys in same slot") def test_sort_by(self, r): r['score:1'] = 8 r['score:2'] = 3 @@ -1272,6 +1275,7 @@ def test_sort_by(self, r): r.rpush('a', '3', '2', '1') assert r.sort('a', by='score:*') == [b'2', b'3', b'1'] + @pytest.mark.skip(reason="Sort works if done against keys in same slot") def test_sort_get(self, r): r['user:1'] = 'u1' r['user:2'] = 'u2' @@ -1279,6 +1283,7 @@ def test_sort_get(self, r): r.rpush('a', '2', '3', '1') assert r.sort('a', get='user:*') == [b'u1', b'u2', b'u3'] + @pytest.mark.skip(reason="Sort works if done against keys in same slot") def test_sort_get_multi(self, r): r['user:1'] = 'u1' r['user:2'] = 'u2' @@ -1287,6 +1292,7 @@ def test_sort_get_multi(self, r): assert r.sort('a', get=('user:*', '#')) == \ [b'u1', b'1', b'u2', b'2', b'u3', b'3'] + @pytest.mark.skip(reason="Sort works if done against keys in same slot") def test_sort_get_groups_two(self, r): r['user:1'] = 'u1' r['user:2'] = 'u2' @@ -1319,6 +1325,7 @@ def test_sort_groups_no_get(self, r): with pytest.raises(DataError): r.sort('a', groups=True) + @pytest.mark.skip(reason="Sort works if done against keys in same slot") def test_sort_groups_three_gets(self, r): r['user:1'] = 'u1' r['user:2'] = 'u2' @@ -1342,11 +1349,13 @@ def test_sort_alpha(self, r): assert r.sort('a', alpha=True) == \ [b'a', b'b', b'c', b'd', b'e'] + @pytest.mark.skip(reason="Sort works if done against keys in same slot") def test_sort_store(self, r): r.rpush('a', '2', '3', '1') assert r.sort('a', store='sorted_values') == 3 assert r.lrange('sorted_values', 0, -1) == [b'1', b'2', b'3'] + @pytest.mark.skip(reason="Sort works if done against keys in same slot") def test_sort_all_options(self, r): r['user:1:username'] = 'zeus' r['user:2:username'] = 'titan' From f8f58fe326a30d35de3b98a5ce9ecd8ff1551fbf Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 24 Mar 2019 23:54:04 +0100 Subject: [PATCH 29/65] Rip out the old sort client side version and revert back this method to now only work on the same-slot logic. This might be breaking change when released. --- rediscluster/client.py | 146 ----------------------------------------- 1 file changed, 146 deletions(-) diff --git a/rediscluster/client.py b/rediscluster/client.py index a2d4742a..dba80661 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -911,152 +911,6 @@ def rpoplpush(self, src, dst): return None - def sort(self, name, start=None, num=None, by=None, get=None, desc=False, alpha=False, store=None, groups=None): - """Sort and return the list, set or sorted set at ``name``. - - :start: and :num: - allow for paging through the sorted data - - :by: - allows using an external key to weight and sort the items. - Use an "*" to indicate where in the key the item value is located - - :get: - allows for returning items from external keys rather than the - sorted data itself. 
Use an "*" to indicate where int he key - the item value is located - - :desc: - allows for reversing the sort - - :alpha: - allows for sorting lexicographically rather than numerically - - :store: - allows for storing the result of the sort into the key `store` - - ClusterImpl: - A full implementation of the server side sort mechanics because many of the - options work on multiple keys that can exist on multiple servers. - """ - if (start is None and num is not None) or \ - (start is not None and num is None): - raise RedisError("RedisError: ``start`` and ``num`` must both be specified") - try: - data_type = b"{0}".format(self.type(name)) - - if data_type == b"none": - return [] - elif data_type == b"set": - data = list(self.smembers(name))[:] - elif data_type == b"list": - data = self.lrange(name, 0, -1) - else: - raise RedisClusterException("Unable to sort data type : {0}".format(data_type)) - if by is not None: - # _sort_using_by_arg mutates data so we don't - # need need a return value. - self._sort_using_by_arg(data, by, alpha) - elif not alpha: - data.sort(key=self._strtod_key_func) - else: - data.sort() - if desc: - data = data[::-1] - if not (start is None and num is None): - data = data[start:start + num] - - if get: - data = self._retrive_data_from_sort(data, get) - - if store is not None: - if data_type == b"set": - self.delete(store) - self.rpush(store, *data) - elif data_type == b"list": - self.delete(store) - self.rpush(store, *data) - else: - raise RedisClusterException("Unable to store sorted data for data type : {0}".format(data_type)) - - return len(data) - - if groups: - if not get or isinstance(get, (bytes, basestring)) or len(get) < 2: - raise DataError('when using "groups" the "get" argument ' - 'must be specified and contain at least ' - 'two keys') - n = len(get) - return list(izip(*[data[i::n] for i in range(n)])) - else: - return data - except KeyError: - return [] - - def _retrive_data_from_sort(self, data, get): - """ - Used by sort() - """ - if get is not None: - if isinstance(get, (bytes, basestring)): - get = [get] - new_data = [] - for k in data: - for g in get: - single_item = self._get_single_item(k, g) - new_data.append(single_item) - data = new_data - return data - - def _get_single_item(self, k, g): - """ - Used by sort() - """ - if getattr(k, "decode", None): - k = k.decode("utf-8") - - if '*' in g: - g = g.replace('*', k) - if '->' in g: - key, hash_key = g.split('->') - single_item = self.get(key, {}).get(hash_key) - else: - single_item = self.get(g) - elif '#' in g: - single_item = k - else: - single_item = None - return b"{0}".format(single_item) - - def _strtod_key_func(self, arg): - """ - Used by sort() - """ - return float(arg) - - def _sort_using_by_arg(self, data, by, alpha): - """ - Used by sort() - """ - if getattr(by, "decode", None): - by = by.decode("utf-8") - - def _by_key(arg): - if getattr(arg, "decode", None): - arg = arg.decode("utf-8") - - key = by.replace('*', arg) - if '->' in by: - key, hash_key = key.split('->') - v = self.hget(key, hash_key) - if alpha: - return v - else: - return float(v) - else: - return self.get(key) - data.sort(key=_by_key) - ### # Set commands From 7907f038ae5fd8382af19d2bf16e0e0d056b1e97 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Mon, 25 Mar 2019 00:07:46 +0100 Subject: [PATCH 30/65] Add skip marks for most broken set tests that requires cross slot implementation to work. 
Also fixed one broken test after zadd signature update --- tests/test_commands.py | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/tests/test_commands.py b/tests/test_commands.py index 3d041c9d..91377ed7 100644 --- a/tests/test_commands.py +++ b/tests/test_commands.py @@ -856,6 +856,7 @@ def test_zadd_incr(self, r): assert r.zadd('a', {'a1': 1}) == 1 assert r.zadd('a', {'a1': 4.5}, incr=True) == 5.5 + @pytest.mark.skip(reason="Test works if done against keys in same slot") def test_zadd_incr_with_xx(self, r): # this asks zadd to incr 'a1' only if it exists, but it clearly # doesn't. Redis returns a null value in this case and so should @@ -895,6 +896,7 @@ def test_zinterstore_fail_cross_slot(self, r): r.zinterstore('d', ['a', 'b', 'c']) assert re.search('ClusterCrossSlotError', str(excinfo)) + @pytest.mark.skip(reason="Test works if done against keys in same slot") def test_zinterstore_sum(self, r): r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) @@ -903,6 +905,7 @@ def test_zinterstore_sum(self, r): assert r.zrange('d', 0, -1, withscores=True) == \ [(b'a3', 8), (b'a1', 9)] + @pytest.mark.skip(reason="Test works if done against keys in same slot") def test_zinterstore_max(self, r): r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) @@ -911,6 +914,7 @@ def test_zinterstore_max(self, r): assert r.zrange('d', 0, -1, withscores=True) == \ [(b'a3', 5), (b'a1', 6)] + @pytest.mark.skip(reason="Test works if done against keys in same slot") def test_zinterstore_min(self, r): r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) r.zadd('b', {'a1': 2, 'a2': 3, 'a3': 5}) @@ -919,6 +923,7 @@ def test_zinterstore_min(self, r): assert r.zrange('d', 0, -1, withscores=True) == \ [(b'a1', 1), (b'a3', 3)] + @pytest.mark.skip(reason="Test works if done against keys in same slot") def test_zinterstore_with_weight(self, r): r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) @@ -928,6 +933,7 @@ def test_zinterstore_with_weight(self, r): [(b'a3', 20), (b'a1', 23)] @skip_if_server_version_lt('4.9.0') + @pytest.mark.skip(reason="Test works if done against keys in same slot") def test_zpopmax(self, r): r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) assert r.zpopmax('a') == [(b'a3', 3)] @@ -937,6 +943,7 @@ def test_zpopmax(self, r): [(b'a2', 2), (b'a1', 1)] @skip_if_server_version_lt('4.9.0') + @pytest.mark.skip(reason="Test works if done against keys in same slot") def test_zpopmin(self, r): r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) assert r.zpopmin('a') == [(b'a1', 1)] @@ -946,6 +953,7 @@ def test_zpopmin(self, r): [(b'a2', 2), (b'a3', 3)] @skip_if_server_version_lt('4.9.0') + @pytest.mark.skip(reason="Test works if done against keys in same slot") def test_bzpopmax(self, r): r.zadd('a', {'a1': 1, 'a2': 2}) r.zadd('b', {'b1': 10, 'b2': 20}) @@ -958,6 +966,7 @@ def test_bzpopmax(self, r): assert r.bzpopmax('c', timeout=1) == (b'c', b'c1', 100) @skip_if_server_version_lt('4.9.0') + @pytest.mark.skip(reason="Test works if done against keys in same slot") def test_bzpopmin(self, r): r.zadd('a', {'a1': 1, 'a2': 2}) r.zadd('b', {'b1': 10, 'b2': 20}) @@ -1108,13 +1117,14 @@ def test_zscore(self, r): assert r.zscore('a', 'a4') is None def test_zunionstore_fail_crossslot(self, r): - r.zadd('a', a1=1, a2=1, a3=1) - r.zadd('b', a1=2, a2=2, a3=2) - r.zadd('c', a1=6, a3=5, a4=4) + r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) + r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) + r.zadd('c', {'a1': 6, 'a2': 5, 'a3': 4}) with 
pytest.raises(ResponseError) as excinfo: r.zunionstore('d', ['a', 'b', 'c']) assert re.search('ClusterCrossSlotError', str(excinfo)) + @pytest.mark.skip(reason="Test works if done against keys in same slot") def test_zunionstore_sum(self, r): r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) @@ -1123,6 +1133,7 @@ def test_zunionstore_sum(self, r): assert r.zrange('d', 0, -1, withscores=True) == \ [(b'a2', 3), (b'a4', 4), (b'a3', 8), (b'a1', 9)] + @pytest.mark.skip(reason="Test works if done against keys in same slot") def test_zunionstore_max(self, r): r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) @@ -1131,6 +1142,7 @@ def test_zunionstore_max(self, r): assert r.zrange('d', 0, -1, withscores=True) == \ [(b'a2', 2), (b'a4', 4), (b'a3', 5), (b'a1', 6)] + @pytest.mark.skip(reason="Test works if done against keys in same slot") def test_zunionstore_min(self, r): r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 4}) @@ -1139,6 +1151,7 @@ def test_zunionstore_min(self, r): assert r.zrange('d', 0, -1, withscores=True) == \ [(b'a1', 1), (b'a2', 2), (b'a3', 3), (b'a4', 4)] + @pytest.mark.skip(reason="Test works if done against keys in same slot") def test_zunionstore_with_weight(self, r): r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) From c671e072e4fbbf9314fbae21362d0045bb804a6c Mon Sep 17 00:00:00 2001 From: Grokzen Date: Mon, 25 Mar 2019 00:07:59 +0100 Subject: [PATCH 31/65] Fix binary string typo --- tests/test_pipeline.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_pipeline.py b/tests/test_pipeline.py index 5f01b919..963c7ca3 100644 --- a/tests/test_pipeline.py +++ b/tests/test_pipeline.py @@ -119,7 +119,7 @@ def test_exec_error_in_response(self, r): # we can't lpush to a key that's a string value, so this should # be a ResponseError exception assert isinstance(result[2], ResponseError) - assert r['c'] == 'a' + assert r['c'] == b'a' # since this isn't a transaction, the other commands after the # error are still executed From b1b4c59126b7e2191fcacc653914f839ba65c708 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Mon, 25 Mar 2019 00:19:50 +0100 Subject: [PATCH 32/65] Fix 2 broken things after last rebase --- tests/test_cluster_obj.py | 9 --------- 1 file changed, 9 deletions(-) diff --git a/tests/test_cluster_obj.py b/tests/test_cluster_obj.py index 1e442be4..938b3a89 100644 --- a/tests/test_cluster_obj.py +++ b/tests/test_cluster_obj.py @@ -479,19 +479,10 @@ def test_access_correct_slave_with_readonly_mode_client(sr): 'get_master_node_by_slot', return_value=master_value) as return_master_mock: readonly_client = RedisCluster(host="127.0.0.1", port=7000, readonly_mode=True) -<<<<<<< HEAD assert b('foo') == readonly_client.get('foo16706') readonly_client = RedisCluster.from_url(url="redis://127.0.0.1:7000/0", readonly_mode=True) assert b('foo') == readonly_client.get('foo16706') -======= - assert b'foo' == readonly_client.get('foo16706') - assert return_master_mock.call_count == 0 - - readonly_client = RedisCluster.from_url(url="redis://127.0.0.1:7000/0", readonly_mode=True) - assert b'foo' == readonly_client.get('foo16706') - assert return_master_mock.call_count == 0 ->>>>>>> Fix more byte method conversion calls def test_refresh_using_specific_nodes(r): From 389d7b4cfc2ebf2601740617d3aa413e7db3a3b0 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Wed, 10 Apr 2019 00:51:51 +0200 Subject: [PATCH 33/65] Fix test_pipeline_readonly --- 
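Background note (an illustration, not part of the diff below): redis-py 3.0 changed zadd() to take an explicit mapping instead of keyword arguments, which is the signature change this fix tracks:

    # redis-py 2.x style, no longer accepted:
    #   r.zadd('foo88', z1=1)
    # redis-py 3.0 style used in the updated test:
    r.zadd('foo88', {'z1': 1})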
tests/test_pipeline.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_pipeline.py b/tests/test_pipeline.py index 963c7ca3..72d190c1 100644 --- a/tests/test_pipeline.py +++ b/tests/test_pipeline.py @@ -541,8 +541,8 @@ def test_pipeline_readonly(self, r, ro): On readonly mode, we supports get related stuff only. """ r.set('foo71', 'a1') # we assume this key is set on 127.0.0.1:7001 - r.zadd('foo88', z1=1) # we assume this key is set on 127.0.0.1:7002 - r.zadd('foo88', z2=4) + r.zadd('foo88', {'z1': 1}) # we assume this key is set on 127.0.0.1:7002 + r.zadd('foo88', {'z2': 4}) with ro.pipeline() as readonly_pipe: readonly_pipe.get('foo71').zrange('foo88', 0, 5, withscores=True) From b9096230846d2bdd3fd5d80e579c4ed4ebe2cb2c Mon Sep 17 00:00:00 2001 From: Grokzen Date: Wed, 10 Apr 2019 00:54:46 +0200 Subject: [PATCH 34/65] Fix tests test_moved_redirection, test_moved_redirection_pipeline --- tests/test_cluster_obj.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/test_cluster_obj.py b/tests/test_cluster_obj.py index 938b3a89..9ec515d3 100644 --- a/tests/test_cluster_obj.py +++ b/tests/test_cluster_obj.py @@ -364,7 +364,7 @@ def test_moved_redirection(): Important thing to verify is that it tries to talk to the second node. """ - r = RedisCluster(host="127.0.0.1", port=7000) + r = get_mocked_redis_client(host="127.0.0.1", port=7000) m = Mock(autospec=True) def ask_redirect_effect(connection, *args, **options): @@ -403,7 +403,8 @@ def ok_response(connection, *args, **options): parse_response.side_effect = moved_redirect_effect - r = RedisCluster(host="127.0.0.1", port=7000) + # r = RedisCluster(host="127.0.0.1", port=7000) + r = get_mocked_redis_client(host="127.0.0.1", port=7000) p = r.pipeline() p.set("foo", "bar") assert p.execute() == ["MOCK_OK"] From 651a473300446ad934775f27f6ca305e5335ace2 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Wed, 10 Apr 2019 00:56:09 +0200 Subject: [PATCH 35/65] fix test test_access_correct_slave_with_readonly_mode_client --- tests/test_cluster_obj.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_cluster_obj.py b/tests/test_cluster_obj.py index 9ec515d3..632851f0 100644 --- a/tests/test_cluster_obj.py +++ b/tests/test_cluster_obj.py @@ -480,10 +480,10 @@ def test_access_correct_slave_with_readonly_mode_client(sr): 'get_master_node_by_slot', return_value=master_value) as return_master_mock: readonly_client = RedisCluster(host="127.0.0.1", port=7000, readonly_mode=True) - assert b('foo') == readonly_client.get('foo16706') + assert b'foo' == readonly_client.get('foo16706') readonly_client = RedisCluster.from_url(url="redis://127.0.0.1:7000/0", readonly_mode=True) - assert b('foo') == readonly_client.get('foo16706') + assert b'foo' == readonly_client.get('foo16706') def test_refresh_using_specific_nodes(r): From 715b9f592a6424c5280fa3fab6fe289dd942c8a0 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Wed, 10 Apr 2019 01:07:10 +0200 Subject: [PATCH 36/65] Update test methods of TestPubSubPubSubSubcommands class to fix the broken tests --- tests/test_pubsub.py | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/tests/test_pubsub.py b/tests/test_pubsub.py index 556f584f..566fd78b 100644 --- a/tests/test_pubsub.py +++ b/tests/test_pubsub.py @@ -479,29 +479,30 @@ def t_run(rc): class TestPubSubPubSubSubcommands(object): - """ - Test Pub/Sub subcommands of PUBSUB - @see https://redis.io/commands/pubsub - """ - 
@skip_if_redis_py_version_lt('2.10.6') + @skip_if_server_version_lt('2.8.0') def test_pubsub_channels(self, r): - r.pubsub(ignore_subscribe_messages=True).subscribe('foo', 'bar', 'baz', 'quux') + p = r.pubsub(ignore_subscribe_messages=True) + p.subscribe('foo', 'bar', 'baz', 'quux') channels = sorted(r.pubsub_channels()) assert channels == [b'bar', b'baz', b'foo', b'quux'] - @skip_if_redis_py_version_lt('2.10.6') + @skip_if_server_version_lt('2.8.0') def test_pubsub_numsub(self, r): - r.pubsub(ignore_subscribe_messages=True).subscribe('foo', 'bar', 'baz') - r.pubsub(ignore_subscribe_messages=True).subscribe('bar', 'baz') - r.pubsub(ignore_subscribe_messages=True).subscribe('baz') + p1 = r.pubsub(ignore_subscribe_messages=True) + p1.subscribe('foo', 'bar', 'baz') + p2 = r.pubsub(ignore_subscribe_messages=True) + p2.subscribe('bar', 'baz') + p3 = r.pubsub(ignore_subscribe_messages=True) + p3.subscribe('baz') channels = [(b'foo', 1), (b'bar', 2), (b'baz', 3)] - assert channels == sorted(r.pubsub_numsub('foo', 'bar', 'baz')) + assert channels == r.pubsub_numsub('foo', 'bar', 'baz') - @skip_if_redis_py_version_lt('2.10.6') + @skip_if_server_version_lt('2.8.0') def test_pubsub_numpat(self, r): - r.pubsub(ignore_subscribe_messages=True).psubscribe('*oo', '*ar', 'b*z') + p = r.pubsub(ignore_subscribe_messages=True) + p.psubscribe('*oo', '*ar', 'b*z') assert r.pubsub_numpat() == 3 From d71822e1af0a05cdd0a41de9086a650ec1761385 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Wed, 24 Apr 2019 19:29:50 +0200 Subject: [PATCH 37/65] Update conftest.py and start to update test_commands.py to be the same in redis-py --- tests/conftest.py | 123 +++++++++++++++++++++++++++++--------- tests/test_cluster_obj.py | 13 +++- tests/test_commands.py | 48 ++++++++++++++- 3 files changed, 152 insertions(+), 32 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index f359ccaa..23871eff 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -20,31 +20,34 @@ _REDIS_VERSIONS = {} -def get_versions(**kwargs): - """ - """ - key = json.dumps(kwargs) +def get_version(**kwargs): + params = {'host': 'localhost', 'port': 7000} + params.update(kwargs) + key = '%s:%s' % (params['host'], params['port']) if key not in _REDIS_VERSIONS: - client = _get_client(**kwargs) - _REDIS_VERSIONS[key] = {key: value['redis_version'] for key, value in client.info().items()} - return _REDIS_VERSIONS[key] + client = RedisCluster(**params) + # INFO command returns for all nodes but we only care for port 7000 + client_info = client.info() + for client_id, client_data in client_info.items(): + if '7000' in key: + _REDIS_VERSIONS[key] = client_data['redis_version'] + + client.connection_pool.disconnect() + return _REDIS_VERSIONS[key] -def _get_client(cls=None, **kwargs): - """ - """ - if not cls: - cls = RedisCluster - params = { - 'startup_nodes': [{ - 'host': '127.0.0.1', 'port': 7000 - }], - 'socket_timeout': 10, - 'decode_responses': False, - } +def _get_client(cls, request=None, **kwargs): + params = {'host': 'localhost', 'port': 7000} params.update(kwargs) - return cls(**params) + client = cls(**params) + client.flushdb() + if request: + def teardown(): + client.flushdb() + client.connection_pool.disconnect() + request.addfinalizer(teardown) + return client def _init_client(request, cls=None, **kwargs): @@ -78,13 +81,13 @@ def skip_if_not_password_protected_nodes(): def skip_if_server_version_lt(min_version): - """ - """ - versions = get_versions() - for version in versions.values(): - if StrictVersion(version) < 
StrictVersion(min_version): - return pytest.mark.skipif(True, reason="") - return pytest.mark.skipif(False, reason="") + check = StrictVersion(get_version()) < StrictVersion(min_version) + return pytest.mark.skipif(check, reason="") + + +def skip_if_server_version_gte(min_version): + check = StrictVersion(get_version()) >= StrictVersion(min_version) + return pytest.mark.skipif(check, reason="") def skip_if_redis_py_version_lt(min_version): @@ -148,3 +151,69 @@ def sr(request, *args, **kwargs): Returns a instance of RedisCluster """ return _init_client(request, reinitialize_steps=1, cls=RedisCluster, **kwargs) + + +def _gen_cluster_mock_resp(r, response): + mock_connection_pool = Mock() + connection = Mock() + response = response + connection.read_response.return_value = response + mock_connection_pool.get_connection.return_value = connection + r.connection_pool = mock_connection_pool + return r + + +@pytest.fixture() +def mock_cluster_resp_ok(request, **kwargs): + r = _get_client(RedisCluster, request, **kwargs) + return _gen_cluster_mock_resp(r, 'OK') + + +@pytest.fixture() +def mock_cluster_resp_int(request, **kwargs): + r = _get_client(RedisCluster, request, **kwargs) + return _gen_cluster_mock_resp(r, '2') + + +@pytest.fixture() +def mock_cluster_resp_info(request, **kwargs): + r = _get_client(RedisCluster, request, **kwargs) + response = ('cluster_state:ok\r\ncluster_slots_assigned:16384\r\n' + 'cluster_slots_ok:16384\r\ncluster_slots_pfail:0\r\n' + 'cluster_slots_fail:0\r\ncluster_known_nodes:7\r\n' + 'cluster_size:3\r\ncluster_current_epoch:7\r\n' + 'cluster_my_epoch:2\r\ncluster_stats_messages_sent:170262\r\n' + 'cluster_stats_messages_received:105653\r\n') + return _gen_cluster_mock_resp(r, response) + + +@pytest.fixture() +def mock_cluster_resp_nodes(request, **kwargs): + r = _get_client(RedisCluster, request, **kwargs) + response = ('c8253bae761cb1ecb2b61857d85dfe455a0fec8b 172.17.0.7:7006 ' + 'slave aa90da731f673a99617dfe930306549a09f83a6b 0 ' + '1447836263059 5 connected\n' + '9bd595fe4821a0e8d6b99d70faa660638a7612b3 172.17.0.7:7008 ' + 'master - 0 1447836264065 0 connected\n' + 'aa90da731f673a99617dfe930306549a09f83a6b 172.17.0.7:7003 ' + 'myself,master - 0 0 2 connected 5461-10922\n' + '1df047e5a594f945d82fc140be97a1452bcbf93e 172.17.0.7:7007 ' + 'slave 19efe5a631f3296fdf21a5441680f893e8cc96ec 0 ' + '1447836262556 3 connected\n' + '4ad9a12e63e8f0207025eeba2354bcf4c85e5b22 172.17.0.7:7005 ' + 'master - 0 1447836262555 7 connected 0-5460\n' + '19efe5a631f3296fdf21a5441680f893e8cc96ec 172.17.0.7:7004 ' + 'master - 0 1447836263562 3 connected 10923-16383\n' + 'fbb23ed8cfa23f17eaf27ff7d0c410492a1093d6 172.17.0.7:7002 ' + 'master,fail - 1447829446956 1447829444948 1 disconnected\n' + ) + return _gen_cluster_mock_resp(r, response) + + +@pytest.fixture() +def mock_cluster_resp_slaves(request, **kwargs): + r = _get_client(RedisCluster, request, **kwargs) + response = ("['1df047e5a594f945d82fc140be97a1452bcbf93e 172.17.0.7:7007 " + "slave 19efe5a631f3296fdf21a5441680f893e8cc96ec 0 " + "1447836789290 3 connected']") + return _gen_cluster_mock_resp(r, response) diff --git a/tests/test_cluster_obj.py b/tests/test_cluster_obj.py index 632851f0..756890dd 100644 --- a/tests/test_cluster_obj.py +++ b/tests/test_cluster_obj.py @@ -425,7 +425,13 @@ def assert_moved_redirection_on_slave(sr, connection_pool_cls, cluster_obj): 'server_type': 'slave', } - master_value = {'host': '127.0.0.1', 'name': '127.0.0.1:7000', 'port': 7000, 'server_type': 'master'} + master_value = { + 'host': '127.0.0.1', 
+ 'name': '127.0.0.1:7000', + 'port': 7000, + 'server_type': 'master', + } + with patch.object(ClusterConnectionPool, 'get_master_node_by_slot') as return_master_mock: return_master_mock.return_value = master_value assert cluster_obj.get('foo16706') == b'foo' @@ -437,10 +443,13 @@ def test_moved_redirection_on_slave_with_default_client(sr): Test that the client is redirected normally with default (readonly_mode=False) client even when we connect always to slave. """ + r = get_mocked_redis_client(host="127.0.0.1", port=7000) + assert_moved_redirection_on_slave( sr, ClusterConnectionPool, - RedisCluster(host="127.0.0.1", port=7000, reinitialize_steps=1) + # RedisCluster(host="127.0.0.1", port=7000, reinitialize_steps=1) + get_mocked_redis_client(host="127.0.0.1", port=7000, reinitialize_steps=1) ) diff --git a/tests/test_commands.py b/tests/test_commands.py index 91377ed7..01081bd7 100644 --- a/tests/test_commands.py +++ b/tests/test_commands.py @@ -7,7 +7,9 @@ import time # rediscluster imports +import rediscluster from rediscluster.exceptions import RedisClusterException, ClusterCrossSlotError +from rediscluster.utils import dict_merge from tests.conftest import skip_if_server_version_lt, skip_if_redis_py_version_lt # 3rd party imports @@ -18,15 +20,55 @@ from redis import exceptions -pytestmark = skip_if_server_version_lt('2.9.0') +@pytest.fixture() +def slowlog(request, r): + current_config = r.config_get() + old_slower_than_value = current_config['slowlog-log-slower-than'] + old_max_legnth_value = current_config['slowlog-max-len'] + + def cleanup(): + r.config_set('slowlog-log-slower-than', old_slower_than_value) + r.config_set('slowlog-max-len', old_max_legnth_value) + request.addfinalizer(cleanup) + + r.config_set('slowlog-log-slower-than', 0) + r.config_set('slowlog-max-len', 128) def redis_server_time(client): - seconds, milliseconds = list(client.time().values())[0] - timestamp = float('{0}.{1}'.format(seconds, milliseconds)) + all_clients_time = client.time() + for server_id, server_time_data in all_clients_time.items(): + if '7000' in server_id: + seconds, milliseconds = server_time_data + + timestamp = float('%s.%s' % (seconds, milliseconds)) return datetime.datetime.fromtimestamp(timestamp) +def get_stream_message(client, stream, message_id): + "Fetch a stream message and format it as a (message_id, fields) pair" + response = client.xrange(stream, min=message_id, max=message_id) + assert len(response) == 1 + return response[0] + + +# RESPONSE CALLBACKS +class TestResponseCallbacks(object): + "Tests for the response callback system" + + def test_response_callbacks(self, r): + all_response_callbacks = dict_merge( + rediscluster.RedisCluster.RESPONSE_CALLBACKS, + rediscluster.RedisCluster.CLUSTER_COMMANDS_RESPONSE_CALLBACKS, + ) + + assert r.response_callbacks == all_response_callbacks + assert id(r.response_callbacks) != id(all_response_callbacks) + r.set_response_callback('GET', lambda x: 'static') + r['a'] = 'foo' + assert r['a'] == 'static' + + class TestRedisCommands(object): @skip_if_server_version_lt('2.9.9') From 51386e9f32b8fd00f35a00f09ac7f3a1357002ed Mon Sep 17 00:00:00 2001 From: Grokzen Date: Thu, 25 Apr 2019 21:03:46 +0200 Subject: [PATCH 38/65] Add command handlers for CLIENT ID command --- rediscluster/client.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rediscluster/client.py b/rediscluster/client.py index dba80661..5e953019 100644 --- a/rediscluster/client.py +++ b/rediscluster/client.py @@ -60,7 +60,7 @@ class RedisCluster(Redis): 
"BGREWRITEAOF", "BGSAVE", "CLIENT LIST", "CLIENT GETNAME", "CONFIG RESETSTAT", "CONFIG REWRITE", "DBSIZE", "LASTSAVE", "PING", "SAVE", "SLOWLOG LEN", "SLOWLOG RESET", "TIME", "KEYS", "CLUSTER INFO", "PUBSUB CHANNELS", - "PUBSUB NUMSUB", "PUBSUB NUMPAT", + "PUBSUB NUMSUB", "PUBSUB NUMPAT", "CLIENT ID", ], 'all-nodes'), string_keys_to_dict([ "FLUSHALL", "FLUSHDB", "SCRIPT LOAD", "SCRIPT FLUSH", "SCRIPT EXISTS", "SCAN", @@ -99,7 +99,7 @@ class RedisCluster(Redis): "BGREWRITEAOF", "BGSAVE", "CLIENT LIST", "CLIENT GETNAME", "CONFIG RESETSTAT", "CONFIG REWRITE", "DBSIZE", "LASTSAVE", "PING", "SAVE", "SLOWLOG LEN", "SLOWLOG RESET", "TIME", "SCAN", "CLUSTER INFO", 'CLUSTER ADDSLOTS', 'CLUSTER COUNT-FAILURE-REPORTS', - 'CLUSTER DELSLOTS', 'CLUSTER FAILOVER', 'CLUSTER FORGET', "FLUSHALL", "FLUSHDB", + 'CLUSTER DELSLOTS', 'CLUSTER FAILOVER', 'CLUSTER FORGET', "FLUSHALL", "FLUSHDB", "CLIENT ID", ], lambda command, res: res), string_keys_to_dict([ "SCRIPT LOAD", From a076bf438952ff88828f35c2c0c4f11856b88492 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Thu, 25 Apr 2019 21:47:40 +0200 Subject: [PATCH 39/65] Fix import error and add new skip function when we don't have a working cluster implementation --- tests/conftest.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/conftest.py b/tests/conftest.py index 23871eff..f2fe40a9 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -11,6 +11,7 @@ # 3rd party imports import pytest from redis import Redis +from redis.exceptions import ResponseError from distutils.version import StrictVersion # put our path in front so we can be sure we are testing locally not against the global package @@ -74,6 +75,10 @@ def teardown(): return client +def skip_for_no_cluster_impl(): + return pytest.mark.skipif(True, reason="Cluster has no or working implementation for this test") + + def skip_if_not_password_protected_nodes(): """ """ From 8615f5d9f08729dc54f98165f27faa5e6307b0c5 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Thu, 25 Apr 2019 22:42:12 +0200 Subject: [PATCH 40/65] Add new script that can be used to generate and bruteforce keys to match a given slot --- examples/generate_slot_keys.py | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 examples/generate_slot_keys.py diff --git a/examples/generate_slot_keys.py b/examples/generate_slot_keys.py new file mode 100644 index 00000000..45001fa9 --- /dev/null +++ b/examples/generate_slot_keys.py @@ -0,0 +1,26 @@ +import random +import string +import sys +from rediscluster import RedisCluster + +startup_nodes = [{"host": "127.0.0.1", "port": "7000"}] + +# Note: decode_responses must be set to True when used with python3 +rc = RedisCluster(startup_nodes=startup_nodes, decode_responses=True) + +# 10 batches +batch_set = {i: [] for i in range(0, 16384)} + +# Do 100000 slot randos in each block +for j in range(0, 100000): + rando_string = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(5)) + + keyslot = rc.connection_pool.nodes.keyslot(rando_string) + + # batch_set.setdefault(keyslot) + batch_set[keyslot].append(rando_string) + +for i in range(0, 16384): + if len(batch_set[i]) > 0: + print(i, ':', batch_set[i]) + sys.exit(0) From 6883f482735b447eb36f704482ff9eea77c579ea Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 28 Apr 2019 22:14:12 +0200 Subject: [PATCH 41/65] Major update to test_commands and conftest.py to match the redis-py 3.0.1 version source code. All tests that is not skipped now passes. 
--- tests/conftest.py | 3 +- tests/test_commands.py | 1707 +++++++++++++++++++++++++++++++++------- 2 files changed, 1426 insertions(+), 284 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index f2fe40a9..19ed432d 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -10,9 +10,10 @@ # 3rd party imports import pytest +from distutils.version import StrictVersion +from mock import Mock from redis import Redis from redis.exceptions import ResponseError -from distutils.version import StrictVersion # put our path in front so we can be sure we are testing locally not against the global package basepath = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) diff --git a/tests/test_commands.py b/tests/test_commands.py index 01081bd7..3e6fab94 100644 --- a/tests/test_commands.py +++ b/tests/test_commands.py @@ -10,10 +10,11 @@ import rediscluster from rediscluster.exceptions import RedisClusterException, ClusterCrossSlotError from rediscluster.utils import dict_merge -from tests.conftest import skip_if_server_version_lt, skip_if_redis_py_version_lt +from tests.conftest import skip_if_server_version_lt, skip_if_redis_py_version_lt, skip_if_server_version_gte, skip_for_no_cluster_impl # 3rd party imports import pytest +import redis from redis._compat import unichr, ascii_letters, iteritems, iterkeys, itervalues, unicode from redis.client import parse_info from redis.exceptions import ResponseError, DataError, RedisError, DataError @@ -22,7 +23,7 @@ @pytest.fixture() def slowlog(request, r): - current_config = r.config_get() + current_config = get_main_cluster_node_data(r.config_get()) old_slower_than_value = current_config['slowlog-log-slower-than'] old_max_legnth_value = current_config['slowlog-max-len'] @@ -52,6 +53,16 @@ def get_stream_message(client, stream, message_id): return response[0] +def get_main_cluster_node_data(command_result): + """ + Tries to find whatever node is running on port :7000 in the cluster resonse + """ + for node_id, node_data in command_result.items(): + if '7000' in node_id: + return node_data + return None + + # RESPONSE CALLBACKS class TestResponseCallbacks(object): "Tests for the response callback system" @@ -71,59 +82,109 @@ def test_response_callbacks(self, r): class TestRedisCommands(object): - @skip_if_server_version_lt('2.9.9') - def test_zrevrangebylex(self, r): - r.zadd('a', {'a': 0, 'b': 0, 'c': 0, 'd': 0, 'e': 0, 'f': 0, 'g': 0}) - assert r.zrevrangebylex('a', '[c', '-') == [b'c', b'b', b'a'] - assert r.zrevrangebylex('a', '(c', '-') == [b'b', b'a'] - assert r.zrevrangebylex('a', '(g', '[aaa') == \ - [b'f', b'e', b'd', b'c', b'b'] - assert r.zrevrangebylex('a', '+', '[f') == [b'g', b'f'] - assert r.zrevrangebylex('a', '+', '-', start=3, num=2) == \ - [b'd', b'c'] - def test_command_on_invalid_key_type(self, r): r.lpush('a', '1') - with pytest.raises(ResponseError): + with pytest.raises(redis.ResponseError): r['a'] # SERVER INFORMATION def test_client_list(self, r): - for server, clients in r.client_list().items(): - assert isinstance(clients[0], dict) - assert 'addr' in clients[0] - + clients = r.client_list() + client_data = get_main_cluster_node_data(clients)[0] + assert isinstance(client_data, dict) + assert 'addr' in client_data + + @skip_if_server_version_lt('5.0.0') + def test_client_list_type(self, r): + with pytest.raises(exceptions.RedisError): + r.client_list(_type='not a client type') + for client_type in ['normal', 'master', 'replica', 'pubsub']: + clients = get_main_cluster_node_data(r.client_list(_type=client_type)) + 
assert isinstance(clients, list) + + @skip_if_server_version_lt('5.0.0') + def test_client_id(self, r): + assert get_main_cluster_node_data(r.client_id()) > 0 + + @skip_if_server_version_lt('5.0.0') + def test_client_unblock(self, r): + myid = get_main_cluster_node_data(r.client_id()) + assert not r.client_unblock(myid) + assert not r.client_unblock(myid, error=True) + assert not r.client_unblock(myid, error=False) + + @skip_if_server_version_lt('2.6.9') def test_client_getname(self, r): - for server, name in r.client_getname().items(): - assert name is None + assert get_main_cluster_node_data(r.client_getname()) is None + @skip_if_server_version_lt('2.6.9') + @skip_for_no_cluster_impl() def test_client_setname(self, r): - with pytest.raises(RedisClusterException): - assert r.client_setname('redis_py_test') + assert r.client_setname('redis_py_test') + assert r.client_getname() == 'redis_py_test' + + @skip_if_server_version_lt('2.6.9') + @skip_for_no_cluster_impl() + def test_client_list_after_client_setname(self, r): + r.client_setname('redis_py_test') + clients = r.client_list() + # we don't know which client ours will be + assert 'redis_py_test' in [c['name'] for c in clients] + + @skip_if_server_version_lt('2.9.50') + def test_client_pause(self, r): + assert r.client_pause(1) + assert r.client_pause(timeout=1) + with pytest.raises(exceptions.RedisError): + r.client_pause(timeout='not an integer') def test_config_get(self, r): - for server, data in r.config_get().items(): - assert 'maxmemory' in data - assert data['maxmemory'].isdigit() + data = get_main_cluster_node_data(r.config_get()) + assert 'maxmemory' in data + assert data['maxmemory'].isdigit() def test_config_resetstat(self, r): r.ping() - for server, info in r.info().items(): - prior_commands_processed = int(info['total_commands_processed']) - assert prior_commands_processed >= 1 + + prior_commands_processed = int(get_main_cluster_node_data(r.info())['total_commands_processed']) + assert prior_commands_processed >= 1 r.config_resetstat() - for server, info in r.info().items(): - reset_commands_processed = int(info['total_commands_processed']) - assert reset_commands_processed < prior_commands_processed + reset_commands_processed = int(get_main_cluster_node_data(r.info())['total_commands_processed']) + assert reset_commands_processed < prior_commands_processed def test_config_set(self, r): - assert r.config_set('dbfilename', 'redis_py_test.rdb') - for server, config in r.config_get().items(): - assert config['dbfilename'] == 'redis_py_test.rdb' + data = get_main_cluster_node_data(r.config_get()) + rdbname = data['dbfilename'] + try: + assert r.config_set('dbfilename', 'redis_py_test.rdb') + assert get_main_cluster_node_data(r.config_get())['dbfilename'] == 'redis_py_test.rdb' + finally: + assert r.config_set('dbfilename', rdbname) + + def test_dbsize(self, r): + r['a'] = 'foo' + r['b'] = 'bar' + # Count all commands sent to the DB. 
Since we have one slave + # for every master we will look for 4 and not 2 + dbsize_sum = sum([db_size_count for node_id, db_size_count in r.dbsize().items()]) + assert dbsize_sum == 4 def test_echo(self, r): - for server, res in r.echo('foo bar').items(): - assert res == b'foo bar' + assert get_main_cluster_node_data(r.echo('foo bar')) == b'foo bar' + + def test_info(self, r): + r['a'] = 'foo' + r['b'] = 'bar' + info = get_main_cluster_node_data(r.info()) + assert isinstance(info, dict) + # We only have a "db0" in cluster mode and only one of the commands will bind to node :7000 + assert info['db0']['keys'] == 1 + # Sum all keys in all slots + keys_sum = sum([node_data.get('db0', {}).get('keys', 0) for node_id, node_data in r.info().items()]) + assert keys_sum == 4 + + def test_lastsave(self, r): + assert isinstance(get_main_cluster_node_data(r.lastsave()), datetime.datetime) def test_object(self, r): r['a'] = 'foo' @@ -135,11 +196,50 @@ def test_object(self, r): def test_ping(self, r): assert r.ping() + @skip_for_no_cluster_impl() + def test_slowlog_get(self, r, slowlog): + assert r.slowlog_reset() + unicode_string = unichr(3456) + 'abcd' + unichr(3421) + r.get(unicode_string) + slowlog = get_main_cluster_node_data(r.slowlog_get()) + assert isinstance(slowlog, list) + commands = [log['command'] for log in slowlog] + + get_command = b' '.join((b'GET', unicode_string.encode('utf-8'))) + assert get_command in commands + assert b'SLOWLOG RESET' in commands + # the order should be ['GET ', 'SLOWLOG RESET'], + # but if other clients are executing commands at the same time, there + # could be commands, before, between, or after, so just check that + # the two we care about are in the appropriate order. + assert commands.index(get_command) < commands.index(b'SLOWLOG RESET') + + # make sure other attributes are typed correctly + assert isinstance(slowlog[0]['start_time'], int) + assert isinstance(slowlog[0]['duration'], int) + + @skip_for_no_cluster_impl() + def test_slowlog_get_limit(self, r, slowlog): + assert r.slowlog_reset() + r.get('foo') + r.get('bar') + slowlog = r.slowlog_get(1) + assert isinstance(slowlog, list) + commands = [log['command'] for log in slowlog] + assert b'GET foo' not in commands + assert b'GET bar' in commands + + @skip_for_no_cluster_impl() + def test_slowlog_length(self, r, slowlog): + r.get('foo') + assert isinstance(r.slowlog_len(), int) + + @skip_if_server_version_lt('2.6.0') def test_time(self, r): - for t in r.time().values(): - assert len(t) == 2 - assert isinstance(t[0], int) - assert isinstance(t[1], int) + t = get_main_cluster_node_data(r.time()) + assert len(t) == 2 + assert isinstance(t[0], int) + assert isinstance(t[1], int) # BASIC KEY COMMANDS def test_append(self, r): @@ -148,6 +248,7 @@ def test_append(self, r): assert r.append('a', 'a2') == 4 assert r['a'] == b'a1a2' + @skip_if_server_version_lt('2.6.0') def test_bitcount(self, r): r.setbit('a', 5, True) assert r.bitcount('a') == 1 @@ -167,16 +268,64 @@ def test_bitcount(self, r): assert r.bitcount('a', 1, 1) == 1 def test_bitop_not_supported(self, r): + """ + Validate that the command is blocked in cluster mode and throws an Exception + """ r['a'] = '' with pytest.raises(RedisClusterException): r.bitop('not', 'r', 'a') + @skip_if_server_version_lt('2.6.0') + @skip_for_no_cluster_impl() + def test_bitop_not_empty_string(self, r): + r['a'] = '' + r.bitop('not', 'r', 'a') + assert r.get('r') is None + + @skip_if_server_version_lt('2.6.0') + @skip_for_no_cluster_impl() + def test_bitop_not(self, r): + 
test_str = b'\xAA\x00\xFF\x55' + correct = ~0xAA00FF55 & 0xFFFFFFFF + r['a'] = test_str + r.bitop('not', 'r', 'a') + assert int(binascii.hexlify(r['r']), 16) == correct + + @skip_if_server_version_lt('2.6.0') + @skip_for_no_cluster_impl() + def test_bitop_not_in_place(self, r): + test_str = b'\xAA\x00\xFF\x55' + correct = ~0xAA00FF55 & 0xFFFFFFFF + r['a'] = test_str + r.bitop('not', 'a', 'a') + assert int(binascii.hexlify(r['a']), 16) == correct + + @skip_if_server_version_lt('2.6.0') + @skip_for_no_cluster_impl() + def test_bitop_single_string(self, r): + test_str = b'\x01\x02\xFF' + r['a'] = test_str + r.bitop('and', 'res1', 'a') + r.bitop('or', 'res2', 'a') + r.bitop('xor', 'res3', 'a') + assert r['res1'] == test_str + assert r['res2'] == test_str + assert r['res3'] == test_str + + @skip_if_server_version_lt('2.6.0') + @skip_for_no_cluster_impl() + def test_bitop_string_operands(self, r): + r['a'] = b'\x01\x02\xFF\xFF' + r['b'] = b'\x01\x02\xFF' + r.bitop('and', 'res1', 'a', 'b') + r.bitop('or', 'res2', 'a', 'b') + r.bitop('xor', 'res3', 'a', 'b') + assert int(binascii.hexlify(r['res1']), 16) == 0x0102FF00 + assert int(binascii.hexlify(r['res2']), 16) == 0x0102FFFF + assert int(binascii.hexlify(r['res3']), 16) == 0x000000FF + @skip_if_server_version_lt('2.8.7') - @skip_if_redis_py_version_lt("2.10.2") def test_bitpos(self, r): - """ - Bitpos was added in redis-py in version 2.10.2 - """ key = 'key:bitpos' r.set(key, b'\xff\xf0\x00') assert r.bitpos(key, 0) == 12 @@ -185,20 +334,16 @@ def test_bitpos(self, r): r.set(key, b'\x00\xff\xf0') assert r.bitpos(key, 1, 0) == 8 assert r.bitpos(key, 1, 1) == 8 - r.set(key, '\x00\x00\x00') + r.set(key, b'\x00\x00\x00') assert r.bitpos(key, 1) == -1 @skip_if_server_version_lt('2.8.7') - @skip_if_redis_py_version_lt("2.10.2") def test_bitpos_wrong_arguments(self, r): - """ - Bitpos was added in redis-py in version 2.10.2 - """ key = 'key:bitpos:wrong:args' r.set(key, b'\xff\xf0\x00') - with pytest.raises(RedisError): + with pytest.raises(exceptions.RedisError): r.bitpos(key, 0, end=1) == 12 - with pytest.raises(RedisError): + with pytest.raises(exceptions.RedisError): r.bitpos(key, 7) == 12 def test_decr(self, r): @@ -231,6 +376,23 @@ def test_delitem(self, r): del r['a'] assert r.get('a') is None + @skip_if_server_version_lt('4.0.0') + def test_unlink(self, r): + assert r.unlink('a') == 0 + r['a'] = 'foo' + assert r.unlink('a') == 1 + assert r.get('a') is None + + @skip_if_server_version_lt('4.0.0') + @skip_for_no_cluster_impl() + def test_unlink_with_multiple_keys(self, r): + r['a'] = 'foo' + r['b'] = 'bar' + assert r.unlink('a', 'b') == 2 + assert r.get('a') is None + assert r.get('b') is None + + @skip_if_server_version_lt('2.6.0') def test_dump_and_restore(self, r): r['a'] = 'foo' dumped = r.dump('a') @@ -238,28 +400,22 @@ def test_dump_and_restore(self, r): r.restore('a', 0, dumped) assert r['a'] == b'foo' + @skip_if_server_version_lt('3.0.0') + def test_dump_and_restore_and_replace(self, r): + r['a'] = 'bar' + dumped = r.dump('a') + with pytest.raises(redis.ResponseError): + r.restore('a', 0, dumped) + + r.restore('a', 0, dumped, replace=True) + assert r['a'] == b'bar' + def test_exists(self, r): assert r.exists('a') == 0 - r['a'] = 'foo' - r['b'] = 'bar' - assert r.exists('a') == 1 - assert r.exists('b') == 1 - # This no longer works in cluster. 
See test_exists_fail_not_same_slots() for failing test - # assert r.exists('a', 'b') == 2 - - def test_exists_fail_not_same_slots(self, r): - """ - This test is conditioned on that the 2 keys will be in different slots - """ - key_a = 'a' - key_b = 'b' - assert r.cluster_keyslot(key_a) != r.cluster_keyslot(key_b) - r[key_a] = 'foo' - r[key_b] = 'bar' - assert r.exists('a') == 1 - assert r.exists('b') == 1 - with pytest.raises(ClusterCrossSlotError): - r.exists('a', 'b') + r['G0B96'] = 'foo' + r['TEFX5'] = 'bar' + assert r.exists('G0B96') == 1 + assert r.exists('G0B96', 'TEFX5') == 2 def test_exists_contains(self, r): assert 'a' not in r @@ -313,6 +469,10 @@ def test_getitem_raises_keyerror_for_missing_key(self, r): with pytest.raises(KeyError): r['a'] + def test_getitem_does_not_raise_keyerror_for_empty_string(self, r): + r['a'] = b"" + assert r['a'] == b"" + def test_get_set_bit(self, r): # no value assert not r.getbit('a', 5) @@ -353,6 +513,7 @@ def test_incrby(self, r): assert r.incrby('a', 4) == 5 assert r['a'] == b'5' + @skip_if_server_version_lt('2.6.0') def test_incrbyfloat(self, r): assert r.incrbyfloat('a') == 1.0 assert r['a'] == b'1' @@ -391,26 +552,28 @@ def test_msetnx(self, r): assert r[k] == v assert r.get('d') is None + @skip_if_server_version_lt('2.6.0') def test_pexpire(self, r): assert not r.pexpire('a', 60000) r['a'] = 'foo' assert r.pexpire('a', 60000) assert 0 < r.pttl('a') <= 60000 assert r.persist('a') - # redis-py tests seemed to be for older version of redis? - # redis-2.8+ returns -1 if key exists but is non-expiring: http://redis.io/commands/pttl assert r.pttl('a') == -1 + @skip_if_server_version_lt('2.6.0') def test_pexpireat_datetime(self, r): expire_at = redis_server_time(r) + datetime.timedelta(minutes=1) r['a'] = 'foo' assert r.pexpireat('a', expire_at) assert 0 < r.pttl('a') <= 61000 + @skip_if_server_version_lt('2.6.0') def test_pexpireat_no_key(self, r): expire_at = redis_server_time(r) + datetime.timedelta(minutes=1) assert not r.pexpireat('a', expire_at) + @skip_if_server_version_lt('2.6.0') def test_pexpireat_unixtime(self, r): expire_at = redis_server_time(r) + datetime.timedelta(minutes=1) r['a'] = 'foo' @@ -418,17 +581,33 @@ def test_pexpireat_unixtime(self, r): assert r.pexpireat('a', expire_at_seconds) assert 0 < r.pttl('a') <= 61000 + @skip_if_server_version_lt('2.6.0') def test_psetex(self, r): assert r.psetex('a', 1000, 'value') assert r['a'] == b'value' assert 0 < r.pttl('a') <= 1000 + @skip_if_server_version_lt('2.6.0') def test_psetex_timedelta(self, r): expire_at = datetime.timedelta(milliseconds=1000) assert r.psetex('a', expire_at, 'value') assert r['a'] == b'value' assert 0 < r.pttl('a') <= 1000 + @skip_if_server_version_lt('2.6.0') + def test_pttl(self, r): + assert not r.pexpire('a', 10000) + r['a'] = '1' + assert r.pexpire('a', 10000) + assert 0 < r.pttl('a') <= 10000 + assert r.persist('a') + assert r.pttl('a') == -1 + + @skip_if_server_version_lt('2.8.0') + def test_pttl_no_key(self, r): + "PTTL on servers 2.8 and after return -2 when the key doesn't exist" + assert r.pttl('a') == -2 + def test_randomkey(self, r): assert r.randomkey() is None for key in ('a', 'b', 'c'): @@ -441,15 +620,6 @@ def test_rename(self, r): assert r.get('a') is None assert r['b'] == b'1' - with pytest.raises(ResponseError) as ex: - r.rename("foo", "foo") - assert unicode(ex.value).startswith("source and destination objects are the same") - - assert r.get("foo") is None - with pytest.raises(ResponseError) as ex: - r.rename("foo", "bar") - assert 
unicode(ex.value).startswith("no such key") - def test_renamenx(self, r): r['a'] = '1' r['b'] = '2' @@ -457,14 +627,13 @@ def test_renamenx(self, r): assert r['a'] == b'1' assert r['b'] == b'2' - assert r.renamenx('a', 'c') - assert r['c'] == b'1' - + @skip_if_server_version_lt('2.6.0') def test_set_nx(self, r): assert r.set('a', '1', nx=True) assert not r.set('a', '2', nx=True) assert r['a'] == b'1' + @skip_if_server_version_lt('2.6.0') def test_set_xx(self, r): assert not r.set('a', '1', xx=True) assert r.get('a') is None @@ -472,27 +641,32 @@ def test_set_xx(self, r): assert r.set('a', '2', xx=True) assert r.get('a') == b'2' + @skip_if_server_version_lt('2.6.0') def test_set_px(self, r): assert r.set('a', '1', px=10000) assert r['a'] == b'1' assert 0 < r.pttl('a') <= 10000 assert 0 < r.ttl('a') <= 10 + @skip_if_server_version_lt('2.6.0') def test_set_px_timedelta(self, r): expire_at = datetime.timedelta(milliseconds=1000) assert r.set('a', '1', px=expire_at) assert 0 < r.pttl('a') <= 1000 assert 0 < r.ttl('a') <= 1 + @skip_if_server_version_lt('2.6.0') def test_set_ex(self, r): assert r.set('a', '1', ex=10) assert 0 < r.ttl('a') <= 10 + @skip_if_server_version_lt('2.6.0') def test_set_ex_timedelta(self, r): expire_at = datetime.timedelta(seconds=60) assert r.set('a', '1', ex=expire_at) assert 0 < r.ttl('a') <= 60 + @skip_if_server_version_lt('2.6.0') def test_set_multipleoptions(self, r): r['a'] = 'val' assert r.set('a', '1', xx=True, px=10000) @@ -527,6 +701,18 @@ def test_substr(self, r): assert r.substr('a', 3, 5) == b'345' assert r.substr('a', 3, -2) == b'345678' + def test_ttl(self, r): + r['a'] = '1' + assert r.expire('a', 10) + assert 0 < r.ttl('a') <= 10 + assert r.persist('a') + assert r.ttl('a') == -1 + + @skip_if_server_version_lt('2.8.0') + def test_ttl_nokey(self, r): + "TTL on servers 2.8 and after return -2 when the key doesn't exist" + assert r.ttl('a') == -2 + def test_type(self, r): assert r.type('a') == b'none' r['a'] = '1' @@ -543,35 +729,43 @@ def test_type(self, r): # LIST COMMANDS def test_blpop(self, r): - r.rpush('a{foo}', '1', '2') - r.rpush('b{foo}', '3', '4') - assert r.blpop(['b{foo}', 'a{foo}'], timeout=1) == (b'b{foo}', b'3') - assert r.blpop(['b{foo}', 'a{foo}'], timeout=1) == (b'b{foo}', b'4') - assert r.blpop(['b{foo}', 'a{foo}'], timeout=1) == (b'a{foo}', b'1') - assert r.blpop(['b{foo}', 'a{foo}'], timeout=1) == (b'a{foo}', b'2') - assert r.blpop(['b{foo}', 'a{foo}'], timeout=1) is None - r.rpush('c{foo}', '1') - assert r.blpop('c{foo}', timeout=1) == (b'c{foo}', b'1') + """ + Generated keys for slot + 16299: ['0J8KD', '822JO', '8TJPT', 'HD644', 'SKUCM', 'N4N5Z', 'NRSWJ'] + """ + r.rpush('0J8KD', '1', '2') + r.rpush('822JO', '3', '4') + assert r.blpop(['822JO', '0J8KD'], timeout=1) == (b'822JO', b'3') + assert r.blpop(['822JO', '0J8KD'], timeout=1) == (b'822JO', b'4') + assert r.blpop(['822JO', '0J8KD'], timeout=1) == (b'0J8KD', b'1') + assert r.blpop(['822JO', '0J8KD'], timeout=1) == (b'0J8KD', b'2') + assert r.blpop(['822JO', '0J8KD'], timeout=1) is None + r.rpush('c', '1') + assert r.blpop('c', timeout=1) == (b'c', b'1') def test_brpop(self, r): - r.rpush('a{foo}', '1', '2') - r.rpush('b{foo}', '3', '4') - assert r.brpop(['b{foo}', 'a{foo}'], timeout=1) == (b'b{foo}', b'4') - assert r.brpop(['b{foo}', 'a{foo}'], timeout=1) == (b'b{foo}', b'3') - assert r.brpop(['b{foo}', 'a{foo}'], timeout=1) == (b'a{foo}', b'2') - assert r.brpop(['b{foo}', 'a{foo}'], timeout=1) == (b'a{foo}', b'1') - assert r.brpop(['b{foo}', 'a{foo}'], timeout=1) is None - 
r.rpush('c{foo}', '1') - assert r.brpop('c{foo}', timeout=1) == (b'c{foo}', b'1') + """ + Generated keys for slot + 16299: ['0J8KD', '822JO', '8TJPT', 'HD644', 'SKUCM', 'N4N5Z', 'NRSWJ'] + """ + r.rpush('0J8KD', '1', '2') + r.rpush('822JO', '3', '4') + assert r.brpop(['822JO', '0J8KD'], timeout=1) == (b'822JO', b'4') + assert r.brpop(['822JO', '0J8KD'], timeout=1) == (b'822JO', b'3') + assert r.brpop(['822JO', '0J8KD'], timeout=1) == (b'0J8KD', b'2') + assert r.brpop(['822JO', '0J8KD'], timeout=1) == (b'0J8KD', b'1') + assert r.brpop(['822JO', '0J8KD'], timeout=1) is None + r.rpush('c', '1') + assert r.brpop('c', timeout=1) == (b'c', b'1') def test_brpoplpush(self, r): - r.rpush('a{foo}', '1', '2') - r.rpush('b{foo}', '3', '4') - assert r.brpoplpush('a{foo}', 'b{foo}') == b'2' - assert r.brpoplpush('a{foo}', 'b{foo}') == b'1' - assert r.brpoplpush('a{foo}', 'b{foo}', timeout=1) is None - assert r.lrange('a{foo}', 0, -1) == [] - assert r.lrange('b{foo}', 0, -1) == [b'1', b'2', b'3', b'4'] + r.rpush('a', '1', '2') + r.rpush('b', '3', '4') + assert r.brpoplpush('a', 'b') == b'2' + assert r.brpoplpush('a', 'b') == b'1' + assert r.brpoplpush('a', 'b', timeout=1) is None + assert r.lrange('a', 0, -1) == [] + assert r.lrange('b', 0, -1) == [b'1', b'2', b'3', b'4'] def test_brpoplpush_empty_string(self, r): r.rpush('a', '') @@ -672,55 +866,41 @@ def test_rpushx(self, r): assert r.lrange('a', 0, -1) == [b'1', b'2', b'3', b'4'] # SCAN COMMANDS - @pytest.mark.skip(reason="WIP") + @skip_if_server_version_lt('2.8.0') def test_scan(self, r): + """ + Generated keys for slot + 0 : ['GQ5KU', 'IFWJL', 'X582D'] + """ + r.set('GQ5KU', 1) + r.set('IFWJL', 2) + r.set('X582D', 3) + cursor, keys = get_main_cluster_node_data(r.scan()) + assert cursor == 0 + assert set(keys) == {b'GQ5KU', b'IFWJL', b'X582D'} + _, keys = get_main_cluster_node_data(r.scan(match='GQ5KU')) + assert set(keys) == {b'GQ5KU'} + + @skip_if_server_version_lt('2.8.0') + def test_scan_iter(self, r): r.set('a', 1) r.set('b', 2) r.set('c', 3) - keys = [] - for result in r.scan().values(): - cursor, partial_keys = result - assert cursor == 0 - keys += partial_keys - - assert set(keys) == {b'a', b'b', b'c'} - - keys = [] - for result in r.scan(match='a').values(): - cursor, partial_keys = result - assert cursor == 0 - keys += partial_keys - assert set(keys) == {b'a'} - - @pytest.mark.skip(reason="WIP") - def test_scan_iter(self, r): - alphabet = 'abcdefghijklmnopqrstuvwABCDEFGHIJKLMNOPQRSTUVW' - for i, c in enumerate(alphabet): - r.set(c, i) keys = list(r.scan_iter()) - expected_result = [b"{0}".format(c) for c in alphabet] - assert set(keys) == set(expected_result) - + assert set(keys) == {b'a', b'b', b'c'} keys = list(r.scan_iter(match='a')) assert set(keys) == {b'a'} - r.set('Xa', 1) - r.set('Xb', 2) - r.set('Xc', 3) - keys = list(r.scan_iter('X*', count=1000)) - assert len(keys) == 3 - assert set(keys) == {b'Xa', b'Xb', b'Xc'} - - @pytest.mark.skip(reason="WIP") + @skip_if_server_version_lt('2.8.0') def test_sscan(self, r): r.sadd('a', 1, 2, 3) cursor, members = r.sscan('a') assert cursor == 0 - assert set(members) == {b'a', b'2', b'3'} + assert set(members) == {b'1', b'2', b'3'} _, members = r.sscan('a', match=b'1') assert set(members) == {b'1'} - @pytest.mark.skip(reason="WIP") + @skip_if_server_version_lt('2.8.0') def test_sscan_iter(self, r): r.sadd('a', 1, 2, 3) members = list(r.sscan_iter('a')) @@ -728,7 +908,7 @@ def test_sscan_iter(self, r): members = list(r.sscan_iter('a', match=b'1')) assert set(members) == {b'1'} - 
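Note on the key names used above and below: several of the rewritten tests carry a "Generated keys for slot N" docstring and use opaque key names such as '0J8KD', '822JO' or 'GQ5KU'. Those names are pre-computed so that every key in a multi-key test hashes to the same cluster slot, which is what the removed hash-tag keys ('a{foo}', 'b{foo}') achieved by letting Redis hash only the part inside the braces. A minimal sketch of how such keys could be generated, using the standard Redis Cluster rule HASH_SLOT = CRC16(key) mod 16384, is shown below; the helper names are illustrative and not part of the library or the test suite.

```python
# Illustrative sketch only: brute-force random key names that all map to one
# cluster hash slot, comparable to the pre-generated keys ('0J8KD', '822JO', ...)
# referenced in the test docstrings. Helper names here are hypothetical.
import random
import string


def crc16_xmodem(data):
    # CRC16/XMODEM (poly 0x1021, init 0), the variant Redis Cluster uses for key hashing
    crc = 0
    for byte in bytearray(data):
        crc ^= byte << 8
        for _ in range(8):
            crc = ((crc << 1) ^ 0x1021) if crc & 0x8000 else (crc << 1)
            crc &= 0xFFFF
    return crc


def keyslot(key):
    # HASH_SLOT = CRC16(key) mod 16384 (hash tags ignored here for simplicity)
    return crc16_xmodem(key.encode('utf-8')) % 16384


def find_keys_for_slot(slot, count=5, length=5):
    # Keep drawing random 5-character names until enough of them land in `slot`
    keys = set()
    alphabet = string.ascii_uppercase + string.digits
    while len(keys) < count:
        candidate = ''.join(random.choice(alphabet) for _ in range(length))
        if keyslot(candidate) == slot:
            keys.add(candidate)
    return sorted(keys)

# e.g. find_keys_for_slot(0) yields names of the same shape as '60ZE7', '8I2EQ', ...
```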
@pytest.mark.skip(reason="WIP") + @skip_if_server_version_lt('2.8.0') def test_hscan(self, r): r.hmset('a', {'a': 1, 'b': 2, 'c': 3}) cursor, dic = r.hscan('a') @@ -737,7 +917,7 @@ def test_hscan(self, r): _, dic = r.hscan('a', match='a') assert dic == {b'a': b'1'} - @pytest.mark.skip(reason="WIP") + @skip_if_server_version_lt('2.8.0') def test_hscan_iter(self, r): r.hmset('a', {'a': 1, 'b': 2, 'c': 3}) dic = dict(r.hscan_iter('a')) @@ -745,18 +925,18 @@ def test_hscan_iter(self, r): dic = dict(r.hscan_iter('a', match='a')) assert dic == {b'a': b'1'} - @pytest.mark.skip(reason="WIP") + @skip_if_server_version_lt('2.8.0') def test_zscan(self, r): - r.zadd('a', 1, 'a', 2, 'b', 3, 'c') + r.zadd('a', {'a': 1, 'b': 2, 'c': 3}) cursor, pairs = r.zscan('a') assert cursor == 0 - assert set(pairs) == {(b'a', 1), (b'b, 2'), (b'c', 3)} + assert set(pairs) == {(b'a', 1), (b'b', 2), (b'c', 3)} _, pairs = r.zscan('a', match='a') - assert set(pairs == {(b'a', 1)}) + assert set(pairs) == {(b'a', 1)} - @pytest.mark.skip(reason="WIP") + @skip_if_server_version_lt('2.8.0') def test_zscan_iter(self, r): - r.zadd('a', 1, 'a', 2, 'b', 3, 'c') + r.zadd('a', {'a': 1, 'b': 2, 'c': 3}) pairs = list(r.zscan_iter('a')) assert set(pairs) == {(b'a', 1), (b'b', 2), (b'c', 3)} pairs = list(r.zscan_iter('a', match='a')) @@ -764,7 +944,7 @@ def test_zscan_iter(self, r): # SET COMMANDS def test_sadd(self, r): - members = set([b'1', b'2', b'3']) + members = {b'1', b'2', b'3'} r.sadd('a', *members) assert r.smembers('a') == members @@ -773,35 +953,32 @@ def test_scard(self, r): assert r.scard('a') == 3 def test_sdiff(self, r): - r.sadd('a{foo}', '1', '2', '3') - assert r.sdiff('a{foo}', 'b{foo}') == set([b'1', b'2', b'3']) - r.sadd('b{foo}', '2', '3') - assert r.sdiff('a{foo}', 'b{foo}') == set([b'1']) + r.sadd('a', '1', '2', '3') + assert r.sdiff('a', 'b') == {b'1', b'2', b'3'} + r.sadd('b', '2', '3') + assert r.sdiff('a', 'b') == {b'1'} def test_sdiffstore(self, r): - r.sadd('a{foo}', '1', '2', '3') - assert r.sdiffstore('c{foo}', 'a{foo}', 'b{foo}') == 3 - assert r.smembers('c{foo}') == set([b'1', b'2', b'3']) - r.sadd('b{foo}', '2', '3') - assert r.sdiffstore('c{foo}', 'a{foo}', 'b{foo}') == 1 - assert r.smembers('c{foo}') == set([b'1']) - - # Diff:s that return empty set should not fail - r.sdiffstore('d{foo}', 'e{foo}') == 0 + r.sadd('a', '1', '2', '3') + assert r.sdiffstore('c', 'a', 'b') == 3 + assert r.smembers('c') == {b'1', b'2', b'3'} + r.sadd('b', '2', '3') + assert r.sdiffstore('c', 'a', 'b') == 1 + assert r.smembers('c') == {b'1'} def test_sinter(self, r): - r.sadd('a{foo}', '1', '2', '3') - assert r.sinter('a{foo}', 'b{foo}') == set() - r.sadd('b{foo}', '2', '3') - assert r.sinter('a{foo}', 'b{foo}') == set([b'2', b'3']) + r.sadd('a', '1', '2', '3') + assert r.sinter('a', 'b') == set() + r.sadd('b', '2', '3') + assert r.sinter('a', 'b') == {b'2', b'3'} def test_sinterstore(self, r): - r.sadd('a{foo}', '1', '2', '3') - assert r.sinterstore('c{foo}', 'a{foo}', 'b{foo}') == 0 - assert r.smembers('c{foo}') == set() - r.sadd('b{foo}', '2', '3') - assert r.sinterstore('c{foo}', 'a{foo}', 'b{foo}') == 2 - assert r.smembers('c{foo}') == set([b'2', b'3']) + r.sadd('a', '1', '2', '3') + assert r.sinterstore('c', 'a', 'b') == 0 + assert r.smembers('c') == set() + r.sadd('b', '2', '3') + assert r.sinterstore('c', 'a', 'b') == 2 + assert r.smembers('c') == {b'2', b'3'} def test_sismember(self, r): r.sadd('a', '1', '2', '3') @@ -815,11 +992,11 @@ def test_smembers(self, r): assert r.smembers('a') == {b'1', b'2', b'3'} def 
test_smove(self, r): - r.sadd('a{foo}', 'a1', 'a2') - r.sadd('b{foo}', 'b1', 'b2') - assert r.smove('a{foo}', 'b{foo}', 'a1') - assert r.smembers('a{foo}') == {b'a2'} - assert r.smembers('b{foo}') == {b'b1', b'b2', b'a1'} + r.sadd('a', 'a1', 'a2') + r.sadd('b', 'b1', 'b2') + assert r.smove('a', 'b', 'a1') + assert r.smembers('a') == {b'a2'} + assert r.smembers('b') == {b'b1', b'b2', b'a1'} def test_spop(self, r): s = [b'1', b'2', b'3'] @@ -828,11 +1005,23 @@ def test_spop(self, r): assert value in s assert r.smembers('a') == set(s) - {value} + def test_spop_multi_value(self, r): + s = [b'1', b'2', b'3'] + r.sadd('a', *s) + values = r.spop('a', 2) + assert len(values) == 2 + + for value in values: + assert value in s + + assert r.spop('a', 1) == list(set(s) - set(values)) + def test_srandmember(self, r): s = [b'1', b'2', b'3'] r.sadd('a', *s) assert r.srandmember('a') in s + @skip_if_server_version_lt('2.6.0') def test_srandmember_multi_value(self, r): s = [b'1', b'2', b'3'] r.sadd('a', *s) @@ -844,18 +1033,18 @@ def test_srem(self, r): r.sadd('a', '1', '2', '3', '4') assert r.srem('a', '5') == 0 assert r.srem('a', '2', '4') == 2 - assert r.smembers('a') == set([b'1', b'3']) + assert r.smembers('a') == {b'1', b'3'} def test_sunion(self, r): - r.sadd('a{foo}', '1', '2') - r.sadd('b{foo}', '2', '3') - assert r.sunion('a{foo}', 'b{foo}') == set([b'1', b'2', b'3']) + r.sadd('a', '1', '2') + r.sadd('b', '2', '3') + assert r.sunion('a', 'b') == {b'1', b'2', b'3'} def test_sunionstore(self, r): - r.sadd('a{foo}', '1', '2') - r.sadd('b{foo}', '2', '3') - assert r.sunionstore('c{foo}', 'a{foo}', 'b{foo}') == 3 - assert r.smembers('c{foo}') == set([b'1', b'2', b'3']) + r.sadd('a', '1', '2') + r.sadd('b', '2', '3') + assert r.sunionstore('c', 'a', 'b') == 3 + assert r.smembers('c') == {b'1', b'2', b'3'} # SORTED SET COMMANDS def test_zadd(self, r): @@ -898,8 +1087,12 @@ def test_zadd_incr(self, r): assert r.zadd('a', {'a1': 1}) == 1 assert r.zadd('a', {'a1': 4.5}, incr=True) == 5.5 - @pytest.mark.skip(reason="Test works if done against keys in same slot") + @skip_for_no_cluster_impl() def test_zadd_incr_with_xx(self, r): + """ + Generated keys for slot + 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] + """ # this asks zadd to incr 'a1' only if it exists, but it clearly # doesn't. 
Redis returns a null value in this case and so should # redis-py @@ -930,95 +1123,113 @@ def test_zlexcount(self, r): assert r.zlexcount('a', '-', '+') == 7 assert r.zlexcount('a', '[b', '[f') == 5 - def test_zinterstore_fail_cross_slot(self, r): - r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) - r.zadd('a', {'a1': 2, 'a2': 2, 'a3': 2}) - r.zadd('a', {'a1': 6, 'a2': 5, 'a3': 4}) - with pytest.raises(ResponseError) as excinfo: - r.zinterstore('d', ['a', 'b', 'c']) - assert re.search('ClusterCrossSlotError', str(excinfo)) - - @pytest.mark.skip(reason="Test works if done against keys in same slot") def test_zinterstore_sum(self, r): - r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) - r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) - r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) - assert r.zinterstore('d', ['a', 'b', 'c']) == 2 - assert r.zrange('d', 0, -1, withscores=True) == \ + """ + Generated keys for slot + 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] + """ + r.zadd('60ZE7', {'a1': 1, 'a2': 1, 'a3': 1}) + r.zadd('8I2EQ', {'a1': 2, 'a2': 2, 'a3': 2}) + r.zadd('R8H1V', {'a1': 6, 'a3': 5, 'a4': 4}) + assert r.zinterstore('NJP6N', ['60ZE7', '8I2EQ', 'R8H1V']) == 2 + assert r.zrange('NJP6N', 0, -1, withscores=True) == \ [(b'a3', 8), (b'a1', 9)] - @pytest.mark.skip(reason="Test works if done against keys in same slot") def test_zinterstore_max(self, r): - r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) - r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) - r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) - assert r.zinterstore('d', ['a', 'b', 'c'], aggregate='MAX') == 2 - assert r.zrange('d', 0, -1, withscores=True) == \ + """ + Generated keys for slot + 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] + """ + r.zadd('60ZE7', {'a1': 1, 'a2': 1, 'a3': 1}) + r.zadd('8I2EQ', {'a1': 2, 'a2': 2, 'a3': 2}) + r.zadd('R8H1V', {'a1': 6, 'a3': 5, 'a4': 4}) + assert r.zinterstore('NJP6N', ['60ZE7', '8I2EQ', 'R8H1V'], aggregate='MAX') == 2 + assert r.zrange('NJP6N', 0, -1, withscores=True) == \ [(b'a3', 5), (b'a1', 6)] - @pytest.mark.skip(reason="Test works if done against keys in same slot") def test_zinterstore_min(self, r): - r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) - r.zadd('b', {'a1': 2, 'a2': 3, 'a3': 5}) - r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) - assert r.zinterstore('d', ['a', 'b', 'c'], aggregate='MIN') == 2 - assert r.zrange('d', 0, -1, withscores=True) == \ + """ + Generated keys for slot + 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] + """ + r.zadd('60ZE7', {'a1': 1, 'a2': 2, 'a3': 3}) + r.zadd('8I2EQ', {'a1': 2, 'a2': 3, 'a3': 5}) + r.zadd('R8H1V', {'a1': 6, 'a3': 5, 'a4': 4}) + assert r.zinterstore('NJP6N', ['60ZE7', '8I2EQ', 'R8H1V'], aggregate='MIN') == 2 + assert r.zrange('NJP6N', 0, -1, withscores=True) == \ [(b'a1', 1), (b'a3', 3)] - @pytest.mark.skip(reason="Test works if done against keys in same slot") def test_zinterstore_with_weight(self, r): - r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) - r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) - r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) - assert r.zinterstore('d', {'a': 1, 'b': 2, 'c': 3}) == 2 - assert r.zrange('d', 0, -1, withscores=True) == \ + """ + Generated keys for slot + 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] + """ + r.zadd('60ZE7', {'a1': 1, 'a2': 1, 'a3': 1}) + r.zadd('8I2EQ', {'a1': 2, 'a2': 2, 'a3': 2}) + r.zadd('R8H1V', {'a1': 6, 'a3': 5, 'a4': 4}) + assert r.zinterstore('NJP6N', {'60ZE7': 1, '8I2EQ': 2, 'R8H1V': 3}) == 2 + assert r.zrange('NJP6N', 0, -1, 
withscores=True) == \ [(b'a3', 20), (b'a1', 23)] @skip_if_server_version_lt('4.9.0') - @pytest.mark.skip(reason="Test works if done against keys in same slot") def test_zpopmax(self, r): - r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) - assert r.zpopmax('a') == [(b'a3', 3)] + """ + Generated keys for slot + 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] + """ + r.zadd('60ZE7', {'a1': 1, 'a2': 2, 'a3': 3}) + assert r.zpopmax('60ZE7') == [(b'a3', 3)] # with count - assert r.zpopmax('a', count=2) == \ + assert r.zpopmax('60ZE7', count=2) == \ [(b'a2', 2), (b'a1', 1)] @skip_if_server_version_lt('4.9.0') - @pytest.mark.skip(reason="Test works if done against keys in same slot") def test_zpopmin(self, r): - r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) - assert r.zpopmin('a') == [(b'a1', 1)] + """ + Generated keys for slot + 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] + """ + r.zadd('60ZE7', {'a1': 1, 'a2': 2, 'a3': 3}) + assert r.zpopmin('60ZE7') == [(b'a1', 1)] # with count - assert r.zpopmin('a', count=2) == \ + assert r.zpopmin('60ZE7', count=2) == \ [(b'a2', 2), (b'a3', 3)] @skip_if_server_version_lt('4.9.0') - @pytest.mark.skip(reason="Test works if done against keys in same slot") + @skip_for_no_cluster_impl() def test_bzpopmax(self, r): - r.zadd('a', {'a1': 1, 'a2': 2}) - r.zadd('b', {'b1': 10, 'b2': 20}) - assert r.bzpopmax(['b', 'a'], timeout=1) == (b'b', b'b2', 20) - assert r.bzpopmax(['b', 'a'], timeout=1) == (b'b', b'b1', 10) - assert r.bzpopmax(['b', 'a'], timeout=1) == (b'a', b'a2', 2) - assert r.bzpopmax(['b', 'a'], timeout=1) == (b'a', b'a1', 1) - assert r.bzpopmax(['b', 'a'], timeout=1) is None - r.zadd('c', {'c1': 100}) - assert r.bzpopmax('c', timeout=1) == (b'c', b'c1', 100) + """ + Generated keys for slot + 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] + """ + r.zadd('60ZE7', {'a1': 1, 'a2': 2}) + r.zadd('8I2EQ', {'b1': 10, 'b2': 20}) + assert r.bzpopmax(['8I2EQ', '60ZE7'], timeout=1) == (b'b', b'b2', 20) + assert r.bzpopmax(['8I2EQ', '60ZE7'], timeout=1) == (b'b', b'b1', 10) + assert r.bzpopmax(['8I2EQ', '60ZE7'], timeout=1) == (b'a', b'a2', 2) + assert r.bzpopmax(['8I2EQ', '60ZE7'], timeout=1) == (b'a', b'a1', 1) + assert r.bzpopmax(['8I2EQ', '60ZE7'], timeout=1) is None + r.zadd('R8H1V', {'c1': 100}) + assert r.bzpopmax('R8H1V', timeout=1) == (b'c', b'c1', 100) @skip_if_server_version_lt('4.9.0') - @pytest.mark.skip(reason="Test works if done against keys in same slot") + @skip_for_no_cluster_impl() def test_bzpopmin(self, r): - r.zadd('a', {'a1': 1, 'a2': 2}) - r.zadd('b', {'b1': 10, 'b2': 20}) - assert r.bzpopmin(['b', 'a'], timeout=1) == (b'b', b'b1', 10) - assert r.bzpopmin(['b', 'a'], timeout=1) == (b'b', b'b2', 20) - assert r.bzpopmin(['b', 'a'], timeout=1) == (b'a', b'a1', 1) - assert r.bzpopmin(['b', 'a'], timeout=1) == (b'a', b'a2', 2) - assert r.bzpopmin(['b', 'a'], timeout=1) is None - r.zadd('c', {'c1': 100}) - assert r.bzpopmin('c', timeout=1) == (b'c', b'c1', 100) + """ + Generated keys for slot + 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] + """ + r.zadd('60ZE7', {'a1': 1, 'a2': 2}) + r.zadd('8I2EQ', {'b1': 10, 'b2': 20}) + assert r.bzpopmin(['8I2EQ', '60ZE7'], timeout=1) == (b'b', b'b1', 10) + assert r.bzpopmin(['8I2EQ', '60ZE7'], timeout=1) == (b'b', b'b2', 20) + assert r.bzpopmin(['8I2EQ', '60ZE7'], timeout=1) == (b'a', b'a1', 1) + assert r.bzpopmin(['8I2EQ', '60ZE7'], timeout=1) == (b'a', b'a2', 2) + assert r.bzpopmin(['8I2EQ', '60ZE7'], 
timeout=1) is None + r.zadd('R8H1V', {'c1': 100}) + assert r.bzpopmin('R8H1V', timeout=1) == (b'c', b'c1', 100) def test_zrange(self, r): r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) @@ -1158,51 +1369,56 @@ def test_zscore(self, r): assert r.zscore('a', 'a2') == 2.0 assert r.zscore('a', 'a4') is None - def test_zunionstore_fail_crossslot(self, r): - r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) - r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) - r.zadd('c', {'a1': 6, 'a2': 5, 'a3': 4}) - with pytest.raises(ResponseError) as excinfo: - r.zunionstore('d', ['a', 'b', 'c']) - assert re.search('ClusterCrossSlotError', str(excinfo)) - - @pytest.mark.skip(reason="Test works if done against keys in same slot") def test_zunionstore_sum(self, r): - r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) - r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) - r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) - assert r.zunionstore('d', ['a', 'b', 'c']) == 4 - assert r.zrange('d', 0, -1, withscores=True) == \ + """ + Generated keys for slot + 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] + """ + r.zadd('60ZE7', {'a1': 1, 'a2': 1, 'a3': 1}) + r.zadd('8I2EQ', {'a1': 2, 'a2': 2, 'a3': 2}) + r.zadd('R8H1V', {'a1': 6, 'a3': 5, 'a4': 4}) + assert r.zunionstore('NJP6N', ['60ZE7', '8I2EQ', 'R8H1V']) == 4 + assert r.zrange('NJP6N', 0, -1, withscores=True) == \ [(b'a2', 3), (b'a4', 4), (b'a3', 8), (b'a1', 9)] - @pytest.mark.skip(reason="Test works if done against keys in same slot") def test_zunionstore_max(self, r): - r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) - r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) - r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) - assert r.zunionstore('d', ['a', 'b', 'c'], aggregate='MAX') == 4 - assert r.zrange('d', 0, -1, withscores=True) == \ + """ + Generated keys for slot + 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] + """ + r.zadd('60ZE7', {'a1': 1, 'a2': 1, 'a3': 1}) + r.zadd('8I2EQ', {'a1': 2, 'a2': 2, 'a3': 2}) + r.zadd('R8H1V', {'a1': 6, 'a3': 5, 'a4': 4}) + assert r.zunionstore('NJP6N', ['60ZE7', '8I2EQ', 'R8H1V'], aggregate='MAX') == 4 + assert r.zrange('NJP6N', 0, -1, withscores=True) == \ [(b'a2', 2), (b'a4', 4), (b'a3', 5), (b'a1', 6)] - @pytest.mark.skip(reason="Test works if done against keys in same slot") def test_zunionstore_min(self, r): - r.zadd('a', {'a1': 1, 'a2': 2, 'a3': 3}) - r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 4}) - r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) - assert r.zunionstore('d', ['a', 'b', 'c'], aggregate='MIN') == 4 - assert r.zrange('d', 0, -1, withscores=True) == \ + """ + Generated keys for slot + 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] + """ + r.zadd('60ZE7', {'a1': 1, 'a2': 2, 'a3': 3}) + r.zadd('8I2EQ', {'a1': 2, 'a2': 2, 'a3': 4}) + r.zadd('R8H1V', {'a1': 6, 'a3': 5, 'a4': 4}) + assert r.zunionstore('NJP6N', ['60ZE7', '8I2EQ', 'R8H1V'], aggregate='MIN') == 4 + assert r.zrange('NJP6N', 0, -1, withscores=True) == \ [(b'a1', 1), (b'a2', 2), (b'a3', 3), (b'a4', 4)] - @pytest.mark.skip(reason="Test works if done against keys in same slot") def test_zunionstore_with_weight(self, r): - r.zadd('a', {'a1': 1, 'a2': 1, 'a3': 1}) - r.zadd('b', {'a1': 2, 'a2': 2, 'a3': 2}) - r.zadd('c', {'a1': 6, 'a3': 5, 'a4': 4}) - assert r.zunionstore('d', {'a': 1, 'b': 2, 'c': 3}) == 4 - assert r.zrange('d', 0, -1, withscores=True) == \ + """ + Generated keys for slot + 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] + """ + r.zadd('60ZE7', {'a1': 1, 'a2': 1, 'a3': 1}) + r.zadd('8I2EQ', {'a1': 2, 'a2': 
2, 'a3': 2}) + r.zadd('R8H1V', {'a1': 6, 'a3': 5, 'a4': 4}) + assert r.zunionstore('NJP6N', {'60ZE7': 1, '8I2EQ': 2, 'R8H1V': 3}) == 4 + assert r.zrange('NJP6N', 0, -1, withscores=True) == \ [(b'a2', 5), (b'a4', 12), (b'a3', 20), (b'a1', 23)] -# # HYPERLOGLOG TESTS + # HYPERLOGLOG TESTS + @skip_if_server_version_lt('2.8.9') def test_pfadd(self, r): members = {b'1', b'2', b'3'} assert r.pfadd('a', *members) == 1 @@ -1220,6 +1436,7 @@ def test_pfcount(self, r): assert r.pfcount('b') == len(members_b) assert r.pfcount('a', 'b') == len(members_b.union(members)) + @skip_if_server_version_lt('2.8.9') def test_pfmerge(self, r): mema = {b'1', b'2', b'3'} memb = {b'2', b'3', b'4'} @@ -1272,6 +1489,7 @@ def test_hincrby(self, r): assert r.hincrby('a', '1', amount=2) == 3 assert r.hincrby('a', '1', amount=-2) == 1 + @skip_if_server_version_lt('2.6.0') def test_hincrbyfloat(self, r): assert r.hincrbyfloat('a', '1') == 1.0 assert r.hincrbyfloat('a', '1') == 2.0 @@ -1311,6 +1529,929 @@ def test_hvals(self, r): remote_vals = r.hvals('a') assert sorted(local_vals) == sorted(remote_vals) + @skip_if_server_version_lt('3.2.0') + def test_hstrlen(self, r): + r.hmset('a', {'1': '22', '2': '333'}) + assert r.hstrlen('a', '1') == 2 + assert r.hstrlen('a', '2') == 3 + + # SORT + def test_sort_basic(self, r): + r.rpush('a', '3', '2', '1', '4') + assert r.sort('a') == [b'1', b'2', b'3', b'4'] + + def test_sort_limited(self, r): + r.rpush('a', '3', '2', '1', '4') + assert r.sort('a', start=1, num=2) == [b'2', b'3'] + + @skip_for_no_cluster_impl() + def test_sort_by(self, r): + r['score:1'] = 8 + r['score:2'] = 3 + r['score:3'] = 5 + r.rpush('a', '3', '2', '1') + assert r.sort('a', by='score:*') == [b'2', b'3', b'1'] + + @skip_for_no_cluster_impl() + def test_sort_get(self, r): + r['user:1'] = 'u1' + r['user:2'] = 'u2' + r['user:3'] = 'u3' + r.rpush('a', '2', '3', '1') + assert r.sort('a', get='user:*') == [b'u1', b'u2', b'u3'] + + @skip_for_no_cluster_impl() + def test_sort_get_multi(self, r): + r['user:1'] = 'u1' + r['user:2'] = 'u2' + r['user:3'] = 'u3' + r.rpush('a', '2', '3', '1') + assert r.sort('a', get=('user:*', '#')) == \ + [b'u1', b'1', b'u2', b'2', b'u3', b'3'] + + @skip_for_no_cluster_impl() + def test_sort_get_groups_two(self, r): + r['user:1'] = 'u1' + r['user:2'] = 'u2' + r['user:3'] = 'u3' + r.rpush('a', '2', '3', '1') + assert r.sort('a', get=('user:*', '#'), groups=True) == \ + [(b'u1', b'1'), (b'u2', b'2'), (b'u3', b'3')] + + def test_sort_groups_string_get(self, r): + r['user:1'] = 'u1' + r['user:2'] = 'u2' + r['user:3'] = 'u3' + r.rpush('a', '2', '3', '1') + with pytest.raises(exceptions.DataError): + r.sort('a', get='user:*', groups=True) + + def test_sort_groups_just_one_get(self, r): + r['user:1'] = 'u1' + r['user:2'] = 'u2' + r['user:3'] = 'u3' + r.rpush('a', '2', '3', '1') + with pytest.raises(exceptions.DataError): + r.sort('a', get=['user:*'], groups=True) + + def test_sort_groups_no_get(self, r): + r['user:1'] = 'u1' + r['user:2'] = 'u2' + r['user:3'] = 'u3' + r.rpush('a', '2', '3', '1') + with pytest.raises(exceptions.DataError): + r.sort('a', groups=True) + + @skip_for_no_cluster_impl() + def test_sort_groups_three_gets(self, r): + r['user:1'] = 'u1' + r['user:2'] = 'u2' + r['user:3'] = 'u3' + r['door:1'] = 'd1' + r['door:2'] = 'd2' + r['door:3'] = 'd3' + r.rpush('a', '2', '3', '1') + assert r.sort('a', get=('user:*', 'door:*', '#'), groups=True) == \ + [ + (b'u1', b'd1', b'1'), + (b'u2', b'd2', b'2'), + (b'u3', b'd3', b'3') + ] + + def test_sort_desc(self, r): + r.rpush('a', '2', '3', 
'1') + assert r.sort('a', desc=True) == [b'3', b'2', b'1'] + + def test_sort_alpha(self, r): + r.rpush('a', 'e', 'c', 'b', 'd', 'a') + assert r.sort('a', alpha=True) == \ + [b'a', b'b', b'c', b'd', b'e'] + + def test_sort_store(self, r): + """ + Generated keys for slot + 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] + """ + r.rpush('60ZE7', '2', '3', '1') + assert r.sort('60ZE7', store='8I2EQ') == 3 + assert r.lrange('8I2EQ', 0, -1) == [b'1', b'2', b'3'] + + @skip_for_no_cluster_impl() + def test_sort_all_options(self, r): + """ + Generated keys for slot + 0 : ['60ZE7', '8I2EQ', 'R8H1V', 'NJP6N', '0VI0A', '0CEIC', 'MV75A', 'TMKD9'] + """ + r['user:1:username'] = 'zeus' + r['user:2:username'] = 'titan' + r['user:3:username'] = 'hermes' + r['user:4:username'] = 'hercules' + r['user:5:username'] = 'apollo' + r['user:6:username'] = 'athena' + r['user:7:username'] = 'hades' + r['user:8:username'] = 'dionysus' + + r['user:1:favorite_drink'] = 'yuengling' + r['user:2:favorite_drink'] = 'rum' + r['user:3:favorite_drink'] = 'vodka' + r['user:4:favorite_drink'] = 'milk' + r['user:5:favorite_drink'] = 'pinot noir' + r['user:6:favorite_drink'] = 'water' + r['user:7:favorite_drink'] = 'gin' + r['user:8:favorite_drink'] = 'apple juice' + + r.rpush('gods', '5', '8', '3', '1', '2', '7', '6', '4') + num = r.sort('gods', start=2, num=4, by='user:*:username', + get='user:*:favorite_drink', desc=True, alpha=True, + store='sorted') + assert num == 4 + assert r.lrange('sorted', 0, 10) == \ + [b'vodka', b'milk', b'gin', b'apple juice'] + + def test_sort_issue_924(self, r): + # Tests for issue https://github.com/andymccurdy/redis-py/issues/924 + r.execute_command('SADD', 'issue#924', 1) + r.execute_command('SORT', 'issue#924') + + @skip_for_no_cluster_impl() + def test_cluster_addslots(self, mock_cluster_resp_ok): + assert mock_cluster_resp_ok.cluster('ADDSLOTS', 1) is True + + @skip_for_no_cluster_impl() + def test_cluster_count_failure_reports(self, mock_cluster_resp_int): + assert isinstance(mock_cluster_resp_int.cluster( + 'COUNT-FAILURE-REPORTS', 'node'), int) + + @skip_for_no_cluster_impl() + def test_cluster_countkeysinslot(self, mock_cluster_resp_int): + assert isinstance(mock_cluster_resp_int.cluster( + 'COUNTKEYSINSLOT', 2), int) + + @skip_for_no_cluster_impl() + def test_cluster_delslots(self, mock_cluster_resp_ok): + assert mock_cluster_resp_ok.cluster('DELSLOTS', 1) is True + + @skip_for_no_cluster_impl() + def test_cluster_failover(self, mock_cluster_resp_ok): + assert mock_cluster_resp_ok.cluster('FAILOVER', 1) is True + + @skip_for_no_cluster_impl() + def test_cluster_forget(self, mock_cluster_resp_ok): + assert mock_cluster_resp_ok.cluster('FORGET', 1) is True + + @skip_for_no_cluster_impl() + def test_cluster_info(self, mock_cluster_resp_info): + assert isinstance(mock_cluster_resp_info.cluster('info'), dict) + + @skip_for_no_cluster_impl() + def test_cluster_keyslot(self, mock_cluster_resp_int): + assert isinstance(mock_cluster_resp_int.cluster( + 'keyslot', 'asdf'), int) + + @skip_for_no_cluster_impl() + def test_cluster_meet(self, mock_cluster_resp_ok): + assert mock_cluster_resp_ok.cluster('meet', 'ip', 'port', 1) is True + + @skip_for_no_cluster_impl() + def test_cluster_nodes(self, mock_cluster_resp_nodes): + assert isinstance(mock_cluster_resp_nodes.cluster('nodes'), dict) + + @skip_for_no_cluster_impl() + def test_cluster_replicate(self, mock_cluster_resp_ok): + assert mock_cluster_resp_ok.cluster('replicate', 'nodeid') is True + + 
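Note: the @skip_for_no_cluster_impl() marker applied to many of the tests in this file is not shown in this diff. Assuming it is a plain pytest skip for commands that do not yet have a working cluster-aware implementation, a minimal sketch could look like the following; the real fixture may differ, and skip_if_server_version_lt presumably follows the same pattern but compares against the version reported by the running server.

```python
# Hedged sketch of a marker like the one used in these tests; the actual helper
# in the test suite may be implemented differently.
import pytest


def skip_for_no_cluster_impl():
    # Marks a test as skipped because the command has no cluster-aware
    # implementation in redis-py-cluster yet. Used as a decorator:
    #
    #     @skip_for_no_cluster_impl()
    #     def test_sort_by(self, r):
    #         ...
    return pytest.mark.skip(reason="Command not yet implemented in cluster mode")
```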
@skip_for_no_cluster_impl() + def test_cluster_reset(self, mock_cluster_resp_ok): + assert mock_cluster_resp_ok.cluster('reset', 'hard') is True + + @skip_for_no_cluster_impl() + def test_cluster_saveconfig(self, mock_cluster_resp_ok): + assert mock_cluster_resp_ok.cluster('saveconfig') is True + + @skip_for_no_cluster_impl() + def test_cluster_setslot(self, mock_cluster_resp_ok): + assert mock_cluster_resp_ok.cluster('setslot', 1, + 'IMPORTING', 'nodeid') is True + + @skip_for_no_cluster_impl() + def test_cluster_slaves(self, mock_cluster_resp_slaves): + assert isinstance(mock_cluster_resp_slaves.cluster( + 'slaves', 'nodeid'), dict) + + # GEO COMMANDS + @skip_if_server_version_lt('3.2.0') + def test_geoadd(self, r): + values = (2.1909389952632, 41.433791470673, 'place1') +\ + (2.1873744593677, 41.406342043777, 'place2') + + assert r.geoadd('barcelona', *values) == 2 + assert r.zcard('barcelona') == 2 + + @skip_if_server_version_lt('3.2.0') + def test_geoadd_invalid_params(self, r): + with pytest.raises(exceptions.RedisError): + r.geoadd('barcelona', *(1, 2)) + + @skip_if_server_version_lt('3.2.0') + def test_geodist(self, r): + values = (2.1909389952632, 41.433791470673, 'place1') +\ + (2.1873744593677, 41.406342043777, 'place2') + + assert r.geoadd('barcelona', *values) == 2 + assert r.geodist('barcelona', 'place1', 'place2') == 3067.4157 + + @skip_if_server_version_lt('3.2.0') + def test_geodist_units(self, r): + values = (2.1909389952632, 41.433791470673, 'place1') +\ + (2.1873744593677, 41.406342043777, 'place2') + + r.geoadd('barcelona', *values) + assert r.geodist('barcelona', 'place1', 'place2', 'km') == 3.0674 + + @skip_if_server_version_lt('3.2.0') + def test_geodist_missing_one_member(self, r): + values = (2.1909389952632, 41.433791470673, 'place1') + r.geoadd('barcelona', *values) + assert r.geodist('barcelona', 'place1', 'missing_member', 'km') is None + + @skip_if_server_version_lt('3.2.0') + def test_geodist_invalid_units(self, r): + with pytest.raises(exceptions.RedisError): + assert r.geodist('x', 'y', 'z', 'inches') + + @skip_if_server_version_lt('3.2.0') + def test_geohash(self, r): + values = (2.1909389952632, 41.433791470673, 'place1') +\ + (2.1873744593677, 41.406342043777, 'place2') + + r.geoadd('barcelona', *values) + assert r.geohash('barcelona', 'place1', 'place2') ==\ + ['sp3e9yg3kd0', 'sp3e9cbc3t0'] + + @skip_if_server_version_lt('3.2.0') + def test_geopos(self, r): + values = (2.1909389952632, 41.433791470673, 'place1') +\ + (2.1873744593677, 41.406342043777, 'place2') + + r.geoadd('barcelona', *values) + # redis uses 52 bits precision, hereby small errors may be introduced. 
+ assert r.geopos('barcelona', 'place1', 'place2') ==\ + [(2.19093829393386841, 41.43379028184083523), + (2.18737632036209106, 41.40634178640635099)] + + @skip_if_server_version_lt('4.0.0') + def test_geopos_no_value(self, r): + assert r.geopos('barcelona', 'place1', 'place2') == [None, None] + + @skip_if_server_version_lt('3.2.0') + @skip_if_server_version_gte('4.0.0') + def test_old_geopos_no_value(self, r): + assert r.geopos('barcelona', 'place1', 'place2') == [] + + @skip_if_server_version_lt('3.2.0') + def test_georadius(self, r): + values = (2.1909389952632, 41.433791470673, 'place1') +\ + (2.1873744593677, 41.406342043777, 'place2') + + r.geoadd('barcelona', *values) + assert r.georadius('barcelona', 2.191, 41.433, 1000) == ['place1'] + + @skip_if_server_version_lt('3.2.0') + def test_georadius_no_values(self, r): + values = (2.1909389952632, 41.433791470673, 'place1') +\ + (2.1873744593677, 41.406342043777, 'place2') + + r.geoadd('barcelona', *values) + assert r.georadius('barcelona', 1, 2, 1000) == [] + + @skip_if_server_version_lt('3.2.0') + def test_georadius_units(self, r): + values = (2.1909389952632, 41.433791470673, 'place1') +\ + (2.1873744593677, 41.406342043777, 'place2') + + r.geoadd('barcelona', *values) + assert r.georadius('barcelona', 2.191, 41.433, 1, unit='km') ==\ + ['place1'] + + @skip_if_server_version_lt('3.2.0') + def test_georadius_with(self, r): + values = (2.1909389952632, 41.433791470673, 'place1') +\ + (2.1873744593677, 41.406342043777, 'place2') + + r.geoadd('barcelona', *values) + + # test a bunch of combinations to test the parse response + # function. + assert r.georadius('barcelona', 2.191, 41.433, 1, unit='km', + withdist=True, withcoord=True, withhash=True) ==\ + [['place1', 0.0881, 3471609698139488, + (2.19093829393386841, 41.43379028184083523)]] + + assert r.georadius('barcelona', 2.191, 41.433, 1, unit='km', + withdist=True, withcoord=True) ==\ + [['place1', 0.0881, + (2.19093829393386841, 41.43379028184083523)]] + + assert r.georadius('barcelona', 2.191, 41.433, 1, unit='km', + withhash=True, withcoord=True) ==\ + [['place1', 3471609698139488, + (2.19093829393386841, 41.43379028184083523)]] + + # test no values. 
+ assert r.georadius('barcelona', 2, 1, 1, unit='km', + withdist=True, withcoord=True, withhash=True) == [] + + @skip_if_server_version_lt('3.2.0') + def test_georadius_count(self, r): + values = (2.1909389952632, 41.433791470673, 'place1') +\ + (2.1873744593677, 41.406342043777, 'place2') + + r.geoadd('barcelona', *values) + assert r.georadius('barcelona', 2.191, 41.433, 3000, count=1) ==\ + ['place1'] + + @skip_if_server_version_lt('3.2.0') + def test_georadius_sort(self, r): + values = (2.1909389952632, 41.433791470673, 'place1') +\ + (2.1873744593677, 41.406342043777, 'place2') + + r.geoadd('barcelona', *values) + assert r.georadius('barcelona', 2.191, 41.433, 3000, sort='ASC') ==\ + ['place1', 'place2'] + assert r.georadius('barcelona', 2.191, 41.433, 3000, sort='DESC') ==\ + ['place2', 'place1'] + + @skip_if_server_version_lt('3.2.0') + def test_georadius_store(self, r): + """ + Generated keys for slot + 0 : ['60ZE7', '8I2EQ'] + """ + values = (2.1909389952632, 41.433791470673, 'place1') +\ + (2.1873744593677, 41.406342043777, 'place2') + + r.geoadd('60ZE7', *values) + r.georadius('60ZE7', 2.191, 41.433, 1000, store='8I2EQ') + assert r.zrange('8I2EQ', 0, -1) == [b'place1'] + + @skip_if_server_version_lt('3.2.0') + def test_georadius_store_dist(self, r): + """ + Generated keys for slot + 0 : ['60ZE7', '8I2EQ'] + """ + values = (2.1909389952632, 41.433791470673, 'place1') +\ + (2.1873744593677, 41.406342043777, 'place2') + + r.geoadd('60ZE7', *values) + r.georadius('60ZE7', 2.191, 41.433, 1000, + store_dist='8I2EQ') + # instead of save the geo score, the distance is saved. + assert r.zscore('8I2EQ', 'place1') == 88.05060698409301 + + @skip_if_server_version_lt('3.2.0') + def test_georadiusmember(self, r): + values = (2.1909389952632, 41.433791470673, 'place1') +\ + (2.1873744593677, 41.406342043777, 'place2') + + r.geoadd('barcelona', *values) + assert r.georadiusbymember('barcelona', 'place1', 4000) ==\ + ['place2', 'place1'] + assert r.georadiusbymember('barcelona', 'place1', 10) == ['place1'] + + assert r.georadiusbymember('barcelona', 'place1', 4000, + withdist=True, withcoord=True, + withhash=True) ==\ + [['place2', 3067.4157, 3471609625421029, + (2.187376320362091, 41.40634178640635)], + ['place1', 0.0, 3471609698139488, + (2.1909382939338684, 41.433790281840835)]] + + @skip_if_server_version_lt('5.0.0') + @skip_for_no_cluster_impl() + def test_xack(self, r): + stream = 'stream' + group = 'group' + consumer = 'consumer' + # xack on a stream that doesn't exist + assert r.xack(stream, group, '0-0') == 0 + + m1 = r.xadd(stream, {'one': 'one'}) + m2 = r.xadd(stream, {'two': 'two'}) + m3 = r.xadd(stream, {'three': 'three'}) + + # xack on a group that doesn't exist + assert r.xack(stream, group, m1) == 0 + + r.xgroup_create(stream, group, 0) + r.xreadgroup(group, consumer, streams={stream: 0}) + # xack returns the number of ack'd elements + assert r.xack(stream, group, m1) == 1 + assert r.xack(stream, group, m2, m3) == 2 + + @skip_if_server_version_lt('5.0.0') + @skip_for_no_cluster_impl() + def test_xadd(self, r): + stream = 'stream' + message_id = r.xadd(stream, {'foo': 'bar'}) + assert re.match(br'[0-9]+\-[0-9]+', message_id) + + # explicit message id + message_id = b'9999999999999999999-0' + assert message_id == r.xadd(stream, {'foo': 'bar'}, id=message_id) + + # with maxlen, the list evicts the first message + r.xadd(stream, {'foo': 'bar'}, maxlen=2, approximate=False) + assert r.xlen(stream) == 2 + + @skip_if_server_version_lt('5.0.0') + @skip_for_no_cluster_impl() + def 
test_xclaim(self, r): + stream = 'stream' + group = 'group' + consumer1 = 'consumer1' + consumer2 = 'consumer2' + + message_id = r.xadd(stream, {'john': 'wick'}) + message = get_stream_message(r, stream, message_id) + r.xgroup_create(stream, group, 0) + + # trying to claim a message that isn't already pending doesn't + # do anything + response = r.xclaim(stream, group, consumer2, + min_idle_time=0, message_ids=(message_id,)) + assert response == [] + + # read the group as consumer1 to initially claim the messages + r.xreadgroup(group, consumer1, streams={stream: 0}) + + # claim the message as consumer2 + response = r.xclaim(stream, group, consumer2, + min_idle_time=0, message_ids=(message_id,)) + assert response[0] == message + + # reclaim the message as consumer1, but use the justid argument + # which only returns message ids + assert r.xclaim(stream, group, consumer1, + min_idle_time=0, message_ids=(message_id,), + justid=True) == [message_id] + + @skip_if_server_version_lt('5.0.0') + @skip_for_no_cluster_impl() + def test_xdel(self, r): + stream = 'stream' + + # deleting from an empty stream doesn't do anything + assert r.xdel(stream, 1) == 0 + + m1 = r.xadd(stream, {'foo': 'bar'}) + m2 = r.xadd(stream, {'foo': 'bar'}) + m3 = r.xadd(stream, {'foo': 'bar'}) + + # xdel returns the number of deleted elements + assert r.xdel(stream, m1) == 1 + assert r.xdel(stream, m2, m3) == 2 + + @skip_if_server_version_lt('5.0.0') + @skip_for_no_cluster_impl() + def test_xgroup_create(self, r): + # tests xgroup_create and xinfo_groups + stream = 'stream' + group = 'group' + r.xadd(stream, {'foo': 'bar'}) + + # no group is setup yet, no info to obtain + assert r.xinfo_groups(stream) == [] + + assert r.xgroup_create(stream, group, 0) + expected = [{ + 'name': group.encode(), + 'consumers': 0, + 'pending': 0, + 'last-delivered-id': b'0-0' + }] + assert r.xinfo_groups(stream) == expected + + @skip_if_server_version_lt('5.0.0') + @skip_for_no_cluster_impl() + def test_xgroup_create_mkstream(self, r): + # tests xgroup_create and xinfo_groups + stream = 'stream' + group = 'group' + + # an error is raised if a group is created on a stream that + # doesn't already exist + with pytest.raises(exceptions.ResponseError): + r.xgroup_create(stream, group, 0) + + # however, with mkstream=True, the underlying stream is created + # automatically + assert r.xgroup_create(stream, group, 0, mkstream=True) + expected = [{ + 'name': group.encode(), + 'consumers': 0, + 'pending': 0, + 'last-delivered-id': b'0-0' + }] + assert r.xinfo_groups(stream) == expected + + @skip_if_server_version_lt('5.0.0') + @skip_for_no_cluster_impl() + def test_xgroup_delconsumer(self, r): + stream = 'stream' + group = 'group' + consumer = 'consumer' + r.xadd(stream, {'foo': 'bar'}) + r.xadd(stream, {'foo': 'bar'}) + r.xgroup_create(stream, group, 0) + + # a consumer that hasn't yet read any messages doesn't do anything + assert r.xgroup_delconsumer(stream, group, consumer) == 0 + + # read all messages from the group + r.xreadgroup(group, consumer, streams={stream: 0}) + + # deleting the consumer should return 2 pending messages + assert r.xgroup_delconsumer(stream, group, consumer) == 2 + + @skip_if_server_version_lt('5.0.0') + @skip_for_no_cluster_impl() + def test_xgroup_destroy(self, r): + stream = 'stream' + group = 'group' + r.xadd(stream, {'foo': 'bar'}) + + # destroying a nonexistent group returns False + assert not r.xgroup_destroy(stream, group) + + r.xgroup_create(stream, group, 0) + assert r.xgroup_destroy(stream, group) + + 
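Note: the stream tests above and below compare xread/xreadgroup output against a get_stream_message helper that is not part of this diff. Assuming it simply fetches a single entry by id in the same (id, fields) shape that XRANGE returns, a sketch could be:

```python
# Sketch of the helper assumed by the stream tests; the version in the actual
# test module may differ in details.
def get_stream_message(client, stream, message_id):
    """Fetch a single stream entry as the (id, fields) tuple returned by XRANGE."""
    response = client.xrange(stream, min=message_id, max=message_id)
    assert len(response) == 1
    return response[0]
```

Under that assumption, comparisons such as info['first-entry'] == get_stream_message(r, stream, m1) line up directly with the raw XRANGE tuple format.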
@skip_if_server_version_lt('5.0.0') + @skip_for_no_cluster_impl() + def test_xgroup_setid(self, r): + stream = 'stream' + group = 'group' + message_id = r.xadd(stream, {'foo': 'bar'}) + + r.xgroup_create(stream, group, 0) + # advance the last_delivered_id to the message_id + r.xgroup_setid(stream, group, message_id) + expected = [{ + 'name': group.encode(), + 'consumers': 0, + 'pending': 0, + 'last-delivered-id': message_id + }] + assert r.xinfo_groups(stream) == expected + + @skip_if_server_version_lt('5.0.0') + @skip_for_no_cluster_impl() + def test_xinfo_consumers(self, r): + stream = 'stream' + group = 'group' + consumer1 = 'consumer1' + consumer2 = 'consumer2' + r.xadd(stream, {'foo': 'bar'}) + + r.xgroup_create(stream, group, 0) + r.xreadgroup(group, consumer1, streams={stream: 0}) + r.xreadgroup(group, consumer2, streams={stream: 0}) + info = r.xinfo_consumers(stream, group) + assert len(info) == 2 + expected = [ + {'name': consumer1.encode(), 'pending': 1}, + {'name': consumer2.encode(), 'pending': 0}, + ] + + # we can't determine the idle time, so just make sure it's an int + assert isinstance(info[0].pop('idle'), (int, long)) + assert isinstance(info[1].pop('idle'), (int, long)) + assert info == expected + + @skip_if_server_version_lt('5.0.0') + @skip_for_no_cluster_impl() + def test_xinfo_stream(self, r): + stream = 'stream' + m1 = r.xadd(stream, {'foo': 'bar'}) + m2 = r.xadd(stream, {'foo': 'bar'}) + info = r.xinfo_stream(stream) + + assert info['length'] == 2 + assert info['first-entry'] == get_stream_message(r, stream, m1) + assert info['last-entry'] == get_stream_message(r, stream, m2) + + @skip_if_server_version_lt('5.0.0') + @skip_for_no_cluster_impl() + def test_xlen(self, r): + stream = 'stream' + assert r.xlen(stream) == 0 + r.xadd(stream, {'foo': 'bar'}) + r.xadd(stream, {'foo': 'bar'}) + assert r.xlen(stream) == 2 + + @skip_if_server_version_lt('5.0.0') + @skip_for_no_cluster_impl() + def test_xpending(self, r): + stream = 'stream' + group = 'group' + consumer1 = 'consumer1' + consumer2 = 'consumer2' + m1 = r.xadd(stream, {'foo': 'bar'}) + m2 = r.xadd(stream, {'foo': 'bar'}) + r.xgroup_create(stream, group, 0) + + # xpending on a group that has no consumers yet + expected = { + 'pending': 0, + 'min': None, + 'max': None, + 'consumers': [] + } + assert r.xpending(stream, group) == expected + + # read 1 message from the group with each consumer + r.xreadgroup(group, consumer1, streams={stream: 0}, count=1) + r.xreadgroup(group, consumer2, streams={stream: m1}, count=1) + + expected = { + 'pending': 2, + 'min': m1, + 'max': m2, + 'consumers': [ + {'name': consumer1.encode(), 'pending': 1}, + {'name': consumer2.encode(), 'pending': 1}, + ] + } + assert r.xpending(stream, group) == expected + + @skip_if_server_version_lt('5.0.0') + @skip_for_no_cluster_impl() + def test_xpending_range(self, r): + stream = 'stream' + group = 'group' + consumer1 = 'consumer1' + consumer2 = 'consumer2' + m1 = r.xadd(stream, {'foo': 'bar'}) + m2 = r.xadd(stream, {'foo': 'bar'}) + r.xgroup_create(stream, group, 0) + + # xpending range on a group that has no consumers yet + assert r.xpending_range(stream, group) == [] + + # read 1 message from the group with each consumer + r.xreadgroup(group, consumer1, streams={stream: 0}, count=1) + r.xreadgroup(group, consumer2, streams={stream: m1}, count=1) + + response = r.xpending_range(stream, group) + assert len(response) == 2 + assert response[0]['message_id'] == m1 + assert response[0]['consumer'] == consumer1.encode() + assert 
response[1]['message_id'] == m2 + assert response[1]['consumer'] == consumer2.encode() + + @skip_if_server_version_lt('5.0.0') + @skip_for_no_cluster_impl() + def test_xrange(self, r): + stream = 'stream' + m1 = r.xadd(stream, {'foo': 'bar'}) + m2 = r.xadd(stream, {'foo': 'bar'}) + m3 = r.xadd(stream, {'foo': 'bar'}) + m4 = r.xadd(stream, {'foo': 'bar'}) + + def get_ids(results): + return [result[0] for result in results] + + results = r.xrange(stream, min=m1) + assert get_ids(results) == [m1, m2, m3, m4] + + results = r.xrange(stream, min=m2, max=m3) + assert get_ids(results) == [m2, m3] + + results = r.xrange(stream, max=m3) + assert get_ids(results) == [m1, m2, m3] + + results = r.xrange(stream, max=m2, count=1) + assert get_ids(results) == [m1] + + @skip_if_server_version_lt('5.0.0') + @skip_for_no_cluster_impl() + def test_xread(self, r): + stream = 'stream' + m1 = r.xadd(stream, {'foo': 'bar'}) + m2 = r.xadd(stream, {'bing': 'baz'}) + + expected = [ + [ + stream, + [ + get_stream_message(r, stream, m1), + get_stream_message(r, stream, m2), + ] + ] + ] + # xread starting at 0 returns both messages + assert r.xread(streams={stream: 0}) == expected + + expected = [ + [ + stream, + [ + get_stream_message(r, stream, m1), + ] + ] + ] + # xread starting at 0 and count=1 returns only the first message + assert r.xread(streams={stream: 0}, count=1) == expected + + expected = [ + [ + stream, + [ + get_stream_message(r, stream, m2), + ] + ] + ] + # xread starting at m1 returns only the second message + assert r.xread(streams={stream: m1}) == expected + + # xread starting at the last message returns an empty list + assert r.xread(streams={stream: m2}) == [] + + @skip_if_server_version_lt('5.0.0') + @skip_for_no_cluster_impl() + def test_xreadgroup(self, r): + stream = 'stream' + group = 'group' + consumer = 'consumer' + m1 = r.xadd(stream, {'foo': 'bar'}) + m2 = r.xadd(stream, {'bing': 'baz'}) + r.xgroup_create(stream, group, 0) + + expected = [ + [ + stream, + [ + get_stream_message(r, stream, m1), + get_stream_message(r, stream, m2), + ] + ] + ] + # xread starting at 0 returns both messages + assert r.xreadgroup(group, consumer, streams={stream: 0}) == expected + + r.xgroup_destroy(stream, group) + r.xgroup_create(stream, group, 0) + + expected = [ + [ + stream, + [ + get_stream_message(r, stream, m1), + ] + ] + ] + # xread starting at 0 and count=1 returns only the first message + assert r.xreadgroup(group, consumer, streams={stream: 0}, count=1) == \ + expected + + r.xgroup_destroy(stream, group) + r.xgroup_create(stream, group, 0) + + expected = [ + [ + stream, + [ + get_stream_message(r, stream, m2), + ] + ] + ] + # xread starting at m1 returns only the second message + assert r.xreadgroup(group, consumer, streams={stream: m1}) == expected + + r.xgroup_destroy(stream, group) + r.xgroup_create(stream, group, 0) + + # xread starting at the last message returns an empty message list + expected = [ + [ + stream, + [] + ] + ] + assert r.xreadgroup(group, consumer, streams={stream: m2}) == expected + + @skip_if_server_version_lt('5.0.0') + @skip_for_no_cluster_impl() + def test_xrevrange(self, r): + stream = 'stream' + m1 = r.xadd(stream, {'foo': 'bar'}) + m2 = r.xadd(stream, {'foo': 'bar'}) + m3 = r.xadd(stream, {'foo': 'bar'}) + m4 = r.xadd(stream, {'foo': 'bar'}) + + def get_ids(results): + return [result[0] for result in results] + + results = r.xrevrange(stream, max=m4) + assert get_ids(results) == [m4, m3, m2, m1] + + results = r.xrevrange(stream, max=m3, min=m2) + assert get_ids(results) 
== [m3, m2] + + results = r.xrevrange(stream, min=m3) + assert get_ids(results) == [m4, m3] + + results = r.xrevrange(stream, min=m2, count=1) + assert get_ids(results) == [m4] + + @skip_if_server_version_lt('5.0.0') + @skip_for_no_cluster_impl() + def test_xtrim(self, r): + stream = 'stream' + + # trimming an empty key doesn't do anything + assert r.xtrim(stream, 1000) == 0 + + r.xadd(stream, {'foo': 'bar'}) + r.xadd(stream, {'foo': 'bar'}) + r.xadd(stream, {'foo': 'bar'}) + r.xadd(stream, {'foo': 'bar'}) + + # trimming an amount large than the number of messages + # doesn't do anything + assert r.xtrim(stream, 5, approximate=False) == 0 + + # 1 message is trimmed + assert r.xtrim(stream, 3, approximate=False) == 1 + + def test_bitfield_operations(self, r): + # comments show affected bits + bf = r.bitfield('a') + resp = (bf + .set('u8', 8, 255) # 00000000 11111111 + .get('u8', 0) # 00000000 + .get('u4', 8) # 1111 + .get('u4', 12) # 1111 + .get('u4', 13) # 111 0 + .execute()) + assert resp == [0, 0, 15, 15, 14] + + # .set() returns the previous value... + resp = (bf + .set('u8', 4, 1) # 0000 0001 + .get('u16', 0) # 00000000 00011111 + .set('u16', 0, 0) # 00000000 00000000 + .execute()) + assert resp == [15, 31, 31] + + # incrby adds to the value + resp = (bf + .incrby('u8', 8, 254) # 00000000 11111110 + .incrby('u8', 8, 1) # 00000000 11111111 + .get('u16', 0) # 00000000 11111111 + .execute()) + assert resp == [254, 255, 255] + + # Verify overflow protection works as a method: + r.delete('a') + resp = (bf + .set('u8', 8, 254) # 00000000 11111110 + .overflow('fail') + .incrby('u8', 8, 2) # incrby 2 would overflow, None returned + .incrby('u8', 8, 1) # 00000000 11111111 + .incrby('u8', 8, 1) # incrby 1 would overflow, None returned + .get('u16', 0) # 00000000 11111111 + .execute()) + assert resp == [0, None, 255, None, 255] + + # Verify overflow protection works as arg to incrby: + r.delete('a') + resp = (bf + .set('u8', 8, 255) # 00000000 11111111 + .incrby('u8', 8, 1) # 00000000 00000000 wrap default + .set('u8', 8, 255) # 00000000 11111111 + .incrby('u8', 8, 1, 'FAIL') # 00000000 11111111 fail + .incrby('u8', 8, 1) # 00000000 11111111 still fail + .get('u16', 0) # 00000000 11111111 + .execute()) + assert resp == [0, 0, 0, None, None, 255] + + # test default default_overflow + r.delete('a') + bf = r.bitfield('a', default_overflow='FAIL') + resp = (bf + .set('u8', 8, 255) # 00000000 11111111 + .incrby('u8', 8, 1) # 00000000 11111111 fail default + .get('u16', 0) # 00000000 11111111 + .execute()) + assert resp == [0, None, 255] + + @skip_if_server_version_lt('4.0.0') + def test_memory_usage(self, r): + r.set('foo', 'bar') + assert isinstance(r.memory_usage('foo'), int) + class TestRedisCommandsSort(object): # SORT From dfa78eef1598f96e4a1f6546ed5cd32a4340dfc0 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 28 Apr 2019 22:33:10 +0200 Subject: [PATCH 42/65] Add output of redis-py version when running travis-ci to allow for verify that we are working on the correct version of the upstream lib --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index 376c0df0..1e206456 100644 --- a/.travis.yml +++ b/.travis.yml @@ -20,6 +20,7 @@ install: - pip install -r dev-requirements.txt - pip install -e . 
- "if [[ $HIREDIS == '1' ]]; then pip install hiredis; fi" + - "pip freeze | grep redis" env: # Redis 3.0 & HIREDIS - HIREDIS=0 REDIS_VERSION=3.0 From ccc1d5d0fb73e89415cd1d8dd4f9086e1dfc0c2c Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 28 Apr 2019 22:37:35 +0200 Subject: [PATCH 43/65] python 3.7 is still not possible and nightly is still the correct 3.7 python version to use --- .travis.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 1e206456..ffed6369 100644 --- a/.travis.yml +++ b/.travis.yml @@ -7,7 +7,6 @@ python: - "3.4" - "3.5" - "3.6" - - "3.7" - "nightly" services: - redis-server From bb7581f62be8d13223e45746cea39e88a7ab671a Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 28 Apr 2019 22:41:10 +0200 Subject: [PATCH 44/65] Print out all installed pythoon packages --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index ffed6369..8b0165c6 100644 --- a/.travis.yml +++ b/.travis.yml @@ -20,6 +20,7 @@ install: - pip install -e . - "if [[ $HIREDIS == '1' ]]; then pip install hiredis; fi" - "pip freeze | grep redis" + - "pip freeze" env: # Redis 3.0 & HIREDIS - HIREDIS=0 REDIS_VERSION=3.0 From 3a57b3f418c708da760fcb706194b424109006b4 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 28 Apr 2019 22:52:21 +0200 Subject: [PATCH 45/65] Add version block to spop and bitfield opertion that do not work on redis 3.0.x line --- tests/test_commands.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/test_commands.py b/tests/test_commands.py index 3e6fab94..6b50f73f 100644 --- a/tests/test_commands.py +++ b/tests/test_commands.py @@ -1005,6 +1005,7 @@ def test_spop(self, r): assert value in s assert r.smembers('a') == set(s) - {value} + @skip_if_server_version_lt('3.2.0') def test_spop_multi_value(self, r): s = [b'1', b'2', b'3'] r.sadd('a', *s) @@ -2385,6 +2386,7 @@ def test_xtrim(self, r): # 1 message is trimmed assert r.xtrim(stream, 3, approximate=False) == 1 + @skip_if_server_version_lt('3.2.0') def test_bitfield_operations(self, r): # comments show affected bits bf = r.bitfield('a') From 67f5125ac1763841c16ab8474be640edf1e58f5b Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 28 Apr 2019 22:52:35 +0200 Subject: [PATCH 46/65] Install pysnooper as a default dev python package --- dev-requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/dev-requirements.txt b/dev-requirements.txt index 57454c80..93a5bca0 100644 --- a/dev-requirements.txt +++ b/dev-requirements.txt @@ -9,3 +9,4 @@ tox python-coveralls ptpdb ptpython +pysnooper From b3d6d74b791969154fcc056eb2457abec9f1dd39 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 28 Apr 2019 23:35:40 +0200 Subject: [PATCH 47/65] Fix all usages of _get_client() calls to use the correct Client class --- tests/conftest.py | 2 +- tests/test_cluster_obj.py | 12 ++++++------ tests/test_pipeline.py | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index 19ed432d..8d2166a3 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -137,7 +137,7 @@ def s(*args, **kwargs): """ Create a RedisCluster instance with 'init_slot_cache' set to false """ - s = _get_client(init_slot_cache=False, **kwargs) + s = _get_client(RedisCluster, init_slot_cache=False, **kwargs) assert s.connection_pool.nodes.slots == {} assert s.connection_pool.nodes.nodes == {} return s diff --git a/tests/test_cluster_obj.py b/tests/test_cluster_obj.py index 756890dd..3d0161bd 100644 --- a/tests/test_cluster_obj.py +++ 
b/tests/test_cluster_obj.py @@ -79,7 +79,7 @@ def test_blocked_strict_redis_args(): assert c.connection_pool.connection_kwargs["socket_timeout"] == ClusterConnectionPool.RedisClusterDefaultTimeout with pytest.raises(RedisClusterException) as ex: - _get_client(db=1) + _get_client(RedisCluster, db=1) assert unicode(ex.value).startswith("Argument 'db' is not possible to use in cluster mode") @@ -91,14 +91,14 @@ def test_password_procted_nodes(): startup_nodes = [{"host": "127.0.0.1", "port": "7000"}] password_protected_startup_nodes = [{"host": "127.0.0.1", "port": "7100"}] with pytest.raises(RedisClusterException) as ex: - _get_client(startup_nodes=password_protected_startup_nodes) + _get_client(RedisCluster, startup_nodes=password_protected_startup_nodes) assert unicode(ex.value).startswith("ERROR sending 'cluster slots' command to redis server:") - _get_client(startup_nodes=password_protected_startup_nodes, password='password_is_protected') + _get_client(RedisCluster, startup_nodes=password_protected_startup_nodes, password='password_is_protected') with pytest.raises(RedisClusterException) as ex: - _get_client(startup_nodes=startup_nodes, password='password_is_protected') + _get_client(RedisCluster, startup_nodes=startup_nodes, password='password_is_protected') assert unicode(ex.value).startswith("ERROR sending 'cluster slots' command to redis server:") - _get_client(startup_nodes=startup_nodes) + _get_client(RedisCluster, startup_nodes=startup_nodes) def test_host_port_startup_node(): @@ -116,7 +116,7 @@ def test_empty_startup_nodes(): Test that exception is raised when empty providing empty startup_nodes """ with pytest.raises(RedisClusterException) as ex: - _get_client(init_slot_cache=False, startup_nodes=[]) + _get_client(RedisCluster, init_slot_cache=False, startup_nodes=[]) assert unicode(ex.value).startswith("No startup nodes provided"), unicode(ex.value) diff --git a/tests/test_pipeline.py b/tests/test_pipeline.py index 72d190c1..e6b7d405 100644 --- a/tests/test_pipeline.py +++ b/tests/test_pipeline.py @@ -352,7 +352,7 @@ def test_redis_cluster_pipeline(self): """ Test that we can use a pipeline with the RedisCluster class """ - r = _get_client(cls=None) + r = _get_client(RedisCluster) with r.pipeline(transaction=False) as pipe: pipe.get("foobar") From 87096fdf91b15363e7c264f3daf6bad0ace551e9 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 28 Apr 2019 23:51:54 +0200 Subject: [PATCH 48/65] Fix test_representation and test_empty_startupnodes tests --- tests/test_cluster_obj.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_cluster_obj.py b/tests/test_cluster_obj.py index 3d0161bd..5ff41e70 100644 --- a/tests/test_cluster_obj.py +++ b/tests/test_cluster_obj.py @@ -67,7 +67,7 @@ def execute_command(self, *args, **kwargs): def test_representation(r): - assert re.search('^RedisCluster<[0-9\.\:\,].+>$', str(r)) + assert re.search('^RedisCluster<[a-z0-9\.\:\,].+>$', str(r)) def test_blocked_strict_redis_args(): @@ -116,7 +116,7 @@ def test_empty_startup_nodes(): Test that exception is raised when empty providing empty startup_nodes """ with pytest.raises(RedisClusterException) as ex: - _get_client(RedisCluster, init_slot_cache=False, startup_nodes=[]) + r = RedisCluster(startup_nodes=[]) assert unicode(ex.value).startswith("No startup nodes provided"), unicode(ex.value) From a3b5adf85164ac25bc3c180beb2cd03984a6278d Mon Sep 17 00:00:00 2001 From: Grokzen Date: Mon, 29 Apr 2019 12:52:54 +0200 Subject: [PATCH 49/65] Pytest xfail for pubsub tests that is not 
supported fully in cluster mode --- tests/test_pubsub.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/test_pubsub.py b/tests/test_pubsub.py index 566fd78b..4d49d626 100644 --- a/tests/test_pubsub.py +++ b/tests/test_pubsub.py @@ -481,6 +481,7 @@ def t_run(rc): class TestPubSubPubSubSubcommands(object): @skip_if_server_version_lt('2.8.0') + @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_pubsub_channels(self, r): p = r.pubsub(ignore_subscribe_messages=True) p.subscribe('foo', 'bar', 'baz', 'quux') @@ -488,6 +489,7 @@ def test_pubsub_channels(self, r): assert channels == [b'bar', b'baz', b'foo', b'quux'] @skip_if_server_version_lt('2.8.0') + @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_pubsub_numsub(self, r): p1 = r.pubsub(ignore_subscribe_messages=True) p1.subscribe('foo', 'bar', 'baz') @@ -500,6 +502,7 @@ def test_pubsub_numsub(self, r): assert channels == r.pubsub_numsub('foo', 'bar', 'baz') @skip_if_server_version_lt('2.8.0') + @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_pubsub_numpat(self, r): p = r.pubsub(ignore_subscribe_messages=True) p.psubscribe('*oo', '*ar', 'b*z') @@ -509,6 +512,7 @@ def test_pubsub_numpat(self, r): class TestPubSubPings(object): @skip_if_server_version_lt('3.0.0') + @pytest.mark.xfail(reason="Pubsub is not fully supported in cluster mode") def test_send_pubsub_ping(self, r): p = r.pubsub(ignore_subscribe_messages=True) p.subscribe('foo') @@ -518,7 +522,7 @@ def test_send_pubsub_ping(self, r): pattern=None) @skip_if_server_version_lt('3.0.0') - @pytest.mark.xfail(reason="Pattern pubsub do not work currently") + @pytest.mark.xfail(reason="Pattern pubsub is not fully supported in cluster mode") def test_send_pubsub_ping_message(self, r): p = r.pubsub(ignore_subscribe_messages=True) p.subscribe('foo') From 29924049f57bc92f8e48d0a69c2e8a58769b0c85 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Mon, 29 Apr 2019 14:55:33 +0200 Subject: [PATCH 50/65] Only run bitfield test on redis version >= 3.2.0 --- tests/test_pipeline.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/test_pipeline.py b/tests/test_pipeline.py index e6b7d405..aa1617d2 100644 --- a/tests/test_pipeline.py +++ b/tests/test_pipeline.py @@ -8,7 +8,7 @@ from rediscluster.client import RedisCluster from rediscluster.connection import ClusterConnectionPool, ClusterReadOnlyConnectionPool from rediscluster.exceptions import RedisClusterException -from tests.conftest import _get_client +from tests.conftest import _get_client, skip_if_server_version_lt # 3rd party imports import pytest @@ -285,6 +285,7 @@ def test_exec_error_in_no_transaction_pipeline_unicode_command(self, r): assert r[key] == b'1' + @skip_if_server_version_lt('3.2.0') def test_pipeline_with_bitfield(self, r): with r.pipeline() as pipe: pipe.set('a', '1') From a70d220c894541bc9189a54b9e330c6e66e74404 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Tue, 30 Apr 2019 22:29:53 +0200 Subject: [PATCH 51/65] Update test_scripting to match upstream redis-py 3.0.1 --- tests/test_scripting.py | 37 ++++++++++++++++++++++--------------- 1 file changed, 22 insertions(+), 15 deletions(-) diff --git a/tests/test_scripting.py b/tests/test_scripting.py index 968fdc61..ca5afc21 100644 --- a/tests/test_scripting.py +++ b/tests/test_scripting.py @@ -93,45 +93,51 @@ def test_script_loading(self, r): def test_script_object(self, r): r.set('a', 2) multiply = r.register_script(multiply_script) - # test 
evalsha fail -> script load + retry + precalculated_sha = multiply.sha + assert precalculated_sha + assert r.script_exists(multiply.sha) == [False] + # Test second evalsha block (after NoScriptError) assert multiply(keys=['a'], args=[3]) == 6 - assert multiply.sha + # At this point, the script should be loaded assert r.script_exists(multiply.sha) == [True] - # test first evalsha + # Test that the precalculated sha matches the one from redis + assert multiply.sha == precalculated_sha + # Test first evalsha block assert multiply(keys=['a'], args=[3]) == 6 - @pytest.mark.xfail(reason="Not Yet Implemented") + @pytest.mark.xfail(reason="Script object not supported in cluster") def test_script_object_in_pipeline(self, r): multiply = r.register_script(multiply_script) - assert not multiply.sha + precalculated_sha = multiply.sha + assert precalculated_sha pipe = r.pipeline() pipe.set('a', 2) pipe.get('a') multiply(keys=['a'], args=[3], client=pipe) - # even though the pipeline wasn't executed yet, we made sure the - # script was loaded and got a valid sha - assert multiply.sha - assert r.script_exists(multiply.sha) == [True] + assert r.script_exists(multiply.sha) == [False] # [SET worked, GET 'a', result of multiple script] assert pipe.execute() == [True, b'2', 6] + # The script should have been loaded by pipe.execute() + assert r.script_exists(multiply.sha) == [True] + # The precalculated sha should have been the correct one + assert multiply.sha == precalculated_sha # purge the script from redis's cache and re-run the pipeline - # the multiply script object knows it's sha, so it shouldn't get - # reloaded until pipe.execute() + # the multiply script should be reloaded by pipe.execute() r.script_flush() pipe = r.pipeline() pipe.set('a', 2) pipe.get('a') - assert multiply.sha multiply(keys=['a'], args=[3], client=pipe) assert r.script_exists(multiply.sha) == [False] # [SET worked, GET 'a', result of multiple script] assert pipe.execute() == [True, b'2', 6] + assert r.script_exists(multiply.sha) == [True] - @pytest.mark.xfail(reason="Not Yet Implemented") + @pytest.mark.xfail(reason="LUA is not supported in cluster") def test_eval_msgpack_pipeline_error_in_lua(self, r): msgpack_hello = r.register_script(msgpack_hello_script) - assert not msgpack_hello.sha + assert msgpack_hello.sha pipe = r.pipeline() @@ -141,8 +147,9 @@ def test_eval_msgpack_pipeline_error_in_lua(self, r): msgpack_hello(args=[msgpack_message_1], client=pipe) - assert r.script_exists(msgpack_hello.sha) == [True] + assert r.script_exists(msgpack_hello.sha) == [False] assert pipe.execute()[0] == b'hello Joe' + assert r.script_exists(msgpack_hello.sha) == [True] msgpack_hello_broken = r.register_script(msgpack_hello_script_broken) From f8b7db519315fb893fd94d7b45fada979d618d04 Mon Sep 17 00:00:00 2001 From: James Ward Date: Wed, 6 Feb 2019 01:25:33 -0500 Subject: [PATCH 52/65] add test to support both kinds of cluster slots errors --- tests/test_node_manager.py | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/tests/test_node_manager.py b/tests/test_node_manager.py index 5972d59d..cd4fecc2 100644 --- a/tests/test_node_manager.py +++ b/tests/test_node_manager.py @@ -14,7 +14,7 @@ from mock import patch, Mock from redis import Redis from redis._compat import unicode -from redis import ConnectionError +from redis import ConnectionError, ResponseError pytestmark = skip_if_server_version_lt('2.9.0') @@ -282,11 +282,29 @@ def test_cluster_slots_error(): with patch.object(RedisCluster, 
'execute_command') as execute_command_mock: execute_command_mock.side_effect = Exception("foobar") - n = NodeManager(startup_nodes=[{}]) + n = NodeManager(startup_nodes=[{"host": "127.0.0.1", "port": 7000}]) - with pytest.raises(RedisClusterException): + with pytest.raises(RedisClusterException) as e: n.initialize() + assert "ERROR sending 'cluster slots' command" in unicode(e) + + +def test_cluster_slots_error_expected_responseerror(): + """ + Check that exception is not raised if initialize can't execute + 'CLUSTER SLOTS' command but can hit other nodes. + """ + with patch.object(StrictRedis, 'execute_command') as execute_command_mock: + execute_command_mock.side_effect = ResponseError("MASTERDOWN") + + n = NodeManager(startup_nodes=[{"host": "127.0.0.1", "port": 7000}]) + + with pytest.raises(RedisClusterException) as e: + n.initialize() + + assert 'Redis Cluster cannot be connected' in unicode(e) + def test_set_node(): """ From f6e93525bc902993a6b85dd84e2be0a1c1b41cfa Mon Sep 17 00:00:00 2001 From: James Ward Date: Wed, 6 Feb 2019 01:26:09 -0500 Subject: [PATCH 53/65] handle ResponseError gracefully in python 2 and 3 in Python 3 the responserror doesn't have a `message` value on it. instead, it just needs to be cast to a string fixes #278 --- rediscluster/nodemanager.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/rediscluster/nodemanager.py b/rediscluster/nodemanager.py index 2c69e38d..e46bd6b1 100644 --- a/rediscluster/nodemanager.py +++ b/rediscluster/nodemanager.py @@ -165,7 +165,8 @@ def initialize(self): continue except ResponseError as e: # Isn't a cluster connection, so it won't parse these exceptions automatically - if 'CLUSTERDOWN' in e.message or 'MASTERDOWN' in e.message: + message = e.__str__() + if 'CLUSTERDOWN' in message or 'MASTERDOWN' in message: continue else: raise RedisClusterException("ERROR sending 'cluster slots' command to redis server: {0}".format(node)) From 41afbc907ac1d3acd0dd561677ba7a27ded91ef7 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Tue, 23 Jul 2019 21:51:57 +0200 Subject: [PATCH 54/65] Fixed wrong classes that was patches to induce mocked exceptions --- tests/test_node_manager.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_node_manager.py b/tests/test_node_manager.py index cd4fecc2..c8dbcb90 100644 --- a/tests/test_node_manager.py +++ b/tests/test_node_manager.py @@ -279,7 +279,7 @@ def test_cluster_slots_error(): Check that exception is raised if initialize can't execute 'CLUSTER SLOTS' command. """ - with patch.object(RedisCluster, 'execute_command') as execute_command_mock: + with patch.object(Redis, 'execute_command') as execute_command_mock: execute_command_mock.side_effect = Exception("foobar") n = NodeManager(startup_nodes=[{"host": "127.0.0.1", "port": 7000}]) @@ -295,7 +295,7 @@ def test_cluster_slots_error_expected_responseerror(): Check that exception is not raised if initialize can't execute 'CLUSTER SLOTS' command but can hit other nodes. 
""" - with patch.object(StrictRedis, 'execute_command') as execute_command_mock: + with patch.object(Redis, 'execute_command') as execute_command_mock: execute_command_mock.side_effect = ResponseError("MASTERDOWN") n = NodeManager(startup_nodes=[{"host": "127.0.0.1", "port": 7000}]) From b6ebf53a2f38906be38260a570e35f5708ae67d0 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 11 Aug 2019 17:02:15 +0200 Subject: [PATCH 55/65] Add line in readme about the supported redis-py version range for this major release --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 432b8b2f..27b430ff 100644 --- a/README.md +++ b/README.md @@ -30,6 +30,8 @@ Latest stable release from pypi $ pip install redis-py-cluster ``` +This major version of `redis-py-cluster` supports `redis-py>=3.0.0,<3.1.0`. + ## Usage example From de944503981eeb17c5e741e345debdc1b91b96c4 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 11 Aug 2019 17:03:16 +0200 Subject: [PATCH 56/65] Update range of year for copyright --- LICENSE | 2 +- README.md | 2 +- docs/License.txt | 2 +- docs/license.rst | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/LICENSE b/LICENSE index 66ccb488..f2a09d18 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2014-2018 Johan Andersson +Copyright (c) 2014-2019 Johan Andersson Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation diff --git a/README.md b/README.md index 27b430ff..925942f0 100644 --- a/README.md +++ b/README.md @@ -56,7 +56,7 @@ True ## License & Authors -Copyright (c) 2013-2018 Johan Andersson +Copyright (c) 2013-2019 Johan Andersson MIT (See docs/License.txt file) diff --git a/docs/License.txt b/docs/License.txt index bf0afb13..ceabc499 100644 --- a/docs/License.txt +++ b/docs/License.txt @@ -1,4 +1,4 @@ -Copyright (c) 2014-2018 Johan Andersson +Copyright (c) 2014-2019 Johan Andersson Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation diff --git a/docs/license.rst b/docs/license.rst index 305c9087..d023468c 100644 --- a/docs/license.rst +++ b/docs/license.rst @@ -1,7 +1,7 @@ Licensing --------- -Copyright (c) 2013-2018 Johan Andersson +Copyright (c) 2013-2019 Johan Andersson MIT (See docs/License.txt file) From c6bf328a533f3dc02320ea61f592d371bf3f97ce Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 11 Aug 2019 17:08:52 +0200 Subject: [PATCH 57/65] Remove gitter link as that chatt room is no longer in use --- README.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/README.md b/README.md index 925942f0..ce98a95d 100644 --- a/README.md +++ b/README.md @@ -4,8 +4,6 @@ This client provides a client for redis cluster that was added in redis 3.0. This project is a port of `redis-rb-cluster` by antirez, with alot of added functionality. 
The original source can be found at https://github.com/antirez/redis-rb-cluster -Gitter chat room: [![Gitter](https://badges.gitter.im/Grokzen/redis-py-cluster.svg)](https://gitter.im/Grokzen/redis-py-cluster?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) - [![Build Status](https://travis-ci.org/Grokzen/redis-py-cluster.svg?branch=master)](https://travis-ci.org/Grokzen/redis-py-cluster) [![Coverage Status](https://coveralls.io/repos/Grokzen/redis-py-cluster/badge.png)](https://coveralls.io/r/Grokzen/redis-py-cluster) [![PyPI version](https://badge.fury.io/py/redis-py-cluster.svg)](http://badge.fury.io/py/redis-py-cluster) From 2543b1adbc409008469c4924365c3a509c0a6aa0 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 11 Aug 2019 17:32:57 +0200 Subject: [PATCH 58/65] Remove two broken tests since readonly from clients was introduced. Updated variabels in get_mocked_redis_client to avoid overwriting variables between inner and outer scope --- tests/test_cluster_obj.py | 60 ++------------------------------------- 1 file changed, 3 insertions(+), 57 deletions(-) diff --git a/tests/test_cluster_obj.py b/tests/test_cluster_obj.py index 5ff41e70..02bbc8ff 100644 --- a/tests/test_cluster_obj.py +++ b/tests/test_cluster_obj.py @@ -38,8 +38,8 @@ def get_mocked_redis_client(*args, **kwargs): on different installations and machines. """ with patch.object(Redis, 'execute_command') as execute_command_mock: - def execute_command(self, *args, **kwargs): - if args[0] == 'slots': + def execute_command(self, *_args, **_kwargs): + if _args[0] == 'slots': mock_cluster_slots = [ [ 0, 5460, @@ -58,7 +58,7 @@ def execute_command(self, *args, **kwargs): ] ] return mock_cluster_slots - elif args[0] == 'cluster-require-full-coverage': + elif _args[0] == 'cluster-require-full-coverage': return {'cluster-require-full-coverage': 'yes'} execute_command_mock.side_effect = execute_command @@ -410,60 +410,6 @@ def ok_response(connection, *args, **options): assert p.execute() == ["MOCK_OK"] -def assert_moved_redirection_on_slave(sr, connection_pool_cls, cluster_obj): - """ - """ - # we assume this key is set on 127.0.0.1:7000(7003) - sr.set('foo16706', 'foo') - time.sleep(1) - - with patch.object(connection_pool_cls, 'get_node_by_slot') as return_slave_mock: - return_slave_mock.return_value = { - 'name': '127.0.0.1:7004', - 'host': '127.0.0.1', - 'port': 7004, - 'server_type': 'slave', - } - - master_value = { - 'host': '127.0.0.1', - 'name': '127.0.0.1:7000', - 'port': 7000, - 'server_type': 'master', - } - - with patch.object(ClusterConnectionPool, 'get_master_node_by_slot') as return_master_mock: - return_master_mock.return_value = master_value - assert cluster_obj.get('foo16706') == b'foo' - assert return_master_mock.call_count == 1 - - -def test_moved_redirection_on_slave_with_default_client(sr): - """ - Test that the client is redirected normally with default - (readonly_mode=False) client even when we connect always to slave. - """ - r = get_mocked_redis_client(host="127.0.0.1", port=7000) - - assert_moved_redirection_on_slave( - sr, - ClusterConnectionPool, - # RedisCluster(host="127.0.0.1", port=7000, reinitialize_steps=1) - get_mocked_redis_client(host="127.0.0.1", port=7000, reinitialize_steps=1) - ) - - -def test_moved_redirection_on_slave_with_readonly_mode_client(sr): - """ - Ditto with READONLY mode. 
- """ - assert_moved_redirection_on_slave( - sr, - ClusterReadOnlyConnectionPool, - RedisCluster(host="127.0.0.1", port=7000, readonly_mode=True, reinitialize_steps=1) - ) - - def test_access_correct_slave_with_readonly_mode_client(sr): """ Test that the client can get value normally with readonly mode From 025d75655814969abf03f49265405290ab500216 Mon Sep 17 00:00:00 2001 From: Grokzen Date: Sun, 11 Aug 2019 17:51:20 +0200 Subject: [PATCH 59/65] Remove old parallel docs about threaded pipelines and add a updated section about how it is implemented right now. Fixes #250 --- docs/index.rst | 1 - docs/pipelines.rst | 13 ++++++++++- docs/threads.rst | 57 ---------------------------------------------- 3 files changed, 12 insertions(+), 59 deletions(-) delete mode 100644 docs/threads.rst diff --git a/docs/index.rst b/docs/index.rst index 9041cbcf..70396bb5 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -116,7 +116,6 @@ The Usage Guide commands limitations-and-differences pipelines - threads pubsub readonly-mode diff --git a/docs/pipelines.rst b/docs/pipelines.rst index c092b604..942cd7f4 100644 --- a/docs/pipelines.rst +++ b/docs/pipelines.rst @@ -70,7 +70,18 @@ Packing Commands When issuing only a single command, there is only one network round trip to be made. But what if you issue 100 pipelined commands? In a single-instance redis configuration, you still only need to make one network hop. The commands are packed into a single request and the server responds with all the data for those requests in a single response. But with redis cluster, those keys could be spread out over many different nodes. -The client is responsible for figuring out which commands map to which nodes. Let's say for example that your 100 pipelined commands need to route to 3 different nodes? The first thing the client does is break out the commands that go to each node, so it only has 3 network requests to make instead of 100. +The client is responsible for figuring out which commands map to which nodes. Let's say for example that your 100 pipelined commands need to route to 3 different nodes? The first thing the client does is break out the commands that go to each node, so it only has 3 network requests to make instead of 100. + + +Parallel execution of pipeline +------------------------------ + +In older version of `redis-py-cluster`, there was a thread implementation that helped to increaes the performance of running pipelines by running the connections and execution of all commands to all nodes in the pipeline in paralell. This implementation was later removed in favor of a much simpler and faster implementation. + +In this new implementation we execute everything in the same thread, but we do all the writing to all sockets in order to each different server and then start to wait for them in sequence until all of them is complete. There is no real need to run them in parralell since we still have to wait for a thread join of all parralell executions before the code can continue, so we can wait in sequence for all of them to complete. This is not the absolute fastest implementation, but it much simpler to implement and maintain and cause less issues becuase there is no threads or other parallel ipmlementation that will use some overhead and add complexity to the method. + +This feature is implemented by default and will be used in all pipeline requests. 
diff --git a/docs/threads.rst b/docs/threads.rst
deleted file mode 100644
index 790db8c7..00000000
--- a/docs/threads.rst
+++ /dev/null
@@ -1,57 +0,0 @@
-Threaded Pipeline
-=================
-
-Redis cluster optionally supports parallel execution of pipelined commands to reduce latency of pipelined requests via threads.
-
-
-Rationale
----------
-
-When pipelining a bunch of commands to the cluster, many of the commands may be routed to different nodes in the cluster. The client-server design in redis-cluster dictates that the client communicates directly with each node in the cluster rather than treating each node as a homogenous group.
-
-The advantage to this design is that a smart client can communicate with the cluster with the same latency characteristics as it might communicate with a single-instance redis cluster. But only if the client can communicate with each node in parallel.
-
-
-
-Parallel network i/o using threads
-----------------------------------
-
-That's pretty good. But we are still issuing those 3 network requests in serial order. The code loops through each node and issues a request, then gets the response, then issues the next one.
-
-We improve the situation by using python threads, making each request in parallel over the network. Now we are only as slow as the slowest single request.
-
-### Disabling Threads
-You can disable threaded execution either in the class constructor:
-
-.. code-block:: python
-
-    r = rediscluster.RedisCluster( ... pipeline_use_threads=False) #true by default
-    pipe = r.pipeline()
-
-Or you can disable it on a case by case basis as you instantiate the pipeline object.
-
-.. code-block:: python
-
-    pipe = r.pipeline(use_threads=False)
-
-The later example always overrides if explicitly set. Otherwise, it falls back on the value passed to the RedisCluster constructor.
-
-
-
-Footnote: Gevent
-----------------
-
-Python offers something even more lightweight and efficient than threads to perform tasks in parallel: GEVENT.
-
-You can read up more about gevent here: http://www.gevent.org/
-
-If you want to try to get the benefits of gevent in redis-py-cluster, you can monkey patch your code with the following lines at the very beginning of your application:
-
-.. code-block:: python
-
-    import os
-    os.environ["GEVENT_RESOLVER"] = "ares"
-    import gevent.monkey
-    gevent.monkey.patch_all()
-
-This will patch the python socket code, threaded libraries, and dns resolution into a single threaded application substituting coroutines for parallel threads.

From 3fb29e90ff61aa71465431f70712e7aa4f4fb83c Mon Sep 17 00:00:00 2001
From: Grokzen
Date: Sun, 11 Aug 2019 17:57:51 +0200
Subject: [PATCH 60/65] Update some text and reformat layout of some text
 blocks in index.rst

---
 docs/index.rst | 32 +++++++++++++++++++++++++-------
 1 file changed, 25 insertions(+), 7 deletions(-)

diff --git a/docs/index.rst b/docs/index.rst
index 70396bb5..5b330c91 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -6,8 +6,9 @@
 Welcome to redis-py-cluster's documentation!
 ============================================
 
-This project is a port of `redis-rb-cluster` by antirez, with a lot of added functionality. The original source can be found at https://github.com/antirez/redis-rb-cluster.
+This project is a port of `redis-rb-cluster` by antirez, with a lot of added functionality.
+The original source can be found at https://github.com/antirez/redis-rb-cluster.
 
 The source code is `available on github`_.
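The documentation changes that follow describe how ``decode_responses`` affects return types on Python 2 and Python 3; a small sketch of that behaviour (assuming the same kind of startup node as in the other examples):

```python
from rediscluster import RedisCluster

startup_nodes = [{"host": "127.0.0.1", "port": "7000"}]

raw = RedisCluster(startup_nodes=startup_nodes)  # decode_responses defaults to False
decoded = RedisCluster(startup_nodes=startup_nodes, decode_responses=True)

raw.set("greeting", "hello")
print(raw.get("greeting"))      # b'hello' on Python 3 (bytes), 'hello' on Python 2 (str)
print(decoded.get("greeting"))  # 'hello' on Python 3 (str), u'hello' on Python 2 (unicode)
```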
@@ -52,18 +53,29 @@ Small sample script that shows how to get started with RedisCluster. It can also
 >>> print(rc.get("foo"))
 'bar'
 
-
 .. note:: Python 3
 
-    Since Python 3 changed to Unicode strings from Python 2's ASCII, the return type of *most* commands will be binary strings, unless the class is instantiated with the option ``decode_responses=True``. In this case, the responses will be Python 3 strings (Unicode). For the init argument `decode_responses`, when set to False, redis-py-cluster will not attempt to decode the responses it receives. In Python 3, this means the responses will be of type `bytes`. In Python 2, they will be native strings (`str`). If `decode_responses` is set to True, for Python 3 responses will be `str`, for Python 2 they will be `unicode`.
+    Since Python 3 changed to Unicode strings from Python 2's ASCII, the return type of *most* commands will be binary strings,
+    unless the class is instantiated with the option ``decode_responses=True``.
+
+    In this case, the responses will be Python 3 strings (Unicode).
+
+    For the init argument `decode_responses`, when set to False, redis-py-cluster will not attempt to decode the responses it receives.
+
+    In Python 3, this means the responses will be of type `bytes`. In Python 2, they will be native strings (`str`).
+
+    If `decode_responses` is set to True, for Python 3 responses will be `str`, for Python 2 they will be `unicode`.
+
+
 Dependencies & supported python versions
 ----------------------------------------
 
-- Python: redis >= `2.10.2`, <= `2.10.5` is required.  Older versions in the `2.10.x` series can work but using the latest one is allways recommended.
+It is always recommended to use the latest version of the dependencies of this project.
+
+- Redis-py: 'redis>=3.0.0,<3.1.0' is required in this major version of this cluster lib.
 - Optional Python: hiredis >= `0.2.0`. Older versions might work but is not tested.
-- A working Redis cluster based on version >= `3.0.0` is required. Only `3.0.x` releases is supported.
+- A working Redis cluster based on version `>=3.0.0` is required.
 
 
@@ -78,7 +90,13 @@ Supported python versions
 
 .. note:: Python 3.4.0
 
-    A segfault was found when running `redis-py` in python `3.4.0` that was introduced into the codebase in python `3.4.0`. Because of this both `redis-py` and `redis-py-cluster` will not work when running with `3.4.0`. This lib has decided to block the lib from execution on `3.4.0` and you will get a exception when trying to import the code. The only solution is to use python `3.4.1` or some other higher minor version in the `3.4` series.
+    A segfault was found when running `redis-py` in python `3.4.0` that was introduced into the codebase in python `3.4.0`.
+
+    Because of this both `redis-py` and `redis-py-cluster` will not work when running with `3.4.0`.
+
+    This lib has decided to block the lib from execution on `3.4.0` and you will get an exception when trying to import the code.
+
+    The only solution is to use python `3.4.1` or some other higher minor version in the `3.4` series.

From f14e7735d8cd8d1c1adeb2415161cc86a3a00558 Mon Sep 17 00:00:00 2001
From: Grokzen
Date: Mon, 12 Aug 2019 00:29:53 +0200
Subject: [PATCH 61/65] Add a better exception message to
 get_master_node_by_slot() in the case where a full cluster is not
 initialized yet.
Fixes #288
---
 rediscluster/connection.py | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/rediscluster/connection.py b/rediscluster/connection.py
index 62a89c91..c8798740 100644
--- a/rediscluster/connection.py
+++ b/rediscluster/connection.py
@@ -308,7 +308,7 @@ def get_connection_by_slot(self, slot):
         try:
             return self.get_connection_by_node(self.get_node_by_slot(slot))
-        except KeyError:
+        except (KeyError, RedisClusterException) as exc:
             return self.get_random_connection()
 
     def get_connection_by_node(self, node):
@@ -331,7 +331,12 @@ def get_connection_by_node(self, node):
     def get_master_node_by_slot(self, slot):
         """
         """
-        return self.nodes.slots[slot][0]
+        try:
+            return self.nodes.slots[slot][0]
+        except KeyError as ke:
+            raise RedisClusterException('Slot "{slot}" not covered by the cluster. "skip_full_coverage_check={skip_full_coverage_check}"'.format(
+                slot=slot, skip_full_coverage_check=self.nodes._skip_full_coverage_check,
+            ))
 
     def get_node_by_slot(self, slot, *args, **kwargs):
         """

From d5a1b703b08ea6c3dd32cd654f9f087618b1e26c Mon Sep 17 00:00:00 2001
From: Grokzen
Date: Mon, 12 Aug 2019 00:35:13 +0200
Subject: [PATCH 62/65] Add python 3.7 to the compatible python version
 classifier list

---
 setup.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/setup.py b/setup.py
index 4d8fcca0..00d2d82f 100644
--- a/setup.py
+++ b/setup.py
@@ -60,6 +60,7 @@
         'Programming Language :: Python :: 3.4',
         'Programming Language :: Python :: 3.5',
         'Programming Language :: Python :: 3.6',
+        'Programming Language :: Python :: 3.7',
         'Environment :: Web Environment',
         'Operating System :: POSIX',
         'License :: OSI Approved :: MIT License',

From db5235dfa229469a98f721a1b536555afc103d93 Mon Sep 17 00:00:00 2001
From: Grokzen
Date: Mon, 12 Aug 2019 00:37:00 +0200
Subject: [PATCH 63/65] Update default REDIS_VERSION in Makefile to be 5.0.5

---
 Makefile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Makefile b/Makefile
index 0c2e1766..563494de 100644
--- a/Makefile
+++ b/Makefile
@@ -216,7 +216,7 @@ ifndef REDIS_TRIB_RB
 endif
 
 ifndef REDIS_VERSION
-    REDIS_VERSION=4.0.10
+    REDIS_VERSION=5.0.5
 endif
 
 export REDIS_CLUSTER_NODE1_CONF

From c46913b169e5dc5df615024b8d3d7bcf34230bdd Mon Sep 17 00:00:00 2001
From: Grokzen
Date: Mon, 12 Aug 2019 00:51:29 +0200
Subject: [PATCH 64/65] Minor updates to index.rst to make text flow and read
 a bit better. Add some minor additional details to some parts to clarify
 certain things.

---
 docs/index.rst | 34 +++++++++++++++++++++-------------
 1 file changed, 21 insertions(+), 13 deletions(-)

diff --git a/docs/index.rst b/docs/index.rst
index 5b330c91..49e38c1a 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -10,7 +10,7 @@ This project is a port of `redis-rb-cluster` by antirez, with a lot of added fun
 The original source can be found at https://github.com/antirez/redis-rb-cluster.
 
-The source code is `available on github`_.
+The source code for this project is `available on github`_.
 
 .. _available on github: http://github.com/grokzen/redis-py-cluster
 
@@ -33,10 +33,12 @@ or from source code
 
 
-Usage example
+Basic usage example
 -------------
 
-Small sample script that shows how to get started with RedisCluster. It can also be found in the file `exmaples/basic.py`
+Small sample script that shows how to get started with RedisCluster. It can also be found in the file `examples/basic.py`.
+
+Additional code examples of more advanced functionality can be found in the `examples/` folder in the source code git repo.
 
 .. code-block:: python
@@ -68,8 +70,8 @@ Small sample script that shows how to get started with RedisCluster. It can also
 
 
-Dependencies & supported python versions
------------------------------------------
+Library Dependencies
+--------------------
 
 It is always recommended to use the latest version of the dependencies of this project.
 
@@ -82,15 +84,19 @@ It is always recommended to use the latest version of the dependencies of this p
 Supported python versions
 -------------------------
 
-- 2.7
+Python versions should follow the same supported python versions as specified by the upstream package `redis-py`, based on what major version(s) is specified.
+
+If this library supports more than one major version line of `redis-py`, then the supported python versions must include the set of python versions supported by all major version lines.
+
+- 2.7.x
 - 3.4.1+ (See note)
-- 3.5
-- 3.6
-- 3.7
+- 3.5.x
+- 3.6.x
+- 3.7.x
 
 .. note:: Python 3.4.0
 
-    A segfault was found when running `redis-py` in python `3.4.0` that was introduced into the codebase in python `3.4.0`.
+    A segfault was found when running `redis-py` in python `3.4.0` that was introduced into python itself in version `3.4.0`.
 
     Because of this both `redis-py` and `redis-py-cluster` will not work when running with `3.4.0`.
 
@@ -98,14 +104,16 @@ Supported python versions
 
     The only solution is to use python `3.4.1` or some other higher minor version in the `3.4` series.
 
+    When python `3.8.0` is released and added as a supported python version, python 3.4.x will be removed from the supported versions and this hard block will be removed from the source code.
+
 
-Regarding duplicate pypi and python naming
-------------------------------------------
+Regarding duplicate package name on pypi
+----------------------------------------
 
 It has been found that the python module name that is used in this library (rediscluster) is already shared with a similar but older project.
 
-This lib will not change the naming of the module to something else to prevent collisions between the libs.
+This lib will `NOT` change the naming of the module to something else to prevent collisions between the libs.
 
 My reasoning for this is the following

From a59784941c997792d2348d7242243042f6e0c50f Mon Sep 17 00:00:00 2001
From: Grokzen
Date: Mon, 12 Aug 2019 01:29:29 +0200
Subject: [PATCH 65/65] Write release notes for 2.0.0 release and added
 upgrade instructions for the release

---
 docs/release-notes.rst | 19 +++++++++++++++++++
 docs/upgrading.rst     | 14 ++++++++++++++
 2 files changed, 33 insertions(+)

diff --git a/docs/release-notes.rst b/docs/release-notes.rst
index e0c89498..c5579bdf 100644
--- a/docs/release-notes.rst
+++ b/docs/release-notes.rst
@@ -1,6 +1,25 @@
 Release Notes
 =============
 
+2.0.0 (Aug 12, 2019)
+
+Specific changes to redis-py-cluster are mentioned below here.
+
+ * Update entire code base to now support all redis-py versions in the 3.0.x version line. Any future redis-py version will be supported at a later time.
+ * Major update to all tests to mirror the code of the same tests from redis-py
+ * Dropped support for the 2.10.6 redis-py release.
+ * Add pythoncodestyle lint validation check to travis-ci runs to check for proper linting before accepting PRs
+ * Class StrictRedisCluster was renamed to RedisCluster
+ * Class StrictRedis has been removed to mirror upstream class structure
+ * Class StrictClusterPipeline was renamed to ClusterPipeline
+ * Fixed travis-ci tests not running properly on python 3.7
+ * Fixed documentation regarding threads in pipelines
+ * Update list of command callbacks and parsers. Added in "CLIENT ID"
+ * Removed custom implementation of SORT and reverted back to using the same-slot mechanism for that command.
+ * Added better exception message to get_master_node_by_slot command to help the user understand the error.
+ * Improved the exception object message parsing when running on python3
+
+
 1.3.6 (Nov 16, 2018)
 --------------------
 
diff --git a/docs/upgrading.rst b/docs/upgrading.rst
index 66d20c7d..fa7cedd5 100644
--- a/docs/upgrading.rst
+++ b/docs/upgrading.rst
@@ -3,6 +3,20 @@ Upgrading redis-py-cluster
 
 This document describes what must be done when upgrading between different versions to ensure that code still works.
 
+1.3.x --> 2.0.0
+---------------
+
+Redis-py upstream package dependency has now been updated to be any of the releases in the major version line 3.0.x. This means that you must upgrade your dependency from 2.10.6 to the latest version. Several internal components have been updated to reflect the code from 3.0.x.
+
+Class StrictRedisCluster was renamed to RedisCluster. All usages of this class must be updated.
+
+Class StrictRedis has been removed to mirror upstream class structure.
+
+Class StrictClusterPipeline was renamed to ClusterPipeline.
+
+Method SORT has been changed back to only be allowed to execute if all keys are in the same slot. No more client side parsing and handling of the keys and values.
+
+
 1.3.2 --> Next Release
 ----------------------
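As a rough illustration of the class rename described in the upgrade notes above, a minimal sketch of the before/after usage (the startup node address is only an example):

```python
# redis-py-cluster 1.3.x
# from rediscluster import StrictRedisCluster
# rc = StrictRedisCluster(startup_nodes=[{"host": "127.0.0.1", "port": "7000"}])

# redis-py-cluster 2.0.0
from rediscluster import RedisCluster

rc = RedisCluster(startup_nodes=[{"host": "127.0.0.1", "port": "7000"}],
                  decode_responses=True)
rc.set("some-key", "some-value")
print(rc.get("some-key"))

# Pipelines follow the same rename: rc.pipeline() now returns a ClusterPipeline
# (previously StrictClusterPipeline).
with rc.pipeline(transaction=False) as pipe:
    pipe.set("some-key", "some-value")
    pipe.get("some-key")
    pipe.execute()
```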